-rw-r--r--.mailmap3
-rw-r--r--Documentation/ABI/testing/sysfs-block-rssd12
-rw-r--r--Documentation/ABI/testing/sysfs-bus-fcoe77
-rw-r--r--Documentation/ABI/testing/sysfs-bus-iio31
-rw-r--r--Documentation/ABI/testing/sysfs-class-mtd51
-rw-r--r--Documentation/CodingStyle16
-rw-r--r--Documentation/DocBook/mtdnand.tmpl2
-rw-r--r--Documentation/arm/OMAP/DSS46
-rw-r--r--Documentation/arm/SPEAr/overview.txt2
-rw-r--r--Documentation/device-mapper/thin-provisioning.txt11
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt93
-rw-r--r--Documentation/devicetree/bindings/mtd/gpmi-nand.txt33
-rw-r--r--Documentation/devicetree/bindings/mtd/mxc-nand.txt19
-rw-r--r--Documentation/feature-removal-schedule.txt6
-rw-r--r--Documentation/filesystems/Locking3
-rw-r--r--Documentation/filesystems/proc.txt23
-rw-r--r--Documentation/filesystems/vfs.txt4
-rw-r--r--Documentation/hwmon/coretemp22
-rw-r--r--Documentation/kernel-parameters.txt9
-rw-r--r--Documentation/networking/stmmac.txt44
-rw-r--r--Documentation/power/charger-manager.txt41
-rw-r--r--Documentation/power/power_supply_class.txt2
-rw-r--r--Documentation/sysctl/fs.txt7
-rw-r--r--Documentation/vm/frontswap.txt278
-rw-r--r--Documentation/vm/pagemap.txt2
-rw-r--r--Documentation/vm/slub.txt2
-rw-r--r--Documentation/x86/efi-stub.txt65
-rw-r--r--MAINTAINERS90
-rw-r--r--Makefile8
-rw-r--r--arch/alpha/include/asm/posix_types.h3
-rw-r--r--arch/alpha/kernel/signal.c20
-rw-r--r--arch/arm/Kconfig5
-rw-r--r--arch/arm/boot/dts/db8500.dtsi204
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi16
-rw-r--r--arch/arm/boot/dts/imx27.dtsi9
-rw-r--r--arch/arm/boot/dts/lpc32xx.dtsi41
-rw-r--r--arch/arm/boot/dts/mmp2-brownstone.dts4
-rw-r--r--arch/arm/boot/dts/omap2.dtsi2
-rw-r--r--arch/arm/boot/dts/phy3250.dts4
-rw-r--r--arch/arm/boot/dts/snowball.dts32
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi2
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi2
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear300.dtsi2
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear310.dtsi2
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts2
-rw-r--r--arch/arm/boot/dts/spear320.dtsi2
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi2
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts13
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca5s.dts13
-rw-r--r--arch/arm/boot/dts/vexpress-v2p-ca9.dts9
-rw-r--r--arch/arm/common/dmabounce.c16
-rw-r--r--arch/arm/configs/u8500_defconfig1
-rw-r--r--arch/arm/include/asm/futex.h1
-rw-r--r--arch/arm/include/asm/hardware/sp810.h2
-rw-r--r--arch/arm/include/asm/posix_types.h3
-rw-r--r--arch/arm/kernel/entry-armv.S1
-rw-r--r--arch/arm/kernel/kprobes-thumb.c2
-rw-r--r--arch/arm/kernel/signal.c49
-rw-r--r--arch/arm/kernel/smp.c8
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c4
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c3
-rw-r--r--arch/arm/mach-exynos/Kconfig2
-rw-r--r--arch/arm/mach-exynos/Makefile2
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c51
-rw-r--r--arch/arm/mach-exynos/cpuidle.c2
-rw-r--r--arch/arm/mach-exynos/include/mach/pm-core.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/pmu.h4
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-clock.h18
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h141
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c26
-rw-r--r--arch/arm/mach-exynos/mach-origen.c24
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c28
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c26
-rw-r--r--arch/arm/mach-exynos/pm.c223
-rw-r--r--arch/arm/mach-exynos/pmu.c200
-rw-r--r--arch/arm/mach-highbank/Makefile6
-rw-r--r--arch/arm/mach-highbank/core.h1
-rw-r--r--arch/arm/mach-highbank/highbank.c14
-rw-r--r--arch/arm/mach-highbank/smc.S27
-rw-r--r--arch/arm/mach-imx/Kconfig1
-rw-r--r--arch/arm/mach-imx/clk-imx1.c3
-rw-r--r--arch/arm/mach-imx/clk-imx21.c4
-rw-r--r--arch/arm/mach-imx/clk-imx25.c2
-rw-r--r--arch/arm/mach-imx/clk-imx27.c3
-rw-r--r--arch/arm/mach-imx/clk-imx31.c3
-rw-r--r--arch/arm/mach-imx/clk-imx35.c6
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c12
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c22
-rw-r--r--arch/arm/mach-imx/clk-pllv2.c93
-rw-r--r--arch/arm/mach-imx/crm-regs-imx5.h2
-rw-r--r--arch/arm/mach-imx/hotplug.c42
-rw-r--r--arch/arm/mach-imx/imx27-dt.c1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx35.c1
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c1
-rw-r--r--arch/arm/mach-imx/mach-imx27_visstrim_m10.c36
-rw-r--r--arch/arm/mach-imx/mach-mx21ads.c2
-rw-r--r--arch/arm/mach-imx/mm-imx3.c4
-rw-r--r--arch/arm/mach-imx/mm-imx5.c2
-rw-r--r--arch/arm/mach-ixp4xx/common.c48
-rw-r--r--arch/arm/mach-ixp4xx/include/mach/gpio.h79
-rw-r--r--arch/arm/mach-ixp4xx/ixdp425-setup.c4
-rw-r--r--arch/arm/mach-kirkwood/board-iconnect.c3
-rw-r--r--arch/arm/mach-kirkwood/common.c9
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/kirkwood.h1
-rw-r--r--arch/arm/mach-mmp/irq.c7
-rw-r--r--arch/arm/mach-nomadik/board-nhk8815.c2
-rw-r--r--arch/arm/mach-omap1/board-fsample.c3
-rw-r--r--arch/arm/mach-omap1/board-h2.c3
-rw-r--r--arch/arm/mach-omap1/board-h3.c3
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c3
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c6
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c28
-rw-r--r--arch/arm/mach-omap2/board-rx51-peripherals.c6
-rw-r--r--arch/arm/mach-omap2/clock3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/clock44xx_data.c5
-rw-r--r--arch/arm/mach-omap2/cm.h11
-rw-r--r--arch/arm/mach-omap2/cminst44xx.c4
-rw-r--r--arch/arm/mach-omap2/display.c196
-rw-r--r--arch/arm/mach-omap2/dsp.c3
-rw-r--r--arch/arm/mach-omap2/gpmc.c184
-rw-r--r--arch/arm/mach-omap2/id.c11
-rw-r--r--arch/arm/mach-omap2/irq.c1
-rw-r--r--arch/arm/mach-omap2/mux.c4
-rw-r--r--arch/arm/mach-omap2/mux.h11
-rw-r--r--arch/arm/mach-omap2/omap_hwmod.c2
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_44xx_data.c8
-rw-r--r--arch/arm/mach-omap2/omap_l3_smx.c3
-rw-r--r--arch/arm/mach-omap2/omap_phy_internal.c6
-rw-r--r--arch/arm/mach-omap2/pm34xx.c1
-rw-r--r--arch/arm/mach-omap2/prm2xxx_3xxx.c14
-rw-r--r--arch/arm/mach-omap2/serial.c67
-rw-r--r--arch/arm/mach-omap2/usb-musb.c6
-rw-r--r--arch/arm/mach-omap2/usb-tusb6010.c2
-rw-r--r--arch/arm/mach-orion5x/include/mach/bridge-regs.h2
-rw-r--r--arch/arm/mach-orion5x/include/mach/io.h22
-rw-r--r--arch/arm/mach-orion5x/include/mach/orion5x.h1
-rw-r--r--arch/arm/mach-orion5x/ts78xx-setup.c3
-rw-r--r--arch/arm/mach-pxa/balloon3.c3
-rw-r--r--arch/arm/mach-pxa/em-x270.c3
-rw-r--r--arch/arm/mach-pxa/palmtx.c3
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/irqs.h15
-rw-r--r--arch/arm/mach-s3c24xx/irq-s3c2416.c98
-rw-r--r--arch/arm/mach-s3c24xx/mach-smdk2416.c27
-rw-r--r--arch/arm/mach-s3c24xx/s3c2416.c1
-rw-r--r--arch/arm/mach-s3c64xx/cpuidle.c45
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c25
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410-module.c9
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c26
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c24
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c92
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c90
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c26
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c26
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c25
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6440.c24
-rw-r--r--arch/arm/mach-s5p64x0/mach-smdk6450.c24
-rw-r--r--arch/arm/mach-s5pc100/mach-smdkc100.c27
-rw-r--r--arch/arm/mach-s5pv210/mach-aquila.c36
-rw-r--r--arch/arm/mach-s5pv210/mach-goni.c26
-rw-r--r--arch/arm/mach-s5pv210/mach-smdkv210.c24
-rw-r--r--arch/arm/mach-shmobile/Kconfig6
-rw-r--r--arch/arm/mach-spear13xx/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/dma.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/timex.h2
-rw-r--r--arch/arm/mach-spear13xx/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-spear13xx/spear1310.c2
-rw-r--r--arch/arm/mach-spear13xx/spear1340.c2
-rw-r--r--arch/arm/mach-spear13xx/spear13xx.c2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/debug-macro.S2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/timex.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/uncompress.h2
-rw-r--r--arch/arm/mach-spear3xx/spear300.c2
-rw-r--r--arch/arm/mach-spear3xx/spear310.c2
-rw-r--r--arch/arm/mach-spear3xx/spear320.c2
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/gpio.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-tegra/reset.c2
-rw-r--r--arch/arm/mach-ux500/board-mop500-uib.c4
-rw-r--r--arch/arm/mach-ux500/board-mop500.c86
-rw-r--r--arch/arm/mach-ux500/board-mop500.h4
-rw-r--r--arch/arm/mach-ux500/cpu-db8500.c25
-rw-r--r--arch/arm/mach-versatile/core.c19
-rw-r--r--arch/arm/mach-versatile/include/mach/hardware.h3
-rw-r--r--arch/arm/mach-versatile/include/mach/io.h27
-rw-r--r--arch/arm/mach-versatile/pci.c18
-rw-r--r--arch/arm/mach-vexpress/v2m.c2
-rw-r--r--arch/arm/mm/dma-mapping.c14
-rw-r--r--arch/arm/mm/init.c2
-rw-r--r--arch/arm/mm/mm.h2
-rw-r--r--arch/arm/net/bpf_jit_32.c5
-rw-r--r--arch/arm/net/bpf_jit_32.h4
-rw-r--r--arch/arm/plat-mxc/epit.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h4
-rw-r--r--arch/arm/plat-mxc/time.c24
-rw-r--r--arch/arm/plat-omap/clock.c2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h33
-rw-r--r--arch/arm/plat-omap/include/plat/gpmc.h11
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h4
-rw-r--r--arch/arm/plat-orion/common.c2
-rw-r--r--arch/arm/plat-pxa/ssp.c1
-rw-r--r--arch/arm/plat-samsung/include/plat/fb.h11
-rw-r--r--arch/arm/plat-samsung/include/plat/s3c2416.h3
-rw-r--r--arch/arm/plat-spear/include/plat/debug-macro.S2
-rw-r--r--arch/arm/plat-spear/include/plat/pl080.h2
-rw-r--r--arch/arm/plat-spear/include/plat/shirq.h2
-rw-r--r--arch/arm/plat-spear/include/plat/timex.h2
-rw-r--r--arch/arm/plat-spear/include/plat/uncompress.h2
-rw-r--r--arch/arm/plat-spear/pl080.c2
-rw-r--r--arch/arm/plat-spear/restart.c2
-rw-r--r--arch/arm/plat-spear/shirq.c2
-rw-r--r--arch/avr32/include/asm/posix_types.h3
-rw-r--r--arch/avr32/kernel/entry-avr32b.S4
-rw-r--r--arch/avr32/kernel/signal.c43
-rw-r--r--arch/blackfin/include/asm/posix_types.h3
-rw-r--r--arch/blackfin/include/asm/thread_info.h2
-rw-r--r--arch/blackfin/kernel/process.c2
-rw-r--r--arch/blackfin/kernel/signal.c69
-rw-r--r--arch/blackfin/kernel/trace.c32
-rw-r--r--arch/blackfin/mach-bf561/boards/acvilon.c3
-rw-r--r--arch/blackfin/mach-common/entry.S2
-rw-r--r--arch/c6x/kernel/signal.c45
-rw-r--r--arch/cris/arch-v10/kernel/signal.c34
-rw-r--r--arch/cris/arch-v32/kernel/signal.c36
-rw-r--r--arch/cris/include/asm/posix_types.h3
-rw-r--r--arch/cris/kernel/ptrace.c2
-rw-r--r--arch/frv/include/asm/posix_types.h3
-rw-r--r--arch/frv/include/asm/thread_info.h16
-rw-r--r--arch/frv/kernel/entry.S29
-rw-r--r--arch/frv/kernel/signal.c59
-rw-r--r--arch/h8300/include/asm/posix_types.h3
-rw-r--r--arch/h8300/kernel/setup.c23
-rw-r--r--arch/h8300/kernel/signal.c30
-rw-r--r--arch/h8300/mm/init.c17
-rw-r--r--arch/hexagon/kernel/signal.c50
-rw-r--r--arch/ia64/include/asm/posix_types.h3
-rw-r--r--arch/ia64/include/asm/thread_info.h18
-rw-r--r--arch/ia64/kernel/perfmon.c10
-rw-r--r--arch/ia64/kernel/process.c2
-rw-r--r--arch/ia64/kernel/signal.c34
-rw-r--r--arch/ia64/kernel/sys_ia64.c19
-rw-r--r--arch/m32r/include/asm/posix_types.h3
-rw-r--r--arch/m32r/kernel/signal.c34
-rw-r--r--arch/m68k/Kconfig2
-rw-r--r--arch/m68k/include/asm/Kbuild2
-rw-r--r--arch/m68k/include/asm/m528xsim.h2
-rw-r--r--arch/m68k/include/asm/posix_types.h3
-rw-r--r--arch/m68k/include/asm/uaccess_mm.h11
-rw-r--r--arch/m68k/kernel/ptrace.c2
-rw-r--r--arch/m68k/kernel/signal.c29
-rw-r--r--arch/m68k/kernel/time.c4
-rw-r--r--arch/m68k/lib/uaccess.c74
-rw-r--r--arch/m68k/platform/68328/timers.c6
-rw-r--r--arch/m68k/platform/68360/config.c7
-rw-r--r--arch/microblaze/include/asm/thread_info.h18
-rw-r--r--arch/microblaze/kernel/signal.c41
-rw-r--r--arch/mips/alchemy/devboards/db1200.c3
-rw-r--r--arch/mips/alchemy/devboards/db1300.c3
-rw-r--r--arch/mips/alchemy/devboards/db1550.c3
-rw-r--r--arch/mips/include/asm/posix_types.h5
-rw-r--r--arch/mips/include/asm/stat.h6
-rw-r--r--arch/mips/kernel/signal-common.h2
-rw-r--r--arch/mips/kernel/signal.c40
-rw-r--r--arch/mips/kernel/signal32.c2
-rw-r--r--arch/mips/kernel/signal_n32.c1
-rw-r--r--arch/mips/pnx833x/common/platform.c6
-rw-r--r--arch/mips/rb532/devices.c1
-rw-r--r--arch/mn10300/include/asm/posix_types.h3
-rw-r--r--arch/mn10300/kernel/signal.c41
-rw-r--r--arch/openrisc/kernel/signal.c42
-rw-r--r--arch/parisc/Kconfig1
-rw-r--r--arch/parisc/Makefile3
-rw-r--r--arch/parisc/include/asm/Kbuild1
-rw-r--r--arch/parisc/include/asm/bug.h2
-rw-r--r--arch/parisc/include/asm/posix_types.h3
-rw-r--r--arch/parisc/include/asm/smp.h2
-rw-r--r--arch/parisc/include/asm/stat.h4
-rw-r--r--arch/parisc/include/asm/thread_info.h2
-rw-r--r--arch/parisc/include/asm/uaccess.h5
-rw-r--r--arch/parisc/kernel/entry.S34
-rw-r--r--arch/parisc/kernel/parisc_ksyms.c1
-rw-r--r--arch/parisc/kernel/signal.c49
-rw-r--r--arch/parisc/kernel/signal32.c2
-rw-r--r--arch/parisc/kernel/vmlinux.lds.S6
-rw-r--r--arch/parisc/lib/lusercopy.S41
-rw-r--r--arch/powerpc/include/asm/hw_irq.h3
-rw-r--r--arch/powerpc/include/asm/posix_types.h3
-rw-r--r--arch/powerpc/include/asm/stat.h4
-rw-r--r--arch/powerpc/include/asm/thread_info.h18
-rw-r--r--arch/powerpc/kernel/module_32.c11
-rw-r--r--arch/powerpc/kernel/signal.c38
-rw-r--r--arch/powerpc/kernel/signal.h3
-rw-r--r--arch/powerpc/kernel/signal_32.c4
-rw-r--r--arch/powerpc/kernel/signal_64.c4
-rw-r--r--arch/powerpc/kernel/time.c14
-rw-r--r--arch/powerpc/kvm/book3s_hv.c96
-rw-r--r--arch/powerpc/mm/mmu_context_nohash.c11
-rw-r--r--arch/powerpc/platforms/pseries/nvram.c61
-rw-r--r--arch/s390/Kconfig1
-rw-r--r--arch/s390/include/asm/bitops.h21
-rw-r--r--arch/s390/include/asm/cio.h4
-rw-r--r--arch/s390/include/asm/cmpxchg.h54
-rw-r--r--arch/s390/include/asm/cputime.h10
-rw-r--r--arch/s390/include/asm/ctl_reg.h6
-rw-r--r--arch/s390/include/asm/current.h3
-rw-r--r--arch/s390/include/asm/elf.h12
-rw-r--r--arch/s390/include/asm/futex.h3
-rw-r--r--arch/s390/include/asm/idals.h10
-rw-r--r--arch/s390/include/asm/io.h4
-rw-r--r--arch/s390/include/asm/irq.h3
-rw-r--r--arch/s390/include/asm/kexec.h4
-rw-r--r--arch/s390/include/asm/kmap_types.h2
-rw-r--r--arch/s390/include/asm/mmu_context.h2
-rw-r--r--arch/s390/include/asm/module.h2
-rw-r--r--arch/s390/include/asm/os_info.h5
-rw-r--r--arch/s390/include/asm/percpu.h2
-rw-r--r--arch/s390/include/asm/pgalloc.h6
-rw-r--r--arch/s390/include/asm/pgtable.h44
-rw-r--r--arch/s390/include/asm/posix_types.h3
-rw-r--r--arch/s390/include/asm/processor.h39
-rw-r--r--arch/s390/include/asm/rwsem.h63
-rw-r--r--arch/s390/include/asm/setup.h18
-rw-r--r--arch/s390/include/asm/sfp-util.h2
-rw-r--r--arch/s390/include/asm/string.h4
-rw-r--r--arch/s390/include/asm/thread_info.h10
-rw-r--r--arch/s390/include/asm/timer.h4
-rw-r--r--arch/s390/include/asm/tlb.h4
-rw-r--r--arch/s390/include/asm/tlbflush.h4
-rw-r--r--arch/s390/include/asm/types.h4
-rw-r--r--arch/s390/include/asm/uaccess.h15
-rw-r--r--arch/s390/include/asm/vdso.h5
-rw-r--r--arch/s390/kernel/base.S12
-rw-r--r--arch/s390/kernel/compat_signal.c12
-rw-r--r--arch/s390/kernel/early.c2
-rw-r--r--arch/s390/kernel/entry.h2
-rw-r--r--arch/s390/kernel/head_kdump.S7
-rw-r--r--arch/s390/kernel/ipl.c16
-rw-r--r--arch/s390/kernel/irq.c3
-rw-r--r--arch/s390/kernel/machine_kexec.c11
-rw-r--r--arch/s390/kernel/os_info.c3
-rw-r--r--arch/s390/kernel/perf_cpum_cf.c2
-rw-r--r--arch/s390/kernel/setup.c12
-rw-r--r--arch/s390/kernel/signal.c49
-rw-r--r--arch/s390/kernel/smp.c45
-rw-r--r--arch/s390/kernel/sysinfo.c21
-rw-r--r--arch/s390/lib/uaccess_mvcos.c2
-rw-r--r--arch/s390/lib/uaccess_std.c2
-rw-r--r--arch/s390/mm/maccess.c38
-rw-r--r--arch/s390/mm/vmem.c2
-rw-r--r--arch/s390/oprofile/hwsampler.c2
-rw-r--r--arch/score/kernel/signal.c42
-rw-r--r--arch/sh/Kconfig2
-rw-r--r--arch/sh/Makefile16
-rw-r--r--arch/sh/boards/mach-migor/setup.c1
-rw-r--r--arch/sh/include/asm/Kbuild34
-rw-r--r--arch/sh/include/asm/bitsperlong.h1
-rw-r--r--arch/sh/include/asm/cputime.h6
-rw-r--r--arch/sh/include/asm/current.h1
-rw-r--r--arch/sh/include/asm/delay.h1
-rw-r--r--arch/sh/include/asm/div64.h1
-rw-r--r--arch/sh/include/asm/emergency-restart.h6
-rw-r--r--arch/sh/include/asm/errno.h6
-rw-r--r--arch/sh/include/asm/fcntl.h1
-rw-r--r--arch/sh/include/asm/ioctl.h1
-rw-r--r--arch/sh/include/asm/ipcbuf.h1
-rw-r--r--arch/sh/include/asm/irq_regs.h1
-rw-r--r--arch/sh/include/asm/kvm_para.h1
-rw-r--r--arch/sh/include/asm/local.h7
-rw-r--r--arch/sh/include/asm/local64.h1
-rw-r--r--arch/sh/include/asm/mman.h1
-rw-r--r--arch/sh/include/asm/msgbuf.h1
-rw-r--r--arch/sh/include/asm/param.h1
-rw-r--r--arch/sh/include/asm/parport.h1
-rw-r--r--arch/sh/include/asm/percpu.h6
-rw-r--r--arch/sh/include/asm/poll.h1
-rw-r--r--arch/sh/include/asm/posix_types_32.h2
-rw-r--r--arch/sh/include/asm/posix_types_64.h2
-rw-r--r--arch/sh/include/asm/resource.h6
-rw-r--r--arch/sh/include/asm/scatterlist.h6
-rw-r--r--arch/sh/include/asm/sembuf.h1
-rw-r--r--arch/sh/include/asm/serial.h1
-rw-r--r--arch/sh/include/asm/shmbuf.h1
-rw-r--r--arch/sh/include/asm/siginfo.h6
-rw-r--r--arch/sh/include/asm/sizes.h1
-rw-r--r--arch/sh/include/asm/socket.h1
-rw-r--r--arch/sh/include/asm/statfs.h6
-rw-r--r--arch/sh/include/asm/termbits.h1
-rw-r--r--arch/sh/include/asm/termios.h1
-rw-r--r--arch/sh/include/asm/thread_info.h19
-rw-r--r--arch/sh/include/asm/uaccess.h75
-rw-r--r--arch/sh/include/asm/uaccess_32.h75
-rw-r--r--arch/sh/include/asm/uaccess_64.h4
-rw-r--r--arch/sh/include/asm/ucontext.h1
-rw-r--r--arch/sh/include/asm/word-at-a-time.h53
-rw-r--r--arch/sh/include/asm/xor.h1
-rw-r--r--arch/sh/include/cpu-sh2a/cpu/ubc.h28
-rw-r--r--arch/sh/kernel/cpu/sh5/entry.S82
-rw-r--r--arch/sh/kernel/process.c1
-rw-r--r--arch/sh/kernel/process_64.c1
-rw-r--r--arch/sh/kernel/sh_ksyms_64.c2
-rw-r--r--arch/sh/kernel/signal_32.c45
-rw-r--r--arch/sh/kernel/signal_64.c49
-rw-r--r--arch/sh/kernel/smp.c7
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/asi.h4
-rw-r--r--arch/sparc/include/asm/asmmacro.h22
-rw-r--r--arch/sparc/include/asm/cmt.h59
-rw-r--r--arch/sparc/include/asm/dma-mapping.h9
-rw-r--r--arch/sparc/include/asm/leon.h82
-rw-r--r--arch/sparc/include/asm/leon_amba.h4
-rw-r--r--arch/sparc/include/asm/mpmbox.h67
-rw-r--r--arch/sparc/include/asm/pgtsrmmu.h86
-rw-r--r--arch/sparc/include/asm/posix_types.h5
-rw-r--r--arch/sparc/include/asm/psr.h8
-rw-r--r--arch/sparc/include/asm/sections.h3
-rw-r--r--arch/sparc/include/asm/thread_info_32.h3
-rw-r--r--arch/sparc/include/asm/thread_info_64.h18
-rw-r--r--arch/sparc/kernel/Makefile4
-rw-r--r--arch/sparc/kernel/cpu.c18
-rw-r--r--arch/sparc/kernel/entry.S10
-rw-r--r--arch/sparc/kernel/etrap_32.S18
-rw-r--r--arch/sparc/kernel/head_32.S168
-rw-r--r--arch/sparc/kernel/ioport.c24
-rw-r--r--arch/sparc/kernel/irq_32.c22
-rw-r--r--arch/sparc/kernel/kernel.h3
-rw-r--r--arch/sparc/kernel/leon_kernel.c1
-rw-r--r--arch/sparc/kernel/leon_pmc.c15
-rw-r--r--arch/sparc/kernel/leon_smp.c8
-rw-r--r--arch/sparc/kernel/process_32.c35
-rw-r--r--arch/sparc/kernel/prom_common.c1
-rw-r--r--arch/sparc/kernel/rtrap_32.S18
-rw-r--r--arch/sparc/kernel/setup_32.c62
-rw-r--r--arch/sparc/kernel/signal32.c27
-rw-r--r--arch/sparc/kernel/signal_32.c41
-rw-r--r--arch/sparc/kernel/signal_64.c36
-rw-r--r--arch/sparc/kernel/sys_sparc_64.c11
-rw-r--r--arch/sparc/kernel/trampoline_32.S6
-rw-r--r--arch/sparc/kernel/traps_64.c12
-rw-r--r--arch/sparc/kernel/vio.c2
-rw-r--r--arch/sparc/kernel/vmlinux.lds.S5
-rw-r--r--arch/sparc/kernel/wof.S18
-rw-r--r--arch/sparc/kernel/wuf.S27
-rw-r--r--arch/sparc/math-emu/math_64.c20
-rw-r--r--arch/sparc/mm/Makefile3
-rw-r--r--arch/sparc/mm/leon_mm.c2
-rw-r--r--arch/sparc/mm/srmmu.c25
-rw-r--r--arch/sparc/mm/srmmu_access.S82
-rw-r--r--arch/tile/include/asm/compat.h1
-rw-r--r--arch/tile/include/asm/thread_info.h23
-rw-r--r--arch/tile/include/asm/uaccess.h2
-rw-r--r--arch/tile/kernel/compat_signal.c3
-rw-r--r--arch/tile/kernel/entry.S14
-rw-r--r--arch/tile/kernel/process.c2
-rw-r--r--arch/tile/kernel/setup.c1
-rw-r--r--arch/tile/kernel/signal.c42
-rw-r--r--arch/um/include/shared/frame_kern.h3
-rw-r--r--arch/um/kernel/process.c5
-rw-r--r--arch/um/kernel/reboot.c13
-rw-r--r--arch/um/kernel/signal.c37
-rw-r--r--arch/um/kernel/trap.c24
-rw-r--r--arch/unicore32/kernel/signal.c49
-rw-r--r--arch/x86/Kconfig2
-rw-r--r--arch/x86/boot/compressed/eboot.c87
-rw-r--r--arch/x86/boot/compressed/eboot.h6
-rw-r--r--arch/x86/boot/header.S42
-rw-r--r--arch/x86/boot/tools/build.c172
-rw-r--r--arch/x86/crypto/aesni-intel_asm.S6
-rw-r--r--arch/x86/ia32/ia32_signal.c2
-rw-r--r--arch/x86/include/asm/acpi.h7
-rw-r--r--arch/x86/include/asm/bitops.h2
-rw-r--r--arch/x86/include/asm/ftrace.h2
-rw-r--r--arch/x86/include/asm/nmi.h14
-rw-r--r--arch/x86/include/asm/pgtable-3level.h30
-rw-r--r--arch/x86/include/asm/posix_types_32.h3
-rw-r--r--arch/x86/include/asm/sighandling.h2
-rw-r--r--arch/x86/include/asm/thread_info.h18
-rw-r--r--arch/x86/include/asm/uaccess.h12
-rw-r--r--arch/x86/include/asm/uv/uv_bau.h1
-rw-r--r--arch/x86/kernel/aperture_64.c6
-rw-r--r--arch/x86/kernel/apic/io_apic.c4
-rw-r--r--arch/x86/kernel/cpu/common.c8
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c37
-rw-r--r--arch/x86/kernel/cpu/mtrr/cleanup.c2
-rw-r--r--arch/x86/kernel/cpu/perf_event.c11
-rw-r--r--arch/x86/kernel/cpu/perf_event.h2
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c145
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel_ds.c9
-rw-r--r--arch/x86/kernel/entry_32.S13
-rw-r--r--arch/x86/kernel/entry_64.S44
-rw-r--r--arch/x86/kernel/ftrace.c102
-rw-r--r--arch/x86/kernel/hpet.c2
-rw-r--r--arch/x86/kernel/kvmclock.c5
-rw-r--r--arch/x86/kernel/nmi.c6
-rw-r--r--arch/x86/kernel/nmi_selftest.c4
-rw-r--r--arch/x86/kernel/pci-dma.c3
-rw-r--r--arch/x86/kernel/ptrace.c6
-rw-r--r--arch/x86/kernel/reboot.c6
-rw-r--r--arch/x86/kernel/signal.c62
-rw-r--r--arch/x86/kernel/smpboot.c26
-rw-r--r--arch/x86/kernel/traps.c8
-rw-r--r--arch/x86/kvm/mmu.c3
-rw-r--r--arch/x86/lib/usercopy.c4
-rw-r--r--arch/x86/lib/x86-opcode-map.txt8
-rw-r--r--arch/x86/mm/init.c3
-rw-r--r--arch/x86/mm/ioremap.c4
-rw-r--r--arch/x86/mm/pageattr.c2
-rw-r--r--arch/x86/mm/pat.c56
-rw-r--r--arch/x86/mm/srat.c2
-rw-r--r--arch/x86/platform/mrst/early_printk_mrst.c13
-rw-r--r--arch/x86/platform/mrst/mrst.c2
-rw-r--r--arch/x86/platform/uv/tlb_uv.c1
-rw-r--r--arch/x86/syscalls/syscall_32.tbl1
-rw-r--r--arch/x86/syscalls/syscall_64.tbl2
-rw-r--r--arch/x86/tools/gen-insn-attr-x86.awk14
-rw-r--r--arch/x86/um/signal.c2
-rw-r--r--arch/x86/um/sys_call_table_32.c4
-rw-r--r--arch/x86/xen/enlighten.c11
-rw-r--r--arch/x86/xen/p2m.c36
-rw-r--r--arch/x86/xen/setup.c3
-rw-r--r--arch/xtensa/Makefile4
-rw-r--r--arch/xtensa/include/asm/syscall.h4
-rw-r--r--arch/xtensa/kernel/signal.c26
-rw-r--r--arch/xtensa/kernel/vmlinux.lds.S3
-rw-r--r--arch/xtensa/mm/init.c18
-rw-r--r--block/blk-ioc.c6
-rw-r--r--drivers/acpi/Kconfig2
-rw-r--r--drivers/acpi/battery.c10
-rw-r--r--drivers/acpi/bus.c88
-rw-r--r--drivers/acpi/power.c2
-rw-r--r--drivers/acpi/processor_perflib.c30
-rw-r--r--drivers/acpi/scan.c1
-rw-r--r--drivers/acpi/sleep.c57
-rw-r--r--drivers/acpi/video.c33
-rw-r--r--drivers/ata/pata_arasan_cf.c4
-rw-r--r--drivers/atm/solos-pci.c4
-rw-r--r--drivers/base/dd.c2
-rw-r--r--drivers/base/regmap/regmap.c10
-rw-r--r--drivers/base/soc.c2
-rw-r--r--drivers/bcma/driver_chipcommon_pmu.c4
-rw-r--r--drivers/bcma/driver_pci.c6
-rw-r--r--drivers/bcma/sprom.c4
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c276
-rw-r--r--drivers/block/mtip32xx/mtip32xx.h48
-rw-r--r--drivers/char/agp/intel-agp.c1
-rw-r--r--drivers/char/agp/intel-agp.h1
-rw-r--r--drivers/char/hw_random/atmel-rng.c9
-rw-r--r--drivers/clk/spear/clk-aux-synth.c2
-rw-r--r--drivers/clk/spear/clk-frac-synth.c2
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c2
-rw-r--r--drivers/clk/spear/clk-vco-pll.c2
-rw-r--r--drivers/clk/spear/clk.c2
-rw-r--r--drivers/clk/spear/clk.h2
-rw-r--r--drivers/clk/spear/spear1310_clock.c2
-rw-r--r--drivers/clk/spear/spear1340_clock.c2
-rw-r--r--drivers/clk/spear/spear3xx_clock.c2
-rw-r--r--drivers/clk/spear/spear6xx_clock.c2
-rw-r--r--drivers/clocksource/Makefile1
-rw-r--r--drivers/clocksource/em_sti.c406
-rw-r--r--drivers/clocksource/sh_cmt.c26
-rw-r--r--drivers/clocksource/sh_mtu2.c6
-rw-r--r--drivers/clocksource/sh_tmu.c16
-rw-r--r--drivers/dma/dw_dmac.c2
-rw-r--r--drivers/dma/imx-sdma.c6
-rw-r--r--drivers/dma/pl330.c30
-rw-r--r--drivers/edac/edac_mc.c2
-rw-r--r--drivers/edac/i7core_edac.c15
-rw-r--r--drivers/edac/mce_amd.h2
-rw-r--r--drivers/edac/mpc85xx_edac.c3
-rw-r--r--drivers/edac/sb_edac.c10
-rw-r--r--drivers/extcon/extcon-max8997.c5
-rw-r--r--drivers/extcon/extcon_class.c2
-rw-r--r--drivers/extcon/extcon_gpio.c2
-rw-r--r--drivers/gpio/gpio-samsung.c2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.c19
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_drv.h2
-rw-r--r--drivers/gpu/drm/cirrus/cirrus_ttm.c5
-rw-r--r--drivers/gpu/drm/drm_edid.c13
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_drv.c4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_encoder.c7
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.c19
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_fb.h4
-rw-r--r--drivers/gpu/drm/exynos/exynos_drm_gem.c9
-rw-r--r--drivers/gpu/drm/exynos/exynos_mixer.c12
-rw-r--r--drivers/gpu/drm/i810/i810_dma.c4
-rw-r--r--drivers/gpu/drm/i915/i915_drv.c13
-rw-r--r--drivers/gpu/drm/i915/i915_drv.h3
-rw-r--r--drivers/gpu/drm/i915/i915_irq.c38
-rw-r--r--drivers/gpu/drm/i915/i915_reg.h43
-rw-r--r--drivers/gpu/drm/i915/intel_display.c21
-rw-r--r--drivers/gpu/drm/i915/intel_dp.c60
-rw-r--r--drivers/gpu/drm/i915/intel_ringbuffer.c21
-rw-r--r--drivers/gpu/drm/mgag200/mgag200_drv.c19
-rw-r--r--drivers/gpu/drm/radeon/atombios_crtc.c10
-rw-r--r--drivers/gpu/drm/radeon/atombios_encoders.c4
-rw-r--r--drivers/gpu/drm/radeon/evergreen.c385
-rw-r--r--drivers/gpu/drm/radeon/evergreen_cs.c49
-rw-r--r--drivers/gpu/drm/radeon/evergreen_hdmi.c3
-rw-r--r--drivers/gpu/drm/radeon/evergreend.h12
-rw-r--r--drivers/gpu/drm/radeon/ni.c386
-rw-r--r--drivers/gpu/drm/radeon/nid.h11
-rw-r--r--drivers/gpu/drm/radeon/r600.c215
-rw-r--r--drivers/gpu/drm/radeon/r600_audio.c7
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c42
-rw-r--r--drivers/gpu/drm/radeon/r600_hdmi.c8
-rw-r--r--drivers/gpu/drm/radeon/r600d.h4
-rw-r--r--drivers/gpu/drm/radeon/radeon.h10
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c31
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c4
-rw-r--r--drivers/gpu/drm/radeon/radeon_gart.c19
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c2
-rw-r--r--drivers/gpu/drm/radeon/radeon_pm.c10
-rw-r--r--drivers/gpu/drm/radeon/radeon_prime.c10
-rw-r--r--drivers/gpu/drm/radeon/rs600.c12
-rw-r--r--drivers/gpu/drm/radeon/rs690.c12
-rw-r--r--drivers/gpu/drm/radeon/rv770.c297
-rw-r--r--drivers/gpu/drm/radeon/rv770d.h7
-rw-r--r--drivers/gpu/drm/radeon/si.c477
-rw-r--r--drivers/gpu/drm/radeon/si_reg.h72
-rw-r--r--drivers/gpu/drm/radeon/sid.h19
-rw-r--r--drivers/gpu/drm/sis/sis_drv.c2
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo.c14
-rw-r--r--drivers/gpu/drm/udl/udl_drv.c15
-rw-r--r--drivers/gpu/drm/via/via_map.c3
-rw-r--r--drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c2
-rw-r--r--drivers/gpu/vga/vga_switcheroo.c27
-rw-r--r--drivers/hwmon/applesmc.c4
-rw-r--r--drivers/hwmon/coretemp.c33
-rw-r--r--drivers/hwmon/emc2103.c12
-rw-r--r--drivers/i2c/muxes/Kconfig12
-rw-r--r--drivers/i2c/muxes/Makefile1
-rw-r--r--drivers/i2c/muxes/i2c-mux-pinctrl.c279
-rw-r--r--drivers/ide/icside.c17
-rw-r--r--drivers/ide/ide-cs.c3
-rw-r--r--drivers/iio/Kconfig3
-rw-r--r--drivers/iio/industrialio-core.c16
-rw-r--r--drivers/infiniband/core/cma.c2
-rw-r--r--drivers/infiniband/hw/cxgb4/cm.c4
-rw-r--r--drivers/infiniband/hw/mlx4/main.c21
-rw-r--r--drivers/infiniband/hw/mlx4/mlx4_ib.h8
-rw-r--r--drivers/infiniband/hw/mlx4/qp.c21
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma.h2
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_abi.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_hw.c27
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_main.c64
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_sli.h5
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.c17
-rw-r--r--drivers/infiniband/hw/ocrdma/ocrdma_verbs.h1
-rw-r--r--drivers/iommu/amd_iommu.c71
-rw-r--r--drivers/iommu/amd_iommu_init.c13
-rw-r--r--drivers/iommu/amd_iommu_types.h3
-rw-r--r--drivers/leds/Kconfig4
-rw-r--r--drivers/leds/led-class.c2
-rw-r--r--drivers/leds/led-core.c7
-rw-r--r--drivers/md/dm-mpath.c47
-rw-r--r--drivers/md/dm-thin-metadata.c136
-rw-r--r--drivers/md/dm-thin-metadata.h13
-rw-r--r--drivers/md/dm-thin.c203
-rw-r--r--drivers/md/persistent-data/dm-transaction-manager.c2
-rw-r--r--drivers/md/raid1.c4
-rw-r--r--drivers/md/raid10.c4
-rw-r--r--drivers/media/video/pms.c1
-rw-r--r--drivers/message/fusion/mptbase.c13
-rw-r--r--drivers/message/fusion/mptctl.c4
-rw-r--r--drivers/mfd/db8500-prcmu.c1
-rw-r--r--drivers/mfd/stmpe-i2c.c2
-rw-r--r--drivers/mfd/stmpe-spi.c4
-rw-r--r--drivers/misc/mei/interrupt.c2
-rw-r--r--drivers/misc/mei/main.c9
-rw-r--r--drivers/misc/mei/wd.c2
-rw-r--r--drivers/mmc/core/mmc.c2
-rw-r--r--drivers/mmc/core/sd.c6
-rw-r--r--drivers/mmc/core/sdio.c6
-rw-r--r--drivers/mmc/host/atmel-mci-regs.h14
-rw-r--r--drivers/mmc/host/atmel-mci.c12
-rw-r--r--drivers/mmc/host/dw_mmc.c36
-rw-r--r--drivers/mmc/host/mmci.c19
-rw-r--r--drivers/mmc/host/mxs-mmc.c2
-rw-r--r--drivers/mmc/host/omap.c18
-rw-r--r--drivers/mmc/host/sdhci-s3c.c2
-rw-r--r--drivers/mmc/host/sdhci-spear.c4
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/Kconfig2
-rw-r--r--drivers/mtd/bcm63xxpart.c41
-rw-r--r--drivers/mtd/chips/cfi_cmdset_0002.c18
-rw-r--r--drivers/mtd/cmdlinepart.c2
-rw-r--r--drivers/mtd/devices/block2mtd.c7
-rw-r--r--drivers/mtd/devices/docg3.c40
-rw-r--r--drivers/mtd/devices/m25p80.c5
-rw-r--r--drivers/mtd/devices/spear_smi.c14
-rw-r--r--drivers/mtd/lpddr/qinfo_probe.c2
-rw-r--r--drivers/mtd/maps/Kconfig2
-rw-r--r--drivers/mtd/maps/intel_vr_nor.c13
-rw-r--r--drivers/mtd/maps/pci.c13
-rw-r--r--drivers/mtd/maps/scb2_flash.c15
-rw-r--r--drivers/mtd/maps/wr_sbc82xx_flash.c2
-rw-r--r--drivers/mtd/mtdcore.c57
-rw-r--r--drivers/mtd/mtdoops.c22
-rw-r--r--drivers/mtd/mtdpart.c14
-rw-r--r--drivers/mtd/nand/Kconfig42
-rw-r--r--drivers/mtd/nand/alauda.c4
-rw-r--r--drivers/mtd/nand/atmel_nand.c14
-rw-r--r--drivers/mtd/nand/au1550nd.c2
-rw-r--r--drivers/mtd/nand/bcm_umi_bch.c14
-rw-r--r--drivers/mtd/nand/bcm_umi_nand.c9
-rw-r--r--drivers/mtd/nand/bf5xx_nand.c4
-rw-r--r--drivers/mtd/nand/cafe_nand.c35
-rw-r--r--drivers/mtd/nand/cs553x_nand.c1
-rw-r--r--drivers/mtd/nand/denali.c38
-rw-r--r--drivers/mtd/nand/docg4.c22
-rw-r--r--drivers/mtd/nand/fsl_elbc_nand.c37
-rw-r--r--drivers/mtd/nand/fsl_ifc_nand.c47
-rw-r--r--drivers/mtd/nand/fsmc_nand.c26
-rw-r--r--drivers/mtd/nand/gpmi-nand/bch-regs.h42
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-lib.c27
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.c184
-rw-r--r--drivers/mtd/nand/gpmi-nand/gpmi-nand.h6
-rw-r--r--drivers/mtd/nand/h1910.c1
-rw-r--r--drivers/mtd/nand/jz4740_nand.c6
-rw-r--r--drivers/mtd/nand/mpc5121_nfc.c1
-rw-r--r--drivers/mtd/nand/mxc_nand.c636
-rw-r--r--drivers/mtd/nand/nand_base.c233
-rw-r--r--drivers/mtd/nand/nand_bbt.c1
-rw-r--r--drivers/mtd/nand/nand_ids.c6
-rw-r--r--drivers/mtd/nand/nandsim.c28
-rw-r--r--drivers/mtd/nand/omap2.c253
-rw-r--r--drivers/mtd/nand/pasemi_nand.c1
-rw-r--r--drivers/mtd/nand/plat_nand.c28
-rw-r--r--drivers/mtd/nand/pxa3xx_nand.c6
-rw-r--r--drivers/mtd/nand/r852.c22
-rw-r--r--drivers/mtd/nand/sh_flctl.c8
-rw-r--r--drivers/mtd/nand/sm_common.c9
-rw-r--r--drivers/mtd/onenand/onenand_base.c6
-rw-r--r--drivers/mtd/ubi/debug.c12
-rw-r--r--drivers/mtd/ubi/wl.c17
-rw-r--r--drivers/net/bonding/bond_main.c9
-rw-r--r--drivers/net/bonding/bond_sysfs.c8
-rw-r--r--drivers/net/can/c_can/c_can.c16
-rw-r--r--drivers/net/can/c_can/c_can.h1
-rw-r--r--drivers/net/can/cc770/cc770_platform.c2
-rw-r--r--drivers/net/dummy.c4
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x.h15
-rw-r--r--drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c27
-rw-r--r--drivers/net/ethernet/broadcom/tg3.c3
-rw-r--r--drivers/net/ethernet/emulex/benet/be_main.c5
-rw-r--r--drivers/net/ethernet/freescale/fec_mpc52xx.c2
-rw-r--r--drivers/net/ethernet/intel/e1000/e1000_main.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/ethtool.c6
-rw-r--r--drivers/net/ethernet/intel/e1000e/ich8lan.c18
-rw-r--r--drivers/net/ethernet/intel/e1000e/mac.c2
-rw-r--r--drivers/net/ethernet/intel/e1000e/netdev.c4
-rw-r--r--drivers/net/ethernet/intel/e1000e/phy.c8
-rw-r--r--drivers/net/ethernet/intel/ixgbe/ixgbe_main.c22
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c15
-rw-r--r--drivers/net/ethernet/marvell/sky2.c10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/cmd.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/en_main.c12
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/eq.c2
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/fw.c51
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/main.c40
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/mlx4.h10
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/port.c4
-rw-r--r--drivers/net/ethernet/mellanox/mlx4/profile.c9
-rw-r--r--drivers/net/ethernet/nxp/lpc_eth.c11
-rw-r--r--drivers/net/ethernet/rdc/r6040.c15
-rw-r--r--drivers/net/ethernet/realtek/8139cp.c24
-rw-r--r--drivers/net/ethernet/realtek/8139too.c2
-rw-r--r--drivers/net/ethernet/realtek/r8169.c9
-rw-r--r--drivers/net/ethernet/renesas/sh_eth.c8
-rw-r--r--drivers/net/ethernet/smsc/smsc911x.c7
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/Kconfig4
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac.h63
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_main.c35
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c29
-rw-r--r--drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c4
-rw-r--r--drivers/net/ethernet/sun/niu.c12
-rw-r--r--drivers/net/ethernet/tile/Kconfig2
-rw-r--r--drivers/net/ethernet/tile/Makefile4
-rw-r--r--drivers/net/ethernet/tile/tilegx.c1898
-rw-r--r--drivers/net/hyperv/hyperv_net.h1
-rw-r--r--drivers/net/hyperv/netvsc.c12
-rw-r--r--drivers/net/phy/icplus.c7
-rw-r--r--drivers/net/phy/mdio_bus.c2
-rw-r--r--drivers/net/usb/asix.c3
-rw-r--r--drivers/net/usb/mcs7830.c25
-rw-r--r--drivers/net/usb/qmi_wwan.c2
-rw-r--r--drivers/net/usb/sierra_net.c14
-rw-r--r--drivers/net/virtio_net.c24
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c19
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.c2
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9003_eeprom.h3
-rw-r--r--drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h178
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c3
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c7
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c16
-rw-r--r--drivers/net/wireless/b43/b43.h4
-rw-r--r--drivers/net/wireless/b43/main.c21
-rw-r--r--drivers/net/wireless/b43legacy/main.c2
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c4
-rw-r--r--drivers/net/wireless/brcm80211/brcmfmac/usb.c5
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2100.c20
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig8
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-2000.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c29
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-sta.c4
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-drv.c23
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-mac80211.c3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.c288
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-phy-db.h129
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-prph.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-trans-pcie.c9
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c22
-rw-r--r--drivers/net/wireless/mwifiex/cfg80211.c13
-rw-r--r--drivers/net/wireless/mwifiex/fw.h6
-rw-r--r--drivers/net/wireless/mwifiex/uap_cmd.c10
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h3
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00mac.c1
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00queue.c13
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187/leds.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/sdio.c2
-rw-r--r--drivers/net/wireless/ti/wl1251/spi.c3
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.c2
-rw-r--r--drivers/net/wireless/ti/wlcore/acx.h4
-rw-r--r--drivers/net/wireless/ti/wlcore/rx.c2
-rw-r--r--drivers/net/xen-netback/netback.c3
-rw-r--r--drivers/nfc/pn544_hci.c2
-rw-r--r--drivers/pci/pci.c5
-rw-r--r--drivers/pci/quirks.c26
-rw-r--r--drivers/pinctrl/core.c2
-rw-r--r--drivers/pinctrl/pinctrl-imx.c34
-rw-r--r--drivers/pinctrl/pinctrl-mxs.c13
-rw-r--r--drivers/pinctrl/pinctrl-nomadik.c45
-rw-r--r--drivers/pinctrl/pinctrl-sirf.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear300.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear310.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear320.c4
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.h2
-rw-r--r--drivers/platform/x86/acer-wmi.c24
-rw-r--r--drivers/platform/x86/acerhdf.c2
-rw-r--r--drivers/platform/x86/apple-gmux.c4
-rw-r--r--drivers/platform/x86/dell-laptop.c308
-rw-r--r--drivers/platform/x86/fujitsu-tablet.c34
-rw-r--r--drivers/platform/x86/hdaps.c2
-rw-r--r--drivers/platform/x86/hp-wmi.c10
-rw-r--r--drivers/platform/x86/ideapad-laptop.c9
-rw-r--r--drivers/platform/x86/sony-laptop.c1498
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c2
-rw-r--r--drivers/platform/x86/toshiba_acpi.c141
-rw-r--r--drivers/platform/x86/xo1-rfkill.c13
-rw-r--r--drivers/power/Kconfig10
-rw-r--r--drivers/power/ab8500_btemp.c12
-rw-r--r--drivers/power/ab8500_charger.c13
-rw-r--r--drivers/power/ab8500_fg.c12
-rw-r--r--drivers/power/charger-manager.c392
-rw-r--r--drivers/power/ds2781_battery.c20
-rw-r--r--drivers/power/isp1704_charger.c2
-rw-r--r--drivers/power/max17042_battery.c148
-rw-r--r--drivers/power/power_supply_sysfs.c1
-rw-r--r--drivers/power/sbs-battery.c2
-rw-r--r--drivers/power/smb347-charger.c712
-rw-r--r--drivers/rapidio/Kconfig14
-rw-r--r--drivers/rapidio/devices/Makefile3
-rw-r--r--drivers/rapidio/devices/tsi721.c211
-rw-r--r--drivers/rapidio/devices/tsi721.h105
-rw-r--r--drivers/rapidio/devices/tsi721_dma.c823
-rw-r--r--drivers/rapidio/rio.c81
-rw-r--r--drivers/regulator/ab8500.c22
-rw-r--r--drivers/regulator/anatop-regulator.c2
-rw-r--r--drivers/regulator/core.c3
-rw-r--r--drivers/regulator/db8500-prcmu.c40
-rw-r--r--drivers/regulator/gpio-regulator.c16
-rw-r--r--drivers/regulator/max8649.c1
-rw-r--r--drivers/regulator/palmas-regulator.c7
-rw-r--r--drivers/regulator/s5m8767.c2
-rw-r--r--drivers/remoteproc/omap_remoteproc.c2
-rw-r--r--drivers/remoteproc/remoteproc_core.c10
-rw-r--r--drivers/rtc/rtc-cmos.c9
-rw-r--r--drivers/s390/block/dasd_int.h4
-rw-r--r--drivers/s390/char/sclp_sdias.c2
-rw-r--r--drivers/scsi/be2iscsi/be_mgmt.c5
-rw-r--r--drivers/scsi/bfa/bfad_attr.c17
-rw-r--r--drivers/scsi/bfa/bfad_im.c2
-rw-r--r--drivers/scsi/bfa/bfad_im.h1
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc.h9
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_els.c18
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_fcoe.c173
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_hwi.c39
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_io.c122
-rw-r--r--drivers/scsi/bnx2fc/bnx2fc_tgt.c21
-rw-r--r--drivers/scsi/fcoe/Makefile2
-rw-r--r--drivers/scsi/fcoe/fcoe.c200
-rw-r--r--drivers/scsi/fcoe/fcoe.h8
-rw-r--r--drivers/scsi/fcoe/fcoe_ctlr.c159
-rw-r--r--drivers/scsi/fcoe/fcoe_sysfs.c832
-rw-r--r--drivers/scsi/fcoe/fcoe_transport.c13
-rw-r--r--drivers/scsi/mpt2sas/mpt2sas_base.c2
-rw-r--r--drivers/scsi/qla2xxx/Kconfig9
-rw-r--r--drivers/scsi/qla2xxx/Makefile3
-rw-r--r--drivers/scsi/qla2xxx/qla_attr.c22
-rw-r--r--drivers/scsi/qla2xxx/qla_bsg.c12
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.c81
-rw-r--r--drivers/scsi/qla2xxx/qla_dbg.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_def.h78
-rw-r--r--drivers/scsi/qla2xxx/qla_gbl.h9
-rw-r--r--drivers/scsi/qla2xxx/qla_gs.c4
-rw-r--r--drivers/scsi/qla2xxx/qla_init.c199
-rw-r--r--drivers/scsi/qla2xxx/qla_iocb.c90
-rw-r--r--drivers/scsi/qla2xxx/qla_isr.c94
-rw-r--r--drivers/scsi/qla2xxx/qla_mbx.c615
-rw-r--r--drivers/scsi/qla2xxx/qla_mid.c18
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.c66
-rw-r--r--drivers/scsi/qla2xxx/qla_nx.h14
-rw-r--r--drivers/scsi/qla2xxx/qla_os.c173
-rw-r--r--drivers/scsi/qla2xxx/qla_target.c4972
-rw-r--r--drivers/scsi/qla2xxx/qla_target.h1004
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.c1919
-rw-r--r--drivers/scsi/qla2xxx/tcm_qla2xxx.h82
-rw-r--r--drivers/scsi/qla4xxx/ql4_attr.c134
-rw-r--r--drivers/scsi/qla4xxx/ql4_def.h22
-rw-r--r--drivers/scsi/qla4xxx/ql4_fw.h28
-rw-r--r--drivers/scsi/qla4xxx/ql4_glbl.h8
-rw-r--r--drivers/scsi/qla4xxx/ql4_init.c95
-rw-r--r--drivers/scsi/qla4xxx/ql4_mbx.c111
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.c738
-rw-r--r--drivers/scsi/qla4xxx/ql4_nx.h192
-rw-r--r--drivers/scsi/qla4xxx/ql4_os.c78
-rw-r--r--drivers/scsi/qla4xxx/ql4_version.h2
-rw-r--r--drivers/scsi/scsi.c4
-rw-r--r--drivers/scsi/scsi_lib.c11
-rw-r--r--drivers/scsi/scsi_pm.c5
-rw-r--r--drivers/scsi/scsi_scan.c7
-rw-r--r--drivers/scsi/scsi_wait_scan.c2
-rw-r--r--drivers/scsi/ufs/ufshcd.c5
-rw-r--r--drivers/spi/spi-omap2-mcspi.c3
-rw-r--r--drivers/staging/comedi/drivers.c5
-rw-r--r--drivers/staging/gdm72xx/netlink_k.c2
-rw-r--r--drivers/staging/iio/Documentation/device.txt2
-rw-r--r--drivers/staging/iio/adc/Kconfig1
-rw-r--r--drivers/staging/iio/adc/ad7606_core.c3
-rw-r--r--drivers/staging/omapdrm/omap_fbdev.c10
-rw-r--r--drivers/staging/ramster/zcache-main.c8
-rw-r--r--drivers/staging/rtl8712/usb_intf.c2
-rw-r--r--drivers/staging/zcache/zcache-main.c10
-rw-r--r--drivers/target/sbp/sbp_target.c8
-rw-r--r--drivers/target/target_core_alua.c5
-rw-r--r--drivers/target/target_core_file.c70
-rw-r--r--drivers/target/target_core_file.h1
-rw-r--r--drivers/target/target_core_transport.c8
-rw-r--r--drivers/tty/amiserial.c14
-rw-r--r--drivers/tty/cyclades.c2
-rw-r--r--drivers/tty/hvc/hvc_xen.c31
-rw-r--r--drivers/tty/n_r3964.c11
-rw-r--r--drivers/tty/pty.c25
-rw-r--r--drivers/tty/serial/8250/8250.c2
-rw-r--r--drivers/tty/serial/amba-pl011.c45
-rw-r--r--drivers/tty/serial/crisv10.c8
-rw-r--r--drivers/tty/serial/serial_txx9.c2
-rw-r--r--drivers/tty/serial/sh-sci.c38
-rw-r--r--drivers/tty/synclink.c4
-rw-r--r--drivers/tty/synclink_gt.c4
-rw-r--r--drivers/tty/synclinkmp.c4
-rw-r--r--drivers/tty/tty_io.c67
-rw-r--r--drivers/tty/tty_ldisc.c67
-rw-r--r--drivers/tty/tty_mutex.c60
-rw-r--r--drivers/tty/tty_port.c6
-rw-r--r--drivers/usb/class/cdc-acm.c8
-rw-r--r--drivers/usb/class/cdc-wdm.c9
-rw-r--r--drivers/usb/core/hcd-pci.c9
-rw-r--r--drivers/usb/core/hub.c2
-rw-r--r--drivers/usb/core/message.c3
-rw-r--r--drivers/usb/gadget/atmel_usba_udc.c6
-rw-r--r--drivers/usb/gadget/fsl_qe_udc.c2
-rw-r--r--drivers/usb/gadget/fsl_udc_core.c4
-rw-r--r--drivers/usb/gadget/fsl_usb2_udc.h4
-rw-r--r--drivers/usb/gadget/goku_udc.c2
-rw-r--r--drivers/usb/gadget/mv_udc_core.c2
-rw-r--r--drivers/usb/gadget/omap_udc.c2
-rw-r--r--drivers/usb/gadget/pxa25x_udc.c2
-rw-r--r--drivers/usb/gadget/s3c-hsudc.c2
-rw-r--r--drivers/usb/gadget/s3c2410_udc.c2
-rw-r--r--drivers/usb/host/ehci-hcd.c2
-rw-r--r--drivers/usb/host/ehci-omap.c168
-rw-r--r--drivers/usb/host/ehci-pci.c8
-rw-r--r--drivers/usb/host/ehci-sh.c3
-rw-r--r--drivers/usb/host/ehci-xilinx-of.c4
-rw-r--r--drivers/usb/host/ohci-hub.c2
-rw-r--r--drivers/usb/host/xhci-mem.c74
-rw-r--r--drivers/usb/host/xhci.c10
-rw-r--r--drivers/usb/musb/davinci.c1
-rw-r--r--drivers/usb/musb/davinci.h4
-rw-r--r--drivers/usb/musb/musb_gadget.c1
-rw-r--r--drivers/usb/serial/cp210x.c1
-rw-r--r--drivers/usb/serial/ftdi_sio.c1
-rw-r--r--drivers/usb/serial/ftdi_sio_ids.h1
-rw-r--r--drivers/usb/serial/generic.c10
-rw-r--r--drivers/usb/serial/mct_u232.c13
-rw-r--r--drivers/usb/serial/mos7840.c2
-rw-r--r--drivers/usb/serial/option.c96
-rw-r--r--drivers/usb/serial/qcserial.c6
-rw-r--r--drivers/usb/serial/sierra.c4
-rw-r--r--drivers/usb/serial/usb-serial.c12
-rw-r--r--drivers/usb/storage/unusual_devs.h7
-rw-r--r--drivers/video/Kconfig35
-rw-r--r--drivers/video/Makefile3
-rw-r--r--drivers/video/auo_k1900fb.c198
-rw-r--r--drivers/video/auo_k1901fb.c251
-rw-r--r--drivers/video/auo_k190x.c1046
-rw-r--r--drivers/video/auo_k190x.h129
-rw-r--r--drivers/video/backlight/Kconfig2
-rw-r--r--drivers/video/backlight/ili9320.c2
-rw-r--r--drivers/video/bfin_adv7393fb.c49
-rw-r--r--drivers/video/broadsheetfb.c2
-rw-r--r--drivers/video/cobalt_lcdfb.c45
-rw-r--r--drivers/video/console/Kconfig14
-rw-r--r--drivers/video/ep93xx-fb.c32
-rw-r--r--drivers/video/exynos/exynos_dp_core.c69
-rw-r--r--drivers/video/exynos/exynos_dp_core.h3
-rw-r--r--drivers/video/exynos/exynos_dp_reg.c45
-rw-r--r--drivers/video/exynos/exynos_dp_reg.h29
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi.c49
-rw-r--r--drivers/video/exynos/exynos_mipi_dsi_common.c36
-rw-r--r--drivers/video/exynos/s6e8ax0.c15
-rw-r--r--drivers/video/fb_defio.c6
-rw-r--r--drivers/video/fbsysfs.c2
-rw-r--r--drivers/video/fsl-diu-fb.c1
-rw-r--r--drivers/video/intelfb/intelfbdrv.c2
-rw-r--r--drivers/video/mb862xx/mb862xx-i2c.c2
-rw-r--r--drivers/video/mb862xx/mb862xxfbdrv.c2
-rw-r--r--drivers/video/mbx/mbxfb.c4
-rw-r--r--drivers/video/mxsfb.c13
-rw-r--r--drivers/video/omap/Kconfig8
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c7
-rw-r--r--drivers/video/omap2/displays/panel-generic-dpi.c107
-rw-r--r--drivers/video/omap2/displays/panel-n8x0.c8
-rw-r--r--drivers/video/omap2/displays/panel-taal.c90
-rw-r--r--drivers/video/omap2/displays/panel-tfp410.c76
-rw-r--r--drivers/video/omap2/displays/panel-tpo-td043mtea1.c22
-rw-r--r--drivers/video/omap2/dss/Kconfig13
-rw-r--r--drivers/video/omap2/dss/apply.c134
-rw-r--r--drivers/video/omap2/dss/core.c254
-rw-r--r--drivers/video/omap2/dss/dispc.c747
-rw-r--r--drivers/video/omap2/dss/dispc.h72
-rw-r--r--drivers/video/omap2/dss/display.c49
-rw-r--r--drivers/video/omap2/dss/dpi.c75
-rw-r--r--drivers/video/omap2/dss/dsi.c404
-rw-r--r--drivers/video/omap2/dss/dss.c67
-rw-r--r--drivers/video/omap2/dss/dss.h151
-rw-r--r--drivers/video/omap2/dss/dss_features.c30
-rw-r--r--drivers/video/omap2/dss/dss_features.h5
-rw-r--r--drivers/video/omap2/dss/hdmi.c443
-rw-r--r--drivers/video/omap2/dss/hdmi_panel.c236
-rw-r--r--drivers/video/omap2/dss/manager.c19
-rw-r--r--drivers/video/omap2/dss/overlay.c16
-rw-r--r--drivers/video/omap2/dss/rfbi.c84
-rw-r--r--drivers/video/omap2/dss/sdi.c63
-rw-r--r--drivers/video/omap2/dss/ti_hdmi.h32
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c480
-rw-r--r--drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h161
-rw-r--r--drivers/video/omap2/dss/venc.c133
-rw-r--r--drivers/video/omap2/omapfb/omapfb-ioctl.c17
-rw-r--r--drivers/video/omap2/omapfb/omapfb-main.c12
-rw-r--r--drivers/video/omap2/omapfb/omapfb.h1
-rw-r--r--drivers/video/omap2/vrfb.c4
-rw-r--r--drivers/video/pxa3xx-gcu.c5
-rw-r--r--drivers/video/s3c-fb.c160
-rw-r--r--drivers/video/savage/savagefb_driver.c10
-rw-r--r--drivers/video/sh_mobile_hdmi.c219
-rw-r--r--drivers/video/sis/init.h45
-rw-r--r--drivers/video/sis/sis_main.c41
-rw-r--r--drivers/video/skeletonfb.c2
-rw-r--r--drivers/video/smscufx.c4
-rw-r--r--drivers/video/udlfb.c2
-rw-r--r--drivers/video/via/viafbdev.c34
-rw-r--r--drivers/watchdog/sp805_wdt.c4
-rw-r--r--drivers/xen/events.c9
-rw-r--r--drivers/xen/pci.c2
-rw-r--r--drivers/xen/tmem.c8
-rw-r--r--fs/9p/vfs_inode_dotl.c24
-rw-r--r--fs/affs/affs.h8
-rw-r--r--fs/aio.c10
-rw-r--r--fs/attr.c5
-rw-r--r--fs/binfmt_elf.c8
-rw-r--r--fs/binfmt_flat.c8
-rw-r--r--fs/btrfs/acl.c4
-rw-r--r--fs/btrfs/backref.c568
-rw-r--r--fs/btrfs/backref.h3
-rw-r--r--fs/btrfs/btrfs_inode.h51
-rw-r--r--fs/btrfs/check-integrity.c600
-rw-r--r--fs/btrfs/ctree.c909
-rw-r--r--fs/btrfs/ctree.h89
-rw-r--r--fs/btrfs/delayed-inode.c26
-rw-r--r--fs/btrfs/delayed-inode.h3
-rw-r--r--fs/btrfs/delayed-ref.c10
-rw-r--r--fs/btrfs/delayed-ref.h24
-rw-r--r--fs/btrfs/disk-io.c132
-rw-r--r--fs/btrfs/disk-io.h1
-rw-r--r--fs/btrfs/export.c15
-rw-r--r--fs/btrfs/extent-tree.c23
-rw-r--r--fs/btrfs/extent_io.c175
-rw-r--r--fs/btrfs/extent_io.h8
-rw-r--r--fs/btrfs/file.c78
-rw-r--r--fs/btrfs/free-space-cache.c52
-rw-r--r--fs/btrfs/inode.c390
-rw-r--r--fs/btrfs/ioctl.c168
-rw-r--r--fs/btrfs/ioctl.h33
-rw-r--r--fs/btrfs/ordered-data.c175
-rw-r--r--fs/btrfs/ordered-data.h13
-rw-r--r--fs/btrfs/print-tree.c3
-rw-r--r--fs/btrfs/rcu-string.h56
-rw-r--r--fs/btrfs/reada.c5
-rw-r--r--fs/btrfs/scrub.c95
-rw-r--r--fs/btrfs/super.c150
-rw-r--r--fs/btrfs/transaction.c73
-rw-r--r--fs/btrfs/tree-log.c35
-rw-r--r--fs/btrfs/ulist.c38
-rw-r--r--fs/btrfs/ulist.h15
-rw-r--r--fs/btrfs/volumes.c374
-rw-r--r--fs/btrfs/volumes.h54
-rw-r--r--fs/btrfs/xattr.c1
-rw-r--r--fs/buffer.c2
-rw-r--r--fs/ceph/addr.c21
-rw-r--r--fs/ceph/export.c32
-rw-r--r--fs/ceph/snap.c2
-rw-r--r--fs/cifs/cifsglob.h7
-rw-r--r--fs/cifs/cifsproto.h1
-rw-r--r--fs/cifs/cifssmb.c8
-rw-r--r--fs/cifs/connect.c8
-rw-r--r--fs/cifs/file.c106
-rw-r--r--fs/cifs/misc.c89
-rw-r--r--fs/cifs/smb1ops.c89
-rw-r--r--fs/cifs/transport.c2
-rw-r--r--fs/compat.c43
-rw-r--r--fs/dcache.c4
-rw-r--r--fs/direct-io.c44
-rw-r--r--fs/ecryptfs/inode.c48
-rw-r--r--fs/eventfd.c12
-rw-r--r--fs/eventpoll.c4
-rw-r--r--fs/exec.c6
-rw-r--r--fs/exofs/sys.c2
-rw-r--r--fs/exportfs/expfs.c33
-rw-r--r--fs/ext4/Kconfig2
-rw-r--r--fs/ext4/balloc.c49
-rw-r--r--fs/ext4/bitmap.c83
-rw-r--r--fs/ext4/dir.c12
-rw-r--r--fs/ext4/ext4.h130
-rw-r--r--fs/ext4/ext4_extents.h24
-rw-r--r--fs/ext4/ext4_jbd2.c9
-rw-r--r--fs/ext4/ext4_jbd2.h7
-rw-r--r--fs/ext4/extents.c91
-rw-r--r--fs/ext4/file.c2
-rw-r--r--fs/ext4/ialloc.c81
-rw-r--r--fs/ext4/inode.c119
-rw-r--r--fs/ext4/ioctl.c20
-rw-r--r--fs/ext4/mballoc.c30
-rw-r--r--fs/ext4/mmp.c44
-rw-r--r--fs/ext4/namei.c445
-rw-r--r--fs/ext4/resize.c71
-rw-r--r--fs/ext4/super.c253
-rw-r--r--fs/ext4/xattr.c92
-rw-r--r--fs/ext4/xattr.h4
-rw-r--r--fs/fat/dir.c4
-rw-r--r--fs/fat/fat.h6
-rw-r--r--fs/fat/fatent.c21
-rw-r--r--fs/fat/inode.c63
-rw-r--r--fs/fcntl.c42
-rw-r--r--fs/file_table.c17
-rw-r--r--fs/fs-writeback.c1
-rw-r--r--fs/fuse/control.c10
-rw-r--r--fs/fuse/dir.c11
-rw-r--r--fs/fuse/file.c44
-rw-r--r--fs/fuse/fuse_i.h6
-rw-r--r--fs/fuse/inode.c34
-rw-r--r--fs/gfs2/export.c17
-rw-r--r--fs/hfsplus/ioctl.c9
-rw-r--r--fs/hfsplus/wrapper.c2
-rw-r--r--fs/hpfs/alloc.c14
-rw-r--r--fs/hpfs/anode.c43
-rw-r--r--fs/hpfs/buffer.c1
-rw-r--r--fs/hpfs/dir.c2
-rw-r--r--fs/hpfs/dnode.c10
-rw-r--r--fs/hpfs/ea.c60
-rw-r--r--fs/hpfs/hpfs.h289
-rw-r--r--fs/hpfs/hpfs_fn.h23
-rw-r--r--fs/hpfs/inode.c2
-rw-r--r--fs/hpfs/map.c20
-rw-r--r--fs/hpfs/namei.c2
-rw-r--r--fs/hpfs/super.c4
-rw-r--r--fs/inode.c171
-rw-r--r--fs/internal.h3
-rw-r--r--fs/isofs/export.c13
-rw-r--r--fs/jbd2/Kconfig2
-rw-r--r--fs/jbd2/commit.c70
-rw-r--r--fs/jbd2/journal.c132
-rw-r--r--fs/jbd2/recovery.c126
-rw-r--r--fs/jbd2/revoke.c27
-rw-r--r--fs/jbd2/transaction.c4
-rw-r--r--fs/jffs2/jffs2_fs_sb.h11
-rw-r--r--fs/jffs2/nodemgmt.c42
-rw-r--r--fs/jffs2/os-linux.h7
-rw-r--r--fs/jffs2/readinode.c19
-rw-r--r--fs/jffs2/super.c38
-rw-r--r--fs/jffs2/wbuf.c55
-rw-r--r--fs/jffs2/xattr.c23
-rw-r--r--fs/jffs2/xattr.h2
-rw-r--r--fs/lockd/clntlock.c13
-rw-r--r--fs/lockd/svc.c148
-rw-r--r--fs/locks.c5
-rw-r--r--fs/namei.c177
-rw-r--r--fs/namespace.c142
-rw-r--r--fs/ncpfs/file.c6
-rw-r--r--fs/ncpfs/ncp_fs_sb.h10
-rw-r--r--fs/nfs/callback.c18
-rw-r--r--fs/nfs/callback_xdr.c8
-rw-r--r--fs/nfs/client.c3
-rw-r--r--fs/nfs/dir.c56
-rw-r--r--fs/nfs/direct.c24
-rw-r--r--fs/nfs/file.c77
-rw-r--r--fs/nfs/idmap.c13
-rw-r--r--fs/nfs/inode.c7
-rw-r--r--fs/nfs/internal.h4
-rw-r--r--fs/nfs/nfs4_fs.h2
-rw-r--r--fs/nfs/nfs4proc.c42
-rw-r--r--fs/nfs/nfs4state.c22
-rw-r--r--fs/nfs/nfs4xdr.c15
-rw-r--r--fs/nfs/pnfs.c13
-rw-r--r--fs/nfs/pnfs.h2
-rw-r--r--fs/nfs/proc.c2
-rw-r--r--fs/nfs/super.c3
-rw-r--r--fs/nfs/write.c7
-rw-r--r--fs/nfsd/auth.c2
-rw-r--r--fs/nfsd/export.c181
-rw-r--r--fs/nfsd/fault_inject.c1
-rw-r--r--fs/nfsd/idmap.h8
-rw-r--r--fs/nfsd/netns.h6
-rw-r--r--fs/nfsd/nfs4callback.c5
-rw-r--r--fs/nfsd/nfs4idmap.c113
-rw-r--r--fs/nfsd/nfs4recover.c4
-rw-r--r--fs/nfsd/nfs4state.c542
-rw-r--r--fs/nfsd/nfs4xdr.c62
-rw-r--r--fs/nfsd/nfsctl.c67
-rw-r--r--fs/nfsd/nfsfh.c2
-rw-r--r--fs/nfsd/nfssvc.c31
-rw-r--r--fs/nfsd/state.h1
-rw-r--r--fs/nfsd/vfs.c2
-rw-r--r--fs/nfsd/xdr4.h6
-rw-r--r--fs/nilfs2/file.c24
-rw-r--r--fs/nilfs2/gcinode.c2
-rw-r--r--fs/nilfs2/ioctl.c8
-rw-r--r--fs/nilfs2/namei.c22
-rw-r--r--fs/nilfs2/segment.c2
-rw-r--r--fs/nls/Kconfig157
-rw-r--r--fs/nls/Makefile11
-rw-r--r--fs/nls/mac-celtic.c602
-rw-r--r--fs/nls/mac-centeuro.c532
-rw-r--r--fs/nls/mac-croatian.c602
-rw-r--r--fs/nls/mac-cyrillic.c497
-rw-r--r--fs/nls/mac-gaelic.c567
-rw-r--r--fs/nls/mac-greek.c497
-rw-r--r--fs/nls/mac-iceland.c602
-rw-r--r--fs/nls/mac-inuit.c532
-rw-r--r--fs/nls/mac-roman.c637
-rw-r--r--fs/nls/mac-romanian.c602
-rw-r--r--fs/nls/mac-turkish.c602
-rw-r--r--fs/notify/fsnotify.c12
-rw-r--r--fs/ntfs/file.c4
-rw-r--r--fs/ocfs2/blockcheck.c42
-rw-r--r--fs/ocfs2/dlm/dlmast.c2
-rw-r--r--fs/ocfs2/dlm/dlmcommon.h6
-rw-r--r--fs/ocfs2/dlm/dlmdomain.c2
-rw-r--r--fs/ocfs2/export.c19
-rw-r--r--fs/ocfs2/inode.c13
-rw-r--r--fs/ocfs2/ioctl.c31
-rw-r--r--fs/ocfs2/move_extents.c6
-rw-r--r--fs/ocfs2/namei.c5
-rw-r--r--fs/ocfs2/symlink.c115
-rw-r--r--fs/ocfs2/symlink.h2
-rw-r--r--fs/open.c76
-rw-r--r--fs/pipe.c9
-rw-r--r--fs/pnode.c4
-rw-r--r--fs/proc/array.c147
-rw-r--r--fs/proc/base.c98
-rw-r--r--fs/proc/internal.h3
-rw-r--r--fs/proc/task_mmu.c82
-rw-r--r--fs/proc/task_nommu.c2
-rw-r--r--fs/proc_namespace.c4
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/pstore/platform.c34
-rw-r--r--fs/pstore/ram.c3
-rw-r--r--fs/pstore/ram_core.c27
-rw-r--r--fs/read_write.c7
-rw-r--r--fs/readdir.c33
-rw-r--r--fs/reiserfs/inode.c30
-rw-r--r--fs/reiserfs/journal.c15
-rw-r--r--fs/reiserfs/reiserfs.h12
-rw-r--r--fs/reiserfs/resize.c1
-rw-r--r--fs/reiserfs/super.c74
-rw-r--r--fs/select.c4
-rw-r--r--fs/signalfd.c7
-rw-r--r--fs/splice.c6
-rw-r--r--fs/statfs.c5
-rw-r--r--fs/sync.c5
-rw-r--r--fs/ubifs/debug.c12
-rw-r--r--fs/ubifs/dir.c11
-rw-r--r--fs/udf/namei.c14
-rw-r--r--fs/utimes.c5
-rw-r--r--fs/xattr.c20
-rw-r--r--fs/xfs/kmem.c10
-rw-r--r--fs/xfs/kmem.h21
-rw-r--r--fs/xfs/xfs_alloc.c3
-rw-r--r--fs/xfs/xfs_aops.c11
-rw-r--r--fs/xfs/xfs_buf.c16
-rw-r--r--fs/xfs/xfs_export.c23
-rw-r--r--fs/xfs/xfs_file.c7
-rw-r--r--fs/xfs/xfs_inode_item.c17
-rw-r--r--fs/xfs/xfs_log.c79
-rw-r--r--fs/xfs/xfs_log_cil.c22
-rw-r--r--fs/xfs/xfs_log_priv.h46
-rw-r--r--fs/xfs/xfs_log_recover.c38
-rw-r--r--fs/xfs/xfs_mount.h4
-rw-r--r--fs/xfs/xfs_sync.c32
-rw-r--r--fs/xfs/xfs_trace.h18
-rw-r--r--fs/xfs/xfs_trans.c2
-rw-r--r--fs/xfs/xfs_trans.h2
-rw-r--r--include/acpi/acpi_bus.h4
-rw-r--r--include/asm-generic/Kbuild1
-rw-r--r--include/asm-generic/bitsperlong.h4
-rw-r--r--include/asm-generic/bug.h7
-rw-r--r--include/asm-generic/pgtable.h10
-rw-r--r--include/asm-generic/posix_types.h4
-rw-r--r--include/drm/drm_crtc.h2
-rw-r--r--include/drm/drm_mem_util.h4
-rw-r--r--include/drm/drm_pciids.h21
-rw-r--r--include/drm/exynos_drm.h4
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/clockchips.h1
-rw-r--r--include/linux/compaction.h19
-rw-r--r--include/linux/compat.h3
-rw-r--r--include/linux/compiler-gcc.h6
-rw-r--r--include/linux/cpu.h1
-rw-r--r--include/linux/cred.h10
-rw-r--r--include/linux/dmaengine.h12
-rw-r--r--include/linux/errno.h1
-rw-r--r--include/linux/eventfd.h2
-rw-r--r--include/linux/exportfs.h4
-rw-r--r--include/linux/fb.h1
-rw-r--r--include/linux/frontswap.h127
-rw-r--r--include/linux/fs.h37
-rw-r--r--include/linux/fsnotify_backend.h2
-rw-r--r--include/linux/fuse.h14
-rw-r--r--include/linux/genetlink.h3
-rw-r--r--include/linux/i2c-mux-pinctrl.h41
-rw-r--r--include/linux/init_task.h2
-rw-r--r--include/linux/interrupt.h4
-rw-r--r--include/linux/ipc_namespace.h42
-rw-r--r--include/linux/jbd2.h59
-rw-r--r--include/linux/jbd_common.h2
-rw-r--r--include/linux/kcmp.h17
-rw-r--r--include/linux/kernel.h1
-rw-r--r--include/linux/kexec.h75
-rw-r--r--include/linux/key.h4
-rw-r--r--include/linux/kmod.h34
-rw-r--r--include/linux/kmsg_dump.h45
-rw-r--r--include/linux/lglock.h179
-rw-r--r--include/linux/lockd/bind.h4
-rw-r--r--include/linux/mlx4/device.h6
-rw-r--r--include/linux/mm.h2
-rw-r--r--include/linux/mm_types.h10
-rw-r--r--include/linux/mmc/sdhci-spear.h2
-rw-r--r--include/linux/mmc/sdio.h2
-rw-r--r--include/linux/moduleparam.h10
-rw-r--r--include/linux/msdos_fs.h3
-rw-r--r--include/linux/mtd/gpmi-nand.h8
-rw-r--r--include/linux/mtd/mtd.h11
-rw-r--r--include/linux/mtd/nand.h25
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdevice.h8
-rw-r--r--include/linux/netfilter/xt_HMARK.h5
-rw-r--r--include/linux/nfs_fs_sb.h1
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/nfsd/export.h13
-rw-r--r--include/linux/pata_arasan_cf_data.h2
-rw-r--r--include/linux/pci.h2
-rw-r--r--include/linux/perf_event.h4
-rw-r--r--include/linux/power/charger-manager.h50
-rw-r--r--include/linux/power/max17042_battery.h17
-rw-r--r--include/linux/power_supply.h4
-rw-r--r--include/linux/prctl.h16
-rw-r--r--include/linux/pstore_ram.h2
-rw-r--r--include/linux/pxa2xx_ssp.h2
-rw-r--r--include/linux/radix-tree.h5
-rw-r--r--include/linux/rcutiny.h6
-rw-r--r--include/linux/rcutree.h2
-rw-r--r--include/linux/rio.h47
-rw-r--r--include/linux/rio_drv.h9
-rw-r--r--include/linux/sched.h43
-rw-r--r--include/linux/security.h40
-rw-r--r--include/linux/signal.h5
-rw-r--r--include/linux/skbuff.h2
-rw-r--r--include/linux/slab.h2
-rw-r--r--include/linux/spi/pxa2xx_spi.h2
-rw-r--r--include/linux/sunrpc/svc.h2
-rw-r--r--include/linux/sunrpc/svcauth.h13
-rw-r--r--include/linux/sunrpc/svcauth_gss.h1
-rw-r--r--include/linux/swap.h4
-rw-r--r--include/linux/swapfile.h13
-rw-r--r--include/linux/swapops.h8
-rw-r--r--include/linux/syscalls.h2
-rw-r--r--include/linux/task_work.h33
-rw-r--r--include/linux/tcp.h20
-rw-r--r--include/linux/thread_info.h19
-rw-r--r--include/linux/tracehook.h13
-rw-r--r--include/linux/tty.h23
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/usb/hcd.h2
-rw-r--r--include/linux/vga_switcheroo.h13
-rw-r--r--include/net/cipso_ipv4.h29
-rw-r--r--include/net/dst.h1
-rw-r--r--include/net/inetpeer.h5
-rw-r--r--include/net/route.h2
-rw-r--r--include/net/sch_generic.h7
-rw-r--r--include/scsi/fcoe_sysfs.h124
-rw-r--r--include/scsi/libfcoe.h27
-rw-r--r--include/target/target_core_fabric.h1
-rw-r--r--include/trace/events/rcu.h1
-rw-r--r--include/video/auo_k190xfb.h106
-rw-r--r--include/video/exynos_dp.h2
-rw-r--r--include/video/exynos_mipi_dsim.h1
-rw-r--r--include/video/omapdss.h47
-rw-r--r--include/video/sh_mobile_hdmi.h12
-rw-r--r--init/Kconfig11
-rw-r--r--init/do_mounts.c14
-rw-r--r--init/do_mounts_initrd.c10
-rw-r--r--init/do_mounts_md.c12
-rw-r--r--init/do_mounts_rd.c13
-rw-r--r--init/initramfs.c16
-rw-r--r--init/main.c9
-rw-r--r--ipc/mq_sysctl.c49
-rw-r--r--ipc/mqueue.c292
-rw-r--r--ipc/shm.c19
-rw-r--r--kernel/Makefile7
-rw-r--r--kernel/cgroup.c30
-rw-r--r--kernel/cpu.c44
-rw-r--r--kernel/cpu_pm.c16
-rw-r--r--kernel/cred.c9
-rw-r--r--kernel/events/core.c11
-rw-r--r--kernel/exit.c32
-rw-r--r--kernel/fork.c11
-rw-r--r--kernel/irq/chip.c8
-rw-r--r--kernel/irq/internals.h3
-rw-r--r--kernel/irq/manage.c119
-rw-r--r--kernel/irq/migration.c13
-rw-r--r--kernel/kcmp.c196
-rw-r--r--kernel/kmod.c30
-rw-r--r--kernel/lglock.c89
-rw-r--r--kernel/panic.c6
-rw-r--r--kernel/pid_namespace.c33
-rw-r--r--kernel/printk.c241
-rw-r--r--kernel/rcutree.c2
-rw-r--r--kernel/rcutree.h14
-rw-r--r--kernel/rcutree_plugin.h165
-rw-r--r--kernel/resource.c4
-rw-r--r--kernel/sched/core.c249
-rw-r--r--kernel/sched/fair.c71
-rw-r--r--kernel/sched/rt.c53
-rw-r--r--kernel/sched/sched.h2
-rw-r--r--kernel/signal.c59
-rw-r--r--kernel/smpboot.c17
-rw-r--r--kernel/sys.c221
-rw-r--r--kernel/sys_ni.c3
-rw-r--r--kernel/task_work.c84
-rw-r--r--kernel/time/clockevents.c3
-rw-r--r--kernel/time/tick-sched.c26
-rw-r--r--kernel/time/timekeeping.c2
-rw-r--r--kernel/trace/trace.c2
-rw-r--r--kernel/watchdog.c19
-rw-r--r--lib/Kconfig.debug20
-rw-r--r--lib/btree.c5
-rw-r--r--lib/dynamic_queue_limits.c18
-rw-r--r--lib/fault-inject.c4
-rw-r--r--lib/radix-tree.c3
-rw-r--r--lib/raid6/recov.c7
-rw-r--r--lib/raid6/recov_ssse3.c7
-rw-r--r--lib/spinlock_debug.c2
-rw-r--r--lib/vsprintf.c289
-rw-r--r--mm/Kconfig17
-rw-r--r--mm/Makefile1
-rw-r--r--mm/cleancache.c6
-rw-r--r--mm/compaction.c142
-rw-r--r--mm/filemap.c69
-rw-r--r--mm/filemap_xip.c4
-rw-r--r--mm/frontswap.c314
-rw-r--r--mm/internal.h13
-rw-r--r--mm/memblock.c68
-rw-r--r--mm/memcontrol.c6
-rw-r--r--mm/memory.c12
-rw-r--r--mm/mempolicy.c2
-rw-r--r--mm/migrate.c5
-rw-r--r--mm/mmap.c54
-rw-r--r--mm/mremap.c26
-rw-r--r--mm/nommu.c35
-rw-r--r--mm/oom_kill.c21
-rw-r--r--mm/page_alloc.c8
-rw-r--r--mm/page_cgroup.c4
-rw-r--r--mm/page_io.c12
-rw-r--r--mm/pagewalk.c1
-rw-r--r--mm/percpu-vm.c1
-rw-r--r--mm/process_vm_access.c16
-rw-r--r--mm/shmem.c63
-rw-r--r--mm/slub.c23
-rw-r--r--mm/swapfile.c66
-rw-r--r--mm/util.c30
-rw-r--r--net/9p/trans_virtio.c2
-rw-r--r--net/appletalk/ddp.c4
-rw-r--r--net/bluetooth/af_bluetooth.c2
-rw-r--r--net/bluetooth/rfcomm/tty.c4
-rw-r--r--net/ceph/ceph_common.c7
-rw-r--r--net/ceph/messenger.c4
-rw-r--r--net/ceph/mon_client.c8
-rw-r--r--net/ceph/osd_client.c12
-rw-r--r--net/core/drop_monitor.c103
-rw-r--r--net/core/filter.c4
-rw-r--r--net/core/neighbour.c14
-rw-r--r--net/core/netpoll.c11
-rw-r--r--net/core/skbuff.c2
-rw-r--r--net/core/sock.c7
-rw-r--r--net/ipv4/esp4.c24
-rw-r--r--net/ipv4/inet_connection_sock.c3
-rw-r--r--net/ipv4/inetpeer.c16
-rw-r--r--net/ipv4/ip_forward.c1
-rw-r--r--net/ipv4/ipmr.c1
-rw-r--r--net/ipv4/tcp_ipv4.c9
-rw-r--r--net/ipv6/esp6.c18
-rw-r--r--net/ipv6/ip6_fib.c2
-rw-r--r--net/ipv6/ip6_output.c69
-rw-r--r--net/ipv6/ip6mr.c2
-rw-r--r--net/ipv6/tcp_ipv6.c9
-rw-r--r--net/l2tp/l2tp_eth.c2
-rw-r--r--net/l2tp/l2tp_ip.c33
-rw-r--r--net/l2tp/l2tp_ip6.c18
-rw-r--r--net/l2tp/l2tp_netlink.c3
-rw-r--r--net/mac80211/agg-rx.c7
-rw-r--r--net/mac80211/cfg.c6
-rw-r--r--net/mac80211/iface.c12
-rw-r--r--net/mac80211/mlme.c41
-rw-r--r--net/mac80211/offchannel.c16
-rw-r--r--net/mac80211/sta_info.c4
-rw-r--r--net/mac80211/tx.c11
-rw-r--r--net/mac80211/util.c14
-rw-r--r--net/netfilter/nf_conntrack_h323_main.c5
-rw-r--r--net/netfilter/xt_HMARK.c72
-rw-r--r--net/netlink/genetlink.c2
-rw-r--r--net/nfc/llcp/sock.c3
-rw-r--r--net/rds/ib.h3
-rw-r--r--net/sched/sch_atm.c2
-rw-r--r--net/sunrpc/auth_gss/gss_krb5_wrap.c61
-rw-r--r--net/sunrpc/auth_gss/svcauth_gss.c127
-rw-r--r--net/sunrpc/rpc_pipe.c12
-rw-r--r--net/sunrpc/rpcb_clnt.c12
-rw-r--r--net/sunrpc/svc.c26
-rw-r--r--net/sunrpc/svc_xprt.c4
-rw-r--r--net/sunrpc/svcauth_unix.c19
-rw-r--r--net/wanrouter/Kconfig2
-rw-r--r--net/wireless/ibss.c6
-rw-r--r--net/wireless/util.c19
-rw-r--r--net/xfrm/xfrm_policy.c3
-rwxr-xr-xscripts/checkpatch.pl20
-rwxr-xr-xscripts/get_maintainer.pl3
-rw-r--r--security/apparmor/lsm.c15
-rw-r--r--security/capability.c3
-rw-r--r--security/commoncap.c17
-rw-r--r--security/keys/compat.c2
-rw-r--r--security/keys/internal.h2
-rw-r--r--security/keys/keyctl.c77
-rw-r--r--security/keys/process_keys.c20
-rw-r--r--security/keys/request_key.c13
-rw-r--r--security/security.c51
-rw-r--r--security/selinux/hooks.c15
-rw-r--r--security/selinux/selinuxfs.c36
-rw-r--r--security/smack/smack_lsm.c15
-rw-r--r--sound/core/compress_offload.c8
-rw-r--r--sound/pci/hda/hda_codec.c46
-rw-r--r--sound/pci/hda/hda_codec.h2
-rw-r--r--sound/pci/hda/hda_intel.c19
-rw-r--r--sound/pci/hda/patch_conexant.c2
-rw-r--r--sound/pci/hda/patch_realtek.c10
-rw-r--r--sound/pci/rme9652/hdspm.c7
-rw-r--r--sound/soc/codecs/wm2000.c59
-rw-r--r--sound/soc/codecs/wm8904.c26
-rw-r--r--sound/soc/codecs/wm8994.c105
-rw-r--r--sound/soc/codecs/wm8996.c8
-rw-r--r--sound/soc/fsl/imx-audmux.c8
-rw-r--r--sound/soc/fsl/imx-ssi.c6
-rw-r--r--sound/soc/pxa/pxa-ssp.c38
-rw-r--r--sound/soc/sh/fsi.c25
-rw-r--r--sound/soc/soc-dapm.c4
-rw-r--r--sound/soc/soc-pcm.c6
-rw-r--r--sound/soc/tegra/tegra30_ahub.c1
-rw-r--r--sound/soc/tegra/tegra_wm8903.c13
-rw-r--r--sound/usb/6fire/firmware.c2
-rw-r--r--sound/usb/card.h1
-rw-r--r--sound/usb/pcm.c24
-rw-r--r--sound/usb/stream.c7
-rw-r--r--tools/hv/hv_kvp_daemon.c10
-rw-r--r--tools/perf/MANIFEST2
-rw-r--r--tools/perf/builtin-report.c4
-rw-r--r--tools/perf/builtin-stat.c16
-rw-r--r--tools/perf/builtin-top.c2
-rw-r--r--tools/perf/design.txt7
-rw-r--r--tools/perf/ui/browsers/annotate.c2
-rwxr-xr-xtools/perf/util/PERF-VERSION-GEN2
-rw-r--r--tools/perf/util/callchain.c2
-rw-r--r--tools/perf/util/callchain.h2
-rw-r--r--tools/perf/util/evlist.c17
-rw-r--r--tools/perf/util/evlist.h4
-rw-r--r--tools/perf/util/evsel.c29
-rw-r--r--tools/perf/util/header.c48
-rw-r--r--tools/perf/util/hist.c7
-rw-r--r--tools/perf/util/hist.h2
-rw-r--r--tools/perf/util/include/linux/bitops.h2
-rw-r--r--tools/perf/util/pager.c4
-rw-r--r--tools/perf/util/probe-event.c8
-rw-r--r--tools/perf/util/session.c107
-rw-r--r--tools/perf/util/session.h1
-rw-r--r--tools/perf/util/symbol.c38
-rw-r--r--tools/perf/util/symbol.h30
-rw-r--r--tools/power/x86/turbostat/turbostat.c30
-rw-r--r--tools/testing/selftests/Makefile2
-rw-r--r--tools/testing/selftests/kcmp/Makefile29
-rw-r--r--tools/testing/selftests/kcmp/kcmp_test.c94
-rw-r--r--tools/testing/selftests/mqueue/.gitignore2
-rw-r--r--tools/testing/selftests/mqueue/Makefile10
-rw-r--r--tools/testing/selftests/mqueue/mq_open_tests.c492
-rw-r--r--tools/testing/selftests/mqueue/mq_perf_tests.c741
-rw-r--r--usr/Kconfig10
-rw-r--r--virt/kvm/assigned-dev.c4
-rw-r--r--virt/kvm/irq_comm.c1
1660 files changed, 53912 insertions, 17761 deletions
diff --git a/.mailmap b/.mailmap
index 9b0d0267a3c3..658003aa9446 100644
--- a/.mailmap
+++ b/.mailmap
@@ -111,5 +111,8 @@ Uwe Kleine-König <ukleinek@informatik.uni-freiburg.de>
111Uwe Kleine-König <ukl@pengutronix.de> 111Uwe Kleine-König <ukl@pengutronix.de>
112Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com> 112Uwe Kleine-König <Uwe.Kleine-Koenig@digi.com>
113Valdis Kletnieks <Valdis.Kletnieks@vt.edu> 113Valdis Kletnieks <Valdis.Kletnieks@vt.edu>
114Viresh Kumar <viresh.linux@gmail.com> <viresh.kumar@st.com>
114Takashi YOSHII <takashi.yoshii.zj@renesas.com> 115Takashi YOSHII <takashi.yoshii.zj@renesas.com>
115Yusuke Goda <goda.yusuke@renesas.com> 116Yusuke Goda <goda.yusuke@renesas.com>
117Gustavo Padovan <gustavo@las.ic.unicamp.br>
118Gustavo Padovan <padovan@profusion.mobi>
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
index d535757799fe..679ce3543122 100644
--- a/Documentation/ABI/testing/sysfs-block-rssd
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -6,13 +6,21 @@ Description: This is a read-only file. Dumps below driver information and
6 hardware registers. 6 hardware registers.
7 - S ACTive 7 - S ACTive
8 - Command Issue 8 - Command Issue
9 - Allocated
10 - Completed 9 - Completed
11 - PORT IRQ STAT 10 - PORT IRQ STAT
12 - HOST IRQ STAT 11 - HOST IRQ STAT
12 - Allocated
13 - Commands in Q
13 14
14What: /sys/block/rssd*/status 15What: /sys/block/rssd*/status
15Date: April 2012 16Date: April 2012
16KernelVersion: 3.4 17KernelVersion: 3.4
17Contact: Asai Thambi S P <asamymuthupa@micron.com> 18Contact: Asai Thambi S P <asamymuthupa@micron.com>
18Description: This is a read-only file. Indicates the status of the device. 19Description: This is a read-only file. Indicates the status of the device.
20
21What: /sys/block/rssd*/flags
22Date: May 2012
23KernelVersion: 3.5
24Contact: Asai Thambi S P <asamymuthupa@micron.com>
25Description: This is a read-only file. Dumps the flags in port and driver
26 data structures.
diff --git a/Documentation/ABI/testing/sysfs-bus-fcoe b/Documentation/ABI/testing/sysfs-bus-fcoe
new file mode 100644
index 000000000000..469d09c02f6b
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-fcoe
@@ -0,0 +1,77 @@
1What: /sys/bus/fcoe/ctlr_X
2Date: March 2012
3KernelVersion: TBD
4Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
5Description: 'FCoE Controller' instances on the fcoe bus
6Attributes:
7
8 fcf_dev_loss_tmo: Device loss timeout period (see below). Changing
9 this value will change the dev_loss_tmo for all
10 FCFs discovered by this controller.
11
12 lesb_link_fail: Link Error Status Block (LESB) link failure count.
13
14 lesb_vlink_fail: Link Error Status Block (LESB) virtual link
15 failure count.
16
17 lesb_miss_fka: Link Error Status Block (LESB) missed FCoE
18 Initialization Protocol (FIP) Keep-Alives (FKA).
19
20 lesb_symb_err: Link Error Status Block (LESB) symbolic error count.
21
22 lesb_err_block: Link Error Status Block (LESB) block error count.
23
24 lesb_fcs_error: Link Error Status Block (LESB) Fibre Channel
25 Services error count.
26
27Notes: ctlr_X (global increment starting at 0)
28
29What: /sys/bus/fcoe/fcf_X
30Date: March 2012
31KernelVersion: TBD
32Contact: Robert Love <robert.w.love@intel.com>, devel@open-fcoe.org
33Description: 'FCoE FCF' instances on the fcoe bus. A FCF is a Fibre Channel
34 Forwarder, which is a FCoE switch that can accept FCoE
35 (Ethernet) packets, unpack them, and forward the embedded
36 Fibre Channel frames into a FC fabric. It can also take
37 outbound FC frames and pack them in Ethernet packets to
38 be sent to their destination on the Ethernet segment.
39Attributes:
40
41 fabric_name: Identifies the fabric that the FCF services.
42
43 switch_name: Identifies the FCF.
44
45 priority: The switch's priority amongst other FCFs on the same
46 fabric.
47
48 selected: 1 indicates that the switch has been selected for use;
49 0 indicates that the switch will not be used.
50
51 fc_map: The Fibre Channel MAP
52
53 vfid: The Virtual Fabric ID
54
55 mac: The FCF's MAC address
56
57 fka_peroid: The FIP Keep-Alive period
58
59 fabric_state: The internal kernel state
60 "Unknown" - Initialization value
61 "Disconnected" - No link to the FCF/fabric
62 "Connected" - Host is connected to the FCF
63 "Deleted" - FCF is being removed from the system
64
65 dev_loss_tmo: The device loss timeout period for this FCF.
66
67Notes: A device loss infrastructure similar to the FC Transport's
68 is present in fcoe_sysfs. It is useful because it keeps a
69 link flapping adapter from continually advancing the count
70 used to identify the discovered FCF. FCFs will exist in a
71 "Disconnected" state until either the timer expires and the
72 FCF becomes "Deleted" or the FCF is rediscovered and becomes
73 "Connected."
74
75
76Users: The first user of this interface will be the fcoeadm application,
77 which is commonly packaged in the fcoe-utils package.
diff --git a/Documentation/ABI/testing/sysfs-bus-iio b/Documentation/ABI/testing/sysfs-bus-iio
index 5bc8a476c15e..cfedf63cce15 100644
--- a/Documentation/ABI/testing/sysfs-bus-iio
+++ b/Documentation/ABI/testing/sysfs-bus-iio
@@ -219,6 +219,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_scale
219What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale 219What: /sys/bus/iio/devices/iio:deviceX/in_voltageY_supply_scale
220What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale 220What: /sys/bus/iio/devices/iio:deviceX/in_voltage_scale
221What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale 221What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_scale
222What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_scale
222What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale 223What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale
223What: /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale 224What: /sys/bus/iio/devices/iio:deviceX/in_accel_peak_scale
224What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale 225What: /sys/bus/iio/devices/iio:deviceX/in_anglvel_scale
@@ -273,6 +274,7 @@ What: /sys/bus/iio/devices/iio:deviceX/in_accel_scale_available
273What: /sys/.../iio:deviceX/in_voltageX_scale_available 274What: /sys/.../iio:deviceX/in_voltageX_scale_available
274What: /sys/.../iio:deviceX/in_voltage-voltage_scale_available 275What: /sys/.../iio:deviceX/in_voltage-voltage_scale_available
275What: /sys/.../iio:deviceX/out_voltageX_scale_available 276What: /sys/.../iio:deviceX/out_voltageX_scale_available
277What: /sys/.../iio:deviceX/out_altvoltageX_scale_available
276What: /sys/.../iio:deviceX/in_capacitance_scale_available 278What: /sys/.../iio:deviceX/in_capacitance_scale_available
277KernelVersion: 2.635 279KernelVersion: 2.635
278Contact: linux-iio@vger.kernel.org 280Contact: linux-iio@vger.kernel.org
@@ -298,14 +300,19 @@ Description:
298 gives the 3dB frequency of the filter in Hz. 300 gives the 3dB frequency of the filter in Hz.
299 301
300What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw 302What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_raw
303What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_raw
301KernelVersion: 2.6.37 304KernelVersion: 2.6.37
302Contact: linux-iio@vger.kernel.org 305Contact: linux-iio@vger.kernel.org
303Description: 306Description:
304 Raw (unscaled, no bias etc.) output voltage for 307 Raw (unscaled, no bias etc.) output voltage for
305 channel Y. The number must always be specified and 308 channel Y. The number must always be specified and
306 unique if the output corresponds to a single channel. 309 unique if the output corresponds to a single channel.
310 While DAC-like devices typically use out_voltage,
311 a continuous frequency generating device, such as
312 a DDS or PLL, should use out_altvoltage.
307 313
308What: /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw 314What: /sys/bus/iio/devices/iio:deviceX/out_voltageY&Z_raw
315What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY&Z_raw
309KernelVersion: 2.6.37 316KernelVersion: 2.6.37
310Contact: linux-iio@vger.kernel.org 317Contact: linux-iio@vger.kernel.org
311Description: 318Description:
@@ -316,6 +323,8 @@ Description:
316 323
317What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode 324What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown_mode
318What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode 325What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown_mode
326What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown_mode
327What: /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown_mode
319KernelVersion: 2.6.38 328KernelVersion: 2.6.38
320Contact: linux-iio@vger.kernel.org 329Contact: linux-iio@vger.kernel.org
321Description: 330Description:
@@ -330,6 +339,8 @@ Description:
330 339
331What: /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available 340What: /sys/.../iio:deviceX/out_votlageY_powerdown_mode_available
332What: /sys/.../iio:deviceX/out_voltage_powerdown_mode_available 341What: /sys/.../iio:deviceX/out_voltage_powerdown_mode_available
342What: /sys/.../iio:deviceX/out_altvotlageY_powerdown_mode_available
343What: /sys/.../iio:deviceX/out_altvoltage_powerdown_mode_available
333KernelVersion: 2.6.38 344KernelVersion: 2.6.38
334Contact: linux-iio@vger.kernel.org 345Contact: linux-iio@vger.kernel.org
335Description: 346Description:
@@ -338,6 +349,8 @@ Description:
338 349
339What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown 350What: /sys/bus/iio/devices/iio:deviceX/out_voltageY_powerdown
340What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown 351What: /sys/bus/iio/devices/iio:deviceX/out_voltage_powerdown
352What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_powerdown
353What: /sys/bus/iio/devices/iio:deviceX/out_altvoltage_powerdown
341KernelVersion: 2.6.38 354KernelVersion: 2.6.38
342Contact: linux-iio@vger.kernel.org 355Contact: linux-iio@vger.kernel.org
343Description: 356Description:
@@ -346,6 +359,24 @@ Description:
346 normal operation. Y may be suppressed if all outputs are 359 normal operation. Y may be suppressed if all outputs are
347 controlled together. 360 controlled together.
348 361
362What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_frequency
363KernelVersion: 3.4.0
364Contact: linux-iio@vger.kernel.org
365Description:
366 Output frequency for channel Y in Hz. The number must always be
367 specified and unique if the output corresponds to a single
368 channel.
369
370What: /sys/bus/iio/devices/iio:deviceX/out_altvoltageY_phase
371KernelVersion: 3.4.0
372Contact: linux-iio@vger.kernel.org
373Description:
374 Phase in radians of one frequency/clock output Y
375 (out_altvoltageY) relative to another frequency/clock output
376 (out_altvoltageZ) of the device X. The number must always be
377 specified and unique if the output corresponds to a single
378 channel.
379
349What: /sys/bus/iio/devices/iio:deviceX/events 380What: /sys/bus/iio/devices/iio:deviceX/events
350KernelVersion: 2.6.35 381KernelVersion: 2.6.35
351Contact: linux-iio@vger.kernel.org 382Contact: linux-iio@vger.kernel.org
diff --git a/Documentation/ABI/testing/sysfs-class-mtd b/Documentation/ABI/testing/sysfs-class-mtd
index 4d55a1888981..db1ad7e34fc3 100644
--- a/Documentation/ABI/testing/sysfs-class-mtd
+++ b/Documentation/ABI/testing/sysfs-class-mtd
@@ -123,3 +123,54 @@ Description:
123 half page, or a quarter page). 123 half page, or a quarter page).
124 124
125 In the case of ECC NOR, it is the ECC block size. 125 In the case of ECC NOR, it is the ECC block size.
126
127What: /sys/class/mtd/mtdX/ecc_strength
128Date: April 2012
129KernelVersion: 3.4
130Contact: linux-mtd@lists.infradead.org
131Description:
132 Maximum number of bit errors that the device is capable of
133 correcting within each region covering an ecc step. This will
134 always be a non-negative integer. Note that some devices will
135 have multiple ecc steps within each writesize region.
136
137 In the case of devices lacking any ECC capability, it is 0.
138
139What: /sys/class/mtd/mtdX/bitflip_threshold
140Date: April 2012
141KernelVersion: 3.4
142Contact: linux-mtd@lists.infradead.org
143Description:
144 This allows the user to examine and adjust the criteria by which
145 mtd returns -EUCLEAN from mtd_read(). If the maximum number of
146 bit errors that were corrected on any single region comprising
147 an ecc step (as reported by the driver) equals or exceeds this
148 value, -EUCLEAN is returned. Otherwise, absent an error, 0 is
149 returned. Higher layers (e.g., UBI) use this return code as an
150 indication that an erase block may be degrading and should be
151 scrutinized as a candidate for being marked as bad.
152
153 The initial value may be specified by the flash device driver.
154 If not, then the default value is ecc_strength.
155
156 The introduction of this feature brings a subtle change to the
157 meaning of the -EUCLEAN return code. Previously, it was
158 interpreted to mean simply "one or more bit errors were
159 corrected". Its new interpretation can be phrased as "a
160 dangerously high number of bit errors were corrected on one or
161 more regions comprising an ecc step". The precise definition of
162 "dangerously high" can be adjusted by the user with
163 bitflip_threshold. Users are discouraged from doing this,
164 however, unless they know what they are doing and have intimate
165 knowledge of the properties of their device. Broadly speaking,
166 bitflip_threshold should be low enough to detect genuine erase
167 block degradation, but high enough to avoid the consequences of
168 a persistent return value of -EUCLEAN on devices where sticky
169 bitflips occur. Note that if bitflip_threshold exceeds
170 ecc_strength, -EUCLEAN is never returned by mtd_read().
171 Conversely, if bitflip_threshold is zero, -EUCLEAN is always
172 returned, absent a hard error.
173
174 This is generally applicable only to NAND flash devices with ECC
175 capability. It is ignored on devices lacking ECC capability;
176 i.e., devices for which ecc_strength is zero.
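
As a rough illustration of how these two attributes can be used together, the
sketch below reads ecc_strength and derives a bitflip_threshold from it; the
mtd0 device, the chosen margin and the error handling are illustrative
assumptions, not a recommended policy.

        #include <stdio.h>

        /* Sketch: read mtd0's ecc_strength and set bitflip_threshold just below it. */
        int main(void)
        {
                FILE *f;
                int strength;

                f = fopen("/sys/class/mtd/mtd0/ecc_strength", "r");
                if (!f || fscanf(f, "%d", &strength) != 1)
                        return 1;
                fclose(f);

                f = fopen("/sys/class/mtd/mtd0/bitflip_threshold", "w");
                if (!f)
                        return 1;
                fprintf(f, "%d\n", strength > 1 ? strength - 1 : 1);
                fclose(f);
                return 0;
        }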
diff --git a/Documentation/CodingStyle b/Documentation/CodingStyle
index c58b236bbe04..cb9258b8fd35 100644
--- a/Documentation/CodingStyle
+++ b/Documentation/CodingStyle
@@ -671,8 +671,9 @@ ones already enabled by DEBUG.
671 Chapter 14: Allocating memory 671 Chapter 14: Allocating memory
672 672
673The kernel provides the following general purpose memory allocators: 673The kernel provides the following general purpose memory allocators:
674kmalloc(), kzalloc(), kcalloc(), vmalloc(), and vzalloc(). Please refer to 674kmalloc(), kzalloc(), kmalloc_array(), kcalloc(), vmalloc(), and
675the API documentation for further information about them. 675vzalloc(). Please refer to the API documentation for further information
676about them.
676 677
677The preferred form for passing a size of a struct is the following: 678The preferred form for passing a size of a struct is the following:
678 679
@@ -686,6 +687,17 @@ Casting the return value which is a void pointer is redundant. The conversion
686from void pointer to any other pointer type is guaranteed by the C programming 687from void pointer to any other pointer type is guaranteed by the C programming
687language. 688language.
688 689
690The preferred form for allocating an array is the following:
691
692 p = kmalloc_array(n, sizeof(...), ...);
693
694The preferred form for allocating a zeroed array is the following:
695
696 p = kcalloc(n, sizeof(...), ...);
697
698Both forms check for overflow on the allocation size n * sizeof(...),
699and return NULL if that occurred.
700
689 701
690 Chapter 15: The inline disease 702 Chapter 15: The inline disease
691 703
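
A minimal kernel-style sketch of the two forms; struct foo and n are
placeholders and the error handling is only illustrative.

        struct foo *arr;

        /* overflow-checked allocation of n elements (uninitialised) */
        arr = kmalloc_array(n, sizeof(*arr), GFP_KERNEL);
        if (!arr)
                return -ENOMEM;

        /* or, zeroed: */
        arr = kcalloc(n, sizeof(*arr), GFP_KERNEL);
        if (!arr)
                return -ENOMEM;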
diff --git a/Documentation/DocBook/mtdnand.tmpl b/Documentation/DocBook/mtdnand.tmpl
index 0c674be0d3c6..e0aedb7a7827 100644
--- a/Documentation/DocBook/mtdnand.tmpl
+++ b/Documentation/DocBook/mtdnand.tmpl
@@ -1119,8 +1119,6 @@ in this page</entry>
1119 These constants are defined in nand.h. They are ored together to describe 1119 These constants are defined in nand.h. They are ored together to describe
1120 the chip functionality. 1120 the chip functionality.
1121 <programlisting> 1121 <programlisting>
1122/* Chip can not auto increment pages */
1123#define NAND_NO_AUTOINCR 0x00000001
1124/* Buswitdh is 16 bit */ 1122/* Buswitdh is 16 bit */
1125#define NAND_BUSWIDTH_16 0x00000002 1123#define NAND_BUSWIDTH_16 0x00000002
1126/* Device supports partial programming without padding */ 1124/* Device supports partial programming without padding */
diff --git a/Documentation/arm/OMAP/DSS b/Documentation/arm/OMAP/DSS
index 888ae7b83ae4..a564ceea9e98 100644
--- a/Documentation/arm/OMAP/DSS
+++ b/Documentation/arm/OMAP/DSS
@@ -47,6 +47,51 @@ flexible way to enable non-common multi-display configuration. In addition to
47modelling the hardware overlays, omapdss supports virtual overlays and overlay 47modelling the hardware overlays, omapdss supports virtual overlays and overlay
48managers. These can be used when updating a display with CPU or system DMA. 48managers. These can be used when updating a display with CPU or system DMA.
49 49
50omapdss driver support for audio
51--------------------------------
52There exist several display technologies and standards that support audio as
53well. Hence, it is relevant to update the DSS device driver to provide an audio
54interface that may be used by an audio driver or any other driver interested in
55the functionality.
56
57The audio_enable function is intended to prepare the relevant
58IP for playback (e.g., enabling an audio FIFO, taking in/out of reset
59some IP, enabling companion chips, etc). It is intended to be called before
60audio_start. The audio_disable function performs the reverse operation and is
61intended to be called after audio_stop.
62
63While a given DSS device driver may support audio, it is possible that for
64certain configurations audio is not supported (e.g., an HDMI display using a
65VESA video timing). The audio_supported function is intended to query whether
66the current configuration of the display supports audio.
67
68The audio_config function is intended to configure all the relevant audio
69parameters of the display. In order to make the function independent of any
70specific DSS device driver, a struct omap_dss_audio is defined. Its purpose
71is to contain all the required parameters for audio configuration. At the
72moment, such structure contains pointers to IEC-60958 channel status word
73and CEA-861 audio infoframe structures. This should be enough to support
74HDMI and DisplayPort, as both are based on CEA-861 and IEC-60958.
75
76The audio_enable/disable, audio_config and audio_supported functions could be
77implemented as functions that may sleep. Hence, they should not be called
78while holding a spinlock or a readlock.
79
80The audio_start/audio_stop function is intended to effectively start/stop audio
81playback after the configuration has taken place. These functions are designed
82to be used in an atomic context. Hence, audio_start should return quickly and be
83called only after all the needed resources for audio playback (audio FIFOs,
84DMA channels, companion chips, etc) have been enabled to begin data transfers.
85audio_stop is designed to only stop the audio transfers. The resources used
86for playback are released using audio_disable.
87
88The enum omap_dss_audio_state may be used to help the implementations of
89the interface to keep track of the audio state. The initial state is _DISABLED;
90then, the state transitions to _CONFIGURED, and then, when it is ready to
91play audio, to _ENABLED. The state _PLAYING is used when the audio is being
92rendered.
93
94
50Panel and controller drivers 95Panel and controller drivers
51---------------------------- 96----------------------------
52 97
@@ -156,6 +201,7 @@ timings Display timings (pixclock,xres/hfp/hbp/hsw,yres/vfp/vbp/vsw)
156 "pal" and "ntsc" 201 "pal" and "ntsc"
157panel_name 202panel_name
158tear_elim Tearing elimination 0=off, 1=on 203tear_elim Tearing elimination 0=off, 1=on
204output_type Output type (video encoder only): "composite" or "svideo"
159 205
160There are also some debugfs files at <debugfs>/omapdss/ which show information 206There are also some debugfs files at <debugfs>/omapdss/ which show information
161about clocks and registers. 207about clocks and registers.
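
Put together, a caller would be expected to follow roughly the sequence
sketched below. This assumes the callbacks are reachable through the display
device's driver ops (dssdev->driver->audio_*) and that "audio" is a filled-in
struct omap_dss_audio; both are illustrative assumptions rather than a
definitive description of the API.

        /* Sketch only: ordering follows the text above. */
        if (!dssdev->driver->audio_supported(dssdev))
                return -ENODEV;

        r = dssdev->driver->audio_enable(dssdev);         /* may sleep */
        if (r)
                return r;

        r = dssdev->driver->audio_config(dssdev, &audio); /* may sleep */
        if (r)
                goto err_disable;

        dssdev->driver->audio_start(dssdev);              /* atomic-safe */
        /* ... stream audio ... */
        dssdev->driver->audio_stop(dssdev);               /* atomic-safe */

err_disable:
        dssdev->driver->audio_disable(dssdev);
        return r;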
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 57aae7765c74..65610bf52ebf 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -60,4 +60,4 @@ Introduction
60 Document Author 60 Document Author
61 --------------- 61 ---------------
62 62
63 Viresh Kumar <viresh.kumar@st.com>, (c) 2010-2012 ST Microelectronics 63 Viresh Kumar <viresh.linux@gmail.com>, (c) 2010-2012 ST Microelectronics
diff --git a/Documentation/device-mapper/thin-provisioning.txt b/Documentation/device-mapper/thin-provisioning.txt
index 3370bc4d7b98..f5cfc62b7ad3 100644
--- a/Documentation/device-mapper/thin-provisioning.txt
+++ b/Documentation/device-mapper/thin-provisioning.txt
@@ -287,6 +287,17 @@ iii) Messages
287 the current transaction id is when you change it with this 287 the current transaction id is when you change it with this
288 compare-and-swap message. 288 compare-and-swap message.
289 289
290 reserve_metadata_snap
291
292 Reserve a copy of the data mapping btree for use by userland.
293 This allows userland to inspect the mappings as they were when
294 this message was executed. Use the pool's status command to
295 get the root block associated with the metadata snapshot.
296
297 release_metadata_snap
298
299 Release a previously reserved copy of the data mapping btree.
300
290'thin' target 301'thin' target
291------------- 302-------------
292 303
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
new file mode 100644
index 000000000000..ae8af1694e95
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pinctrl.txt
@@ -0,0 +1,93 @@
1Pinctrl-based I2C Bus Mux
2
3This binding describes an I2C bus multiplexer that uses pin multiplexing to
4route the I2C signals, and represents the pin multiplexing configuration
5using the pinctrl device tree bindings.
6
7 +-----+ +-----+
8 | dev | | dev |
9 +------------------------+ +-----+ +-----+
10 | SoC | | |
11 | /----|------+--------+
12 | +---+ +------+ | child bus A, on first set of pins
13 | |I2C|---|Pinmux| |
14 | +---+ +------+ | child bus B, on second set of pins
15 | \----|------+--------+--------+
16 | | | | |
17 +------------------------+ +-----+ +-----+ +-----+
18 | dev | | dev | | dev |
19 +-----+ +-----+ +-----+
20
21Required properties:
22- compatible: i2c-mux-pinctrl
23- i2c-parent: The phandle of the I2C bus that this multiplexer's master-side
24 port is connected to.
25
26Also required are:
27
28* Standard pinctrl properties that specify the pin mux state for each child
29 bus. See ../pinctrl/pinctrl-bindings.txt.
30
31* Standard I2C mux properties. See mux.txt in this directory.
32
33* I2C child bus nodes. See mux.txt in this directory.
34
35For each named state defined in the pinctrl-names property, an I2C child bus
36will be created. I2C child bus numbers are assigned based on the index into
37the pinctrl-names property.
38
39The only exception is that no bus will be created for a state named "idle". If
40such a state is defined, it must be the last entry in pinctrl-names. For
41example:
42
43 pinctrl-names = "ddc", "pta", "idle" -> ddc = bus 0, pta = bus 1
44 pinctrl-names = "ddc", "idle", "pta" -> Invalid ("idle" not last)
45 pinctrl-names = "idle", "ddc", "pta" -> Invalid ("idle" not last)
46
47Whenever an access is made to a device on a child bus, the relevant pinctrl
48state will be programmed into hardware.
49
50If an idle state is defined, whenever an access is not being made to a device
51on a child bus, the idle pinctrl state will be programmed into hardware.
52
53If an idle state is not defined, the most recently used pinctrl state will be
54left programmed into hardware whenever no access is being made to a device on
55a child bus.
56
57Example:
58
59 i2cmux {
60 compatible = "i2c-mux-pinctrl";
61 #address-cells = <1>;
62 #size-cells = <0>;
63
64 i2c-parent = <&i2c1>;
65
66 pinctrl-names = "ddc", "pta", "idle";
67 pinctrl-0 = <&state_i2cmux_ddc>;
68 pinctrl-1 = <&state_i2cmux_pta>;
69 pinctrl-2 = <&state_i2cmux_idle>;
70
71 i2c@0 {
72 reg = <0>;
73 #address-cells = <1>;
74 #size-cells = <0>;
75
76 eeprom {
77 compatible = "eeprom";
78 reg = <0x50>;
79 };
80 };
81
82 i2c@1 {
83 reg = <1>;
84 #address-cells = <1>;
85 #size-cells = <0>;
86
87 eeprom {
88 compatible = "eeprom";
89 reg = <0x50>;
90 };
91 };
92 };
93
diff --git a/Documentation/devicetree/bindings/mtd/gpmi-nand.txt b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
new file mode 100644
index 000000000000..1a5bbd346d22
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/gpmi-nand.txt
@@ -0,0 +1,33 @@
1* Freescale General-Purpose Media Interface (GPMI)
2
3The GPMI nand controller provides an interface to control the
4NAND flash chips. Only one NAND chip is supported at the moment.
5
6Required properties:
7 - compatible : should be "fsl,<chip>-gpmi-nand"
8 - reg : should contain registers location and length for gpmi and bch.
9 - reg-names: Should contain the reg names "gpmi-nand" and "bch"
10 - interrupts : The first is the DMA interrupt number for GPMI.
11 The second is the BCH interrupt number.
12 - interrupt-names : The interrupt names "gpmi-dma", "bch";
13 - fsl,gpmi-dma-channel : Should contain the dma channel it uses.
14
15The device tree may optionally contain sub-nodes describing partitions of the
16address space. See partition.txt for more detail.
17
18Examples:
19
20gpmi-nand@8000c000 {
21 compatible = "fsl,imx28-gpmi-nand";
22 #address-cells = <1>;
23 #size-cells = <1>;
24 reg = <0x8000c000 2000>, <0x8000a000 2000>;
25 reg-names = "gpmi-nand", "bch";
26 interrupts = <88>, <41>;
27 interrupt-names = "gpmi-dma", "bch";
28 fsl,gpmi-dma-channel = <4>;
29
30 partition@0 {
31 ...
32 };
33};
diff --git a/Documentation/devicetree/bindings/mtd/mxc-nand.txt b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
new file mode 100644
index 000000000000..b5833d11c7be
--- /dev/null
+++ b/Documentation/devicetree/bindings/mtd/mxc-nand.txt
@@ -0,0 +1,19 @@
1* Freescale's mxc_nand
2
3Required properties:
4- compatible: "fsl,imxXX-nand"
5- reg: address range of the nfc block
6- interrupts: irq to be used
7- nand-bus-width: see nand.txt
8- nand-ecc-mode: see nand.txt
9- nand-on-flash-bbt: see nand.txt
10
11Example:
12
13 nand@d8000000 {
14 compatible = "fsl,imx27-nand";
15 reg = <0xd8000000 0x1000>;
16 interrupts = <29>;
17 nand-bus-width = <8>;
18 nand-ecc-mode = "hw";
19 };
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index ebaffe208ccb..56000b33340b 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -606,3 +606,9 @@ Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
606Who: Ludovic Desroches <ludovic.desroches@atmel.com> 606Who: Ludovic Desroches <ludovic.desroches@atmel.com>
607 607
608---------------------------- 608----------------------------
609
610What: net/wanrouter/
611When: June 2013
612Why: Unsupported/unmaintained/unused since 2.6
613
614----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index d449e632e6a0..8e2da1e06e3b 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -61,6 +61,7 @@ ata *);
61 ssize_t (*listxattr) (struct dentry *, char *, size_t); 61 ssize_t (*listxattr) (struct dentry *, char *, size_t);
62 int (*removexattr) (struct dentry *, const char *); 62 int (*removexattr) (struct dentry *, const char *);
63 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); 63 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
64 void (*update_time)(struct inode *, struct timespec *, int);
64 65
65locking rules: 66locking rules:
66 all may block 67 all may block
@@ -87,6 +88,8 @@ getxattr: no
87listxattr: no 88listxattr: no
88removexattr: yes 89removexattr: yes
89fiemap: no 90fiemap: no
91update_time: no
92
90 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on 93 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
91victim. 94victim.
92 cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem. 95 cross-directory ->rename() has (per-superblock) ->s_vfs_rename_sem.
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index 912af6ce5626..fb0a6aeb936c 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -40,6 +40,7 @@ Table of Contents
40 3.4 /proc/<pid>/coredump_filter - Core dump filtering settings 40 3.4 /proc/<pid>/coredump_filter - Core dump filtering settings
41 3.5 /proc/<pid>/mountinfo - Information about mounts 41 3.5 /proc/<pid>/mountinfo - Information about mounts
42 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm 42 3.6 /proc/<pid>/comm & /proc/<pid>/task/<tid>/comm
43 3.7 /proc/<pid>/task/<tid>/children - Information about task children
43 44
44 4 Configuring procfs 45 4 Configuring procfs
45 4.1 Mount options 46 4.1 Mount options
@@ -310,6 +311,11 @@ Table 1-4: Contents of the stat files (as of 2.6.30-rc7)
310 start_data address above which program data+bss is placed 311 start_data address above which program data+bss is placed
311 end_data address below which program data+bss is placed 312 end_data address below which program data+bss is placed
312 start_brk address above which program heap can be expanded with brk() 313 start_brk address above which program heap can be expanded with brk()
314 arg_start address above which program command line is placed
315 arg_end address below which program command line is placed
316 env_start address above which program environment is placed
317 env_end address below which program environment is placed
318 exit_code the thread's exit_code in the form reported by the waitpid system call
313.............................................................................. 319..............................................................................
314 320
315The /proc/PID/maps file containing the currently mapped memory regions and 321The /proc/PID/maps file containing the currently mapped memory regions and
@@ -1578,6 +1584,23 @@ then the kernel's TASK_COMM_LEN (currently 16 chars) will result in a truncated
1578comm value. 1584comm value.
1579 1585
1580 1586
15873.7 /proc/<pid>/task/<tid>/children - Information about task children
1588-------------------------------------------------------------------------
1589This file provides a fast way to retrieve the pids of the first-level children
1590of the task identified by the <pid>/<tid> pair. The format is a space-separated
1591stream of pids.
1592
1593Note the "first level" here -- if a child has children of its own they will
1594not be listed here; one needs to read /proc/<children-pid>/task/<tid>/children
1595to obtain the descendants.
1596
1597Since this interface is intended to be fast and cheap, it doesn't
1598guarantee precise results: some children might be
1599skipped, especially if they've exited right after their pids were printed,
1600so one needs to either stop or freeze the processes being inspected
1601if precise results are needed.
1602
1603
1581------------------------------------------------------------------------------ 1604------------------------------------------------------------------------------
1582Configuring procfs 1605Configuring procfs
1583------------------------------------------------------------------------------ 1606------------------------------------------------------------------------------
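
Reading the file needs nothing special; the short userspace sketch below
prints the first-level children of the calling process (for the main thread
the tid equals the pid, and the buffer size is an arbitrary example).

        #include <stdio.h>
        #include <unistd.h>

        int main(void)
        {
                char path[64], buf[4096];
                FILE *f;

                snprintf(path, sizeof(path), "/proc/%d/task/%d/children",
                         (int)getpid(), (int)getpid());
                f = fopen(path, "r");
                if (!f)
                        return 1;
                if (fgets(buf, sizeof(buf), f))
                        printf("children: %s\n", buf);
                fclose(f);
                return 0;
        }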
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index ef19f91a0f12..efd23f481704 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -363,6 +363,7 @@ struct inode_operations {
363 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 363 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
364 ssize_t (*listxattr) (struct dentry *, char *, size_t); 364 ssize_t (*listxattr) (struct dentry *, char *, size_t);
365 int (*removexattr) (struct dentry *, const char *); 365 int (*removexattr) (struct dentry *, const char *);
366 void (*update_time)(struct inode *, struct timespec *, int);
366}; 367};
367 368
368Again, all methods are called without any locks being held, unless 369Again, all methods are called without any locks being held, unless
@@ -471,6 +472,9 @@ otherwise noted.
471 removexattr: called by the VFS to remove an extended attribute from 472 removexattr: called by the VFS to remove an extended attribute from
472 a file. This method is called by removexattr(2) system call. 473 a file. This method is called by removexattr(2) system call.
473 474
475 update_time: called by the VFS to update a specific time or the i_version of
476 an inode. If this is not defined the VFS will update the inode itself
477 and call mark_inode_dirty_sync.
474 478
475The Address Space Object 479The Address Space Object
476======================== 480========================
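
For illustration, a filesystem that wants to defer or batch its own time
updates might wire the method up roughly as below; the myfs_* names are
placeholders and the flag handling merely mirrors what the VFS default does.

        #include <linux/fs.h>

        static void myfs_update_time(struct inode *inode, struct timespec *time,
                                     int flags)
        {
                if (flags & S_ATIME)
                        inode->i_atime = *time;
                if (flags & S_CTIME)
                        inode->i_ctime = *time;
                if (flags & S_MTIME)
                        inode->i_mtime = *time;
                if (flags & S_VERSION)
                        inode_inc_iversion(inode);
                /* a real filesystem might defer this instead */
                mark_inode_dirty_sync(inode);
        }

        static const struct inode_operations myfs_inode_ops = {
                .update_time    = myfs_update_time,
                /* other methods elided */
        };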
diff --git a/Documentation/hwmon/coretemp b/Documentation/hwmon/coretemp
index 84d46c0c71a3..c86b50c03ea8 100644
--- a/Documentation/hwmon/coretemp
+++ b/Documentation/hwmon/coretemp
@@ -6,7 +6,9 @@ Supported chips:
6 Prefix: 'coretemp' 6 Prefix: 'coretemp'
7 CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm), 7 CPUID: family 0x6, models 0xe (Pentium M DC), 0xf (Core 2 DC 65nm),
8 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm), 8 0x16 (Core 2 SC 65nm), 0x17 (Penryn 45nm),
9 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield) 9 0x1a (Nehalem), 0x1c (Atom), 0x1e (Lynnfield),
10 0x26 (Tunnel Creek Atom), 0x27 (Medfield Atom),
11 0x36 (Cedar Trail Atom)
10 Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual 12 Datasheet: Intel 64 and IA-32 Architectures Software Developer's Manual
11 Volume 3A: System Programming Guide 13 Volume 3A: System Programming Guide
12 http://softwarecommunity.intel.com/Wiki/Mobility/720.htm 14 http://softwarecommunity.intel.com/Wiki/Mobility/720.htm
@@ -52,6 +54,17 @@ Some information comes from ark.intel.com
52 54
53Process Processor TjMax(C) 55Process Processor TjMax(C)
54 56
5722nm Core i5/i7 Processors
58 i7 3920XM, 3820QM, 3720QM, 3667U, 3520M 105
59 i5 3427U, 3360M/3320M 105
60 i7 3770/3770K 105
61 i5 3570/3570K, 3550, 3470/3450 105
62 i7 3770S 103
63 i5 3570S/3550S, 3475S/3470S/3450S 103
64 i7 3770T 94
65 i5 3570T 94
66 i5 3470T 91
67
5532nm Core i3/i5/i7 Processors 6832nm Core i3/i5/i7 Processors
56 i7 660UM/640/620, 640LM/620, 620M, 610E 105 69 i7 660UM/640/620, 640LM/620, 620M, 610E 105
57 i5 540UM/520/430, 540M/520/450/430 105 70 i5 540UM/520/430, 540M/520/450/430 105
@@ -65,6 +78,11 @@ Process Processor TjMax(C)
65 U3400 105 78 U3400 105
66 P4505/P4500 90 79 P4505/P4500 90
67 80
8132nm Atom Processors
82 Z2460 90
83 D2700/2550/2500 100
84 N2850/2800/2650/2600 100
85
6845nm Xeon Processors 5400 Quad-Core 8645nm Xeon Processors 5400 Quad-Core
69 X5492, X5482, X5472, X5470, X5460, X5450 85 87 X5492, X5482, X5472, X5470, X5460, X5450 85
70 E5472, E5462, E5450/40/30/20/10/05 85 88 E5472, E5462, E5450/40/30/20/10/05 85
@@ -85,6 +103,8 @@ Process Processor TjMax(C)
85 N475/470/455/450 100 103 N475/470/455/450 100
86 N280/270 90 104 N280/270 90
87 330/230 125 105 330/230 125
106 E680/660/640/620 90
107 E680T/660T/640T/620T 110
88 108
8945nm Core2 Processors 10945nm Core2 Processors
90 Solo ULV SU3500/3300 100 110 Solo ULV SU3500/3300 100
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index c45513d806ab..a92c5ebf373e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -2543,6 +2543,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
2543 2543
2544 sched_debug [KNL] Enables verbose scheduler debug messages. 2544 sched_debug [KNL] Enables verbose scheduler debug messages.
2545 2545
2546 skew_tick= [KNL] Offset the periodic timer tick per cpu to mitigate
2547 xtime_lock contention on larger systems, and/or RCU lock
2548 contention on all systems with CONFIG_MAXSMP set.
2549 Format: { "0" | "1" }
2550 0 -- disable. (may be 1 via CONFIG_CMDLINE="skew_tick=1"
2551 1 -- enable.
2552 Note: increases power consumption, thus should only be
2553 enabled if running jitter sensitive (HPC/RT) workloads.
2554
2546 security= [SECURITY] Choose a security module to enable at boot. 2555 security= [SECURITY] Choose a security module to enable at boot.
2547 If this boot parameter is not specified, only the first 2556 If this boot parameter is not specified, only the first
2548 security module asking for security registration will be 2557 security module asking for security registration will be
diff --git a/Documentation/networking/stmmac.txt b/Documentation/networking/stmmac.txt
index ab1e8d7004c5..5cb9a1972460 100644
--- a/Documentation/networking/stmmac.txt
+++ b/Documentation/networking/stmmac.txt
@@ -10,8 +10,8 @@ Currently this network device driver is for all STM embedded MAC/GMAC
10(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000 10(i.e. 7xxx/5xxx SoCs), SPEAr (arm), Loongson1B (mips) and XLINX XC2V3000
11FF1152AMT0221 D1215994A VIRTEX FPGA board. 11FF1152AMT0221 D1215994A VIRTEX FPGA board.
12 12
13DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether MAC 10/100 13DWC Ether MAC 10/100/1000 Universal version 3.60a (and older) and DWC Ether
14Universal version 4.0 have been used for developing this driver. 14MAC 10/100 Universal version 4.0 have been used for developing this driver.
15 15
16This driver supports both the platform bus and PCI. 16This driver supports both the platform bus and PCI.
17 17
@@ -54,27 +54,27 @@ net_device structure enabling the scatter/gather feature.
54When one or more packets are received, an interrupt happens. The interrupts 54When one or more packets are received, an interrupt happens. The interrupts
55are not queued so the driver has to scan all the descriptors in the ring during 55are not queued so the driver has to scan all the descriptors in the ring during
56the receive process. 56the receive process.
57This is based on NAPI so the interrupt handler signals only if there is work to be 57This is based on NAPI so the interrupt handler signals only if there is work
58done, and it exits. 58to be done, and it exits.
59Then the poll method will be scheduled at some future point. 59Then the poll method will be scheduled at some future point.
60The incoming packets are stored, by the DMA, in a list of pre-allocated socket 60The incoming packets are stored, by the DMA, in a list of pre-allocated socket
61buffers in order to avoid the memcpy (Zero-copy). 61buffers in order to avoid the memcpy (Zero-copy).
62 62
634.3) Timer-Driver Interrupt 634.3) Timer-Driver Interrupt
64Instead of having the device that asynchronously notifies the frame receptions, the 64Instead of having the device that asynchronously notifies the frame receptions,
65driver configures a timer to generate an interrupt at regular intervals. 65the driver configures a timer to generate an interrupt at regular intervals.
66Based on the granularity of the timer, the frames that are received by the device 66Based on the granularity of the timer, the frames that are received by the
67will experience different levels of latency. Some NICs have dedicated timer 67device will experience different levels of latency. Some NICs have dedicated
68device to perform this task. STMMAC can use either the RTC device or the TMU 68timer device to perform this task. STMMAC can use either the RTC device or the
69channel 2 on STLinux platforms. 69TMU channel 2 on STLinux platforms.
70The timers frequency can be passed to the driver as parameter; when change it, 70The timers frequency can be passed to the driver as parameter; when change it,
71take care of both hardware capability and network stability/performance impact. 71take care of both hardware capability and network stability/performance impact.
72Several performance tests on STM platforms showed this optimisation allows to spare 72Several performance tests on STM platforms showed this optimisation allows to
73the CPU while having the maximum throughput. 73spare the CPU while having the maximum throughput.
74 74
754.4) WOL 754.4) WOL
76Wake up on Lan feature through Magic and Unicast frames are supported for the GMAC 76Wake up on Lan feature through Magic and Unicast frames are supported for the
77core. 77GMAC core.
78 78
794.5) DMA descriptors 794.5) DMA descriptors
80Driver handles both normal and enhanced descriptors. The latter has been only 80Driver handles both normal and enhanced descriptors. The latter has been only
@@ -106,7 +106,8 @@ Several driver's information can be passed through the platform
106These are included in the include/linux/stmmac.h header file 106These are included in the include/linux/stmmac.h header file
107and detailed below as well: 107and detailed below as well:
108 108
109 struct plat_stmmacenet_data { 109struct plat_stmmacenet_data {
110 char *phy_bus_name;
110 int bus_id; 111 int bus_id;
111 int phy_addr; 112 int phy_addr;
112 int interface; 113 int interface;
@@ -124,19 +125,24 @@ and detailed below as well:
124 void (*bus_setup)(void __iomem *ioaddr); 125 void (*bus_setup)(void __iomem *ioaddr);
125 int (*init)(struct platform_device *pdev); 126 int (*init)(struct platform_device *pdev);
126 void (*exit)(struct platform_device *pdev); 127 void (*exit)(struct platform_device *pdev);
128 void *custom_cfg;
129 void *custom_data;
127 void *bsp_priv; 130 void *bsp_priv;
128 }; 131 };
129 132
130Where: 133Where:
134 o phy_bus_name: phy bus name to attach to the stmmac.
131 o bus_id: bus identifier. 135 o bus_id: bus identifier.
132 o phy_addr: the physical address can be passed from the platform. 136 o phy_addr: the physical address can be passed from the platform.
133 If it is set to -1 the driver will automatically 137 If it is set to -1 the driver will automatically
134 detect it at run-time by probing all the 32 addresses. 138 detect it at run-time by probing all the 32 addresses.
135 o interface: PHY device's interface. 139 o interface: PHY device's interface.
136 o mdio_bus_data: specific platform fields for the MDIO bus. 140 o mdio_bus_data: specific platform fields for the MDIO bus.
137 o pbl: the Programmable Burst Length is maximum number of beats to 141 o dma_cfg: internal DMA parameters
142 o pbl: the Programmable Burst Length is maximum number of beats to
138 be transferred in one DMA transaction. 143 be transferred in one DMA transaction.
139 GMAC also enables the 4xPBL by default. 144 GMAC also enables the 4xPBL by default.
145 o fixed_burst/mixed_burst/burst_len
140 o clk_csr: fixed CSR Clock range selection. 146 o clk_csr: fixed CSR Clock range selection.
141 o has_gmac: uses the GMAC core. 147 o has_gmac: uses the GMAC core.
142 o enh_desc: if sets the MAC will use the enhanced descriptor structure. 148 o enh_desc: if sets the MAC will use the enhanced descriptor structure.
@@ -160,8 +166,9 @@ Where:
160 this is sometime necessary on some platforms (e.g. ST boxes) 166 this is sometime necessary on some platforms (e.g. ST boxes)
161 where the HW needs to have set some PIO lines or system cfg 167 where the HW needs to have set some PIO lines or system cfg
162 registers. 168 registers.
163 o custom_cfg: this is a custom configuration that can be passed while 169 o custom_cfg/custom_data: this is a custom configuration that can be passed
164 initialising the resources. 170 while initialising the resources.
171 o bsp_priv: another private pointer.
165 172
166For MDIO bus The we have: 173For MDIO bus The we have:
167 174
@@ -180,7 +187,6 @@ Where:
180 o irqs: list of IRQs, one per PHY. 187 o irqs: list of IRQs, one per PHY.
181 o probed_phy_irq: if irqs is NULL, use this for probed PHY. 188 o probed_phy_irq: if irqs is NULL, use this for probed PHY.
182 189
183
184For DMA engine we have the following internal fields that should be 190For DMA engine we have the following internal fields that should be
185tuned according to the HW capabilities. 191tuned according to the HW capabilities.
186 192
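
A board file might fill in the platform data along the lines below; only a few
of the fields listed above are shown and every value is an illustrative
assumption, not a recommendation for any particular SoC.

        static struct stmmac_mdio_bus_data my_mdio_bus_data = {
                .phy_mask       = 0,            /* probe every address */
        };

        static struct plat_stmmacenet_data my_stmmac_pdata = {
                .bus_id         = 0,
                .phy_addr       = -1,           /* auto-detect the PHY */
                .interface      = PHY_INTERFACE_MODE_MII,
                .mdio_bus_data  = &my_mdio_bus_data,
                .clk_csr        = 0,
                .has_gmac       = 1,
                .enh_desc       = 1,
        };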
diff --git a/Documentation/power/charger-manager.txt b/Documentation/power/charger-manager.txt
index fdcca991df30..b4f7f4b23f64 100644
--- a/Documentation/power/charger-manager.txt
+++ b/Documentation/power/charger-manager.txt
@@ -44,6 +44,16 @@ Charger Manager supports the following:
44 Normally, the platform will need to resume and suspend some devices 44 Normally, the platform will need to resume and suspend some devices
45 that are used by Charger Manager. 45 that are used by Charger Manager.
46 46
47* Support for premature full-battery event handling
48 If the battery voltage drops by "fullbatt_vchkdrop_uV" after
49 "fullbatt_vchkdrop_ms" from the full-battery event, the framework
50 restarts charging. This check is also performed while suspended by
51 setting wakeup time accordingly and using suspend_again.
52
53* Support for uevent-notify
54 With the charger-related events, the device sends
55 notification to users with UEVENT.
56
472. Global Charger-Manager Data related with suspend_again 572. Global Charger-Manager Data related with suspend_again
48======================================================== 58========================================================
49In order to setup Charger Manager with suspend-again feature 59In order to setup Charger Manager with suspend-again feature
@@ -55,7 +65,7 @@ if there are multiple batteries. If there are multiple batteries, the
55multiple instances of Charger Manager share the same charger_global_desc 65multiple instances of Charger Manager share the same charger_global_desc
56and it will manage in-suspend monitoring for all instances of Charger Manager. 66and it will manage in-suspend monitoring for all instances of Charger Manager.
57 67
58The user needs to provide all the two entries properly in order to activate 68The user needs to provide all three entries properly in order to activate
59in-suspend monitoring: 69in-suspend monitoring:
60 70
61struct charger_global_desc { 71struct charger_global_desc {
@@ -74,6 +84,11 @@ bool (*rtc_only_wakeup)(void);
74 same struct. If there is any other wakeup source triggered the 84 same struct. If there is any other wakeup source triggered the
75 wakeup, it should return false. If the "rtc" is the only wakeup 85 wakeup, it should return false. If the "rtc" is the only wakeup
76 reason, it should return true. 86 reason, it should return true.
87
88bool assume_timer_stops_in_suspend;
89 : if true, Charger Manager assumes that
90 the timer (CM uses jiffies as timer) stops during suspend. Then, CM
91 assumes that the suspend-duration is the same as the alarm length.
77}; 92};
78 93
793. How to setup suspend_again 943. How to setup suspend_again
@@ -111,6 +126,16 @@ enum polling_modes polling_mode;
111 CM_POLL_CHARGING_ONLY: poll this battery if and only if the 126 CM_POLL_CHARGING_ONLY: poll this battery if and only if the
112 battery is being charged. 127 battery is being charged.
113 128
129unsigned int fullbatt_vchkdrop_ms;
130unsigned int fullbatt_vchkdrop_uV;
131 : If both have non-zero values, Charger Manager will check the
132 battery voltage drop fullbatt_vchkdrop_ms after the battery is fully
133 charged. If the voltage drop is over fullbatt_vchkdrop_uV, Charger
134 Manager will try to recharge the battery by disabling and enabling
135	chargers. Recharging based on the voltage-drop condition alone (without
136	the delay condition) needs to be implemented with hardware interrupts
137	from fuel gauges or charger devices/chips.
138
114unsigned int fullbatt_uV; 139unsigned int fullbatt_uV;
115 : If specified with a non-zero value, Charger Manager assumes 140 : If specified with a non-zero value, Charger Manager assumes
116 that the battery is full (capacity = 100) if the battery is not being 141 that the battery is full (capacity = 100) if the battery is not being
@@ -122,6 +147,8 @@ unsigned int polling_interval_ms;
122 this battery every polling_interval_ms or more frequently. 147 this battery every polling_interval_ms or more frequently.
123 148
124enum data_source battery_present; 149enum data_source battery_present;
150 : CM_BATTERY_PRESENT: assume that the battery exists.
151	CM_NO_BATTERY: assume that the battery does not exist.
125 CM_FUEL_GAUGE: get battery presence information from fuel gauge. 152 CM_FUEL_GAUGE: get battery presence information from fuel gauge.
126 CM_CHARGER_STAT: get battery presence from chargers. 153 CM_CHARGER_STAT: get battery presence from chargers.
127 154
@@ -151,7 +178,17 @@ bool measure_battery_temp;
151 the value of measure_battery_temp. 178 the value of measure_battery_temp.
152}; 179};
153 180
1545. Other Considerations 1815. Notify Charger-Manager of charger events: cm_notify_event()
182=========================================================
183If a charger event needs to be reported to
184Charger Manager, the charger device driver that triggers the event can call
185cm_notify_event(psy, type, msg) to notify the corresponding Charger Manager.
186In the function, psy is the charger driver's power_supply pointer, which is
187associated with Charger-Manager. The parameter "type"
188is the same as irq's type (enum cm_event_types). The event message "msg" is
189optional and is effective only if the event type is "UNDESCRIBED" or "OTHERS".
190
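As a minimal illustration of such a call from a charger driver: only the
cm_notify_event(psy, type, msg) form above is taken from this text; the event
type name CM_EVENT_BATT_FULL and the header path are assumptions.

    #include <linux/power_supply.h>
    #include <linux/power/charger-manager.h>    /* header path assumed */

    /* Sketch: report a full-battery event for this charger's power_supply. */
    static void example_report_batt_full(struct power_supply *psy)
    {
            /*
             * psy is the charger driver's power_supply known to Charger Manager;
             * msg may be NULL for well-known event types.
             */
            cm_notify_event(psy, CM_EVENT_BATT_FULL, NULL);    /* enum value assumed */
    }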
1916. Other Considerations
155======================= 192=======================
156 193
157At the charger/battery-related events such as battery-pulled-out, 194At the charger/battery-related events such as battery-pulled-out,
diff --git a/Documentation/power/power_supply_class.txt b/Documentation/power/power_supply_class.txt
index 9f16c5178b66..211831d4095f 100644
--- a/Documentation/power/power_supply_class.txt
+++ b/Documentation/power/power_supply_class.txt
@@ -84,6 +84,8 @@ are already charged or discharging, 'n/a' can be displayed (or
84HEALTH - represents health of the battery, values corresponds to 84HEALTH - represents health of the battery, values corresponds to
85POWER_SUPPLY_HEALTH_*, defined in battery.h. 85POWER_SUPPLY_HEALTH_*, defined in battery.h.
86 86
87VOLTAGE_OCV - open circuit voltage of the battery.
88
87VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and 89VOLTAGE_MAX_DESIGN, VOLTAGE_MIN_DESIGN - design values for maximal and
88minimal power supply voltages. Maximal/minimal means values of voltages 90minimal power supply voltages. Maximal/minimal means values of voltages
89when battery considered "full"/"empty" at normal conditions. Yes, there is 91when battery considered "full"/"empty" at normal conditions. Yes, there is
diff --git a/Documentation/sysctl/fs.txt b/Documentation/sysctl/fs.txt
index 88fd7f5c8dcd..13d6166d7a27 100644
--- a/Documentation/sysctl/fs.txt
+++ b/Documentation/sysctl/fs.txt
@@ -225,6 +225,13 @@ a queue must be less or equal then msg_max.
225maximum message size value (it is every message queue's attribute set during 225maximum message size value (it is every message queue's attribute set during
226its creation). 226its creation).
227 227
228/proc/sys/fs/mqueue/msg_default is a read/write file for setting/getting the
229default number of messages in a queue, used when the attr parameter of mq_open(2)
230is NULL. If it exceeds msg_max, the default value is initialized to msg_max.
231
232/proc/sys/fs/mqueue/msgsize_default is a read/write file for setting/getting
233the default message size, used when the attr parameter of mq_open(2) is NULL. If
234it exceeds msgsize_max, the default value is initialized to msgsize_max.
228 235
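For illustration, both defaults take effect when a queue is created with a NULL
attr, as in this small userspace sketch (compile with -lrt; the queue name is
arbitrary):

    #include <fcntl.h>
    #include <mqueue.h>
    #include <stdio.h>

    int main(void)
    {
            struct mq_attr attr;
            /* attr == NULL: the queue is created with msg_default/msgsize_default. */
            mqd_t q = mq_open("/mq_default_demo", O_CREAT | O_RDWR, 0600, NULL);

            if (q == (mqd_t)-1) {
                    perror("mq_open");
                    return 1;
            }
            if (mq_getattr(q, &attr) == 0)
                    printf("mq_maxmsg=%ld mq_msgsize=%ld\n",
                           attr.mq_maxmsg, attr.mq_msgsize);
            mq_close(q);
            mq_unlink("/mq_default_demo");
            return 0;
    }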
2294. /proc/sys/fs/epoll - Configuration options for the epoll interface 2364. /proc/sys/fs/epoll - Configuration options for the epoll interface
230-------------------------------------------------------- 237--------------------------------------------------------
diff --git a/Documentation/vm/frontswap.txt b/Documentation/vm/frontswap.txt
new file mode 100644
index 000000000000..37067cf455f4
--- /dev/null
+++ b/Documentation/vm/frontswap.txt
@@ -0,0 +1,278 @@
1Frontswap provides a "transcendent memory" interface for swap pages.
2In some environments, dramatic performance savings may be obtained because
3swapped pages are saved in RAM (or a RAM-like device) instead of a swap disk.
4
5(Note, frontswap -- and cleancache (merged at 3.0) -- are the "frontends"
6and the only necessary changes to the core kernel for transcendent memory;
7all other supporting code -- the "backends" -- is implemented as drivers.
8See the LWN.net article "Transcendent memory in a nutshell" for a detailed
9overview of frontswap and related kernel parts:
10https://lwn.net/Articles/454795/ )
11
12Frontswap is so named because it can be thought of as the opposite of
13a "backing" store for a swap device. The storage is assumed to be
14a synchronous concurrency-safe page-oriented "pseudo-RAM device" conforming
15to the requirements of transcendent memory (such as Xen's "tmem", or
16in-kernel compressed memory, aka "zcache", or future RAM-like devices);
17this pseudo-RAM device is not directly accessible or addressable by the
18kernel and is of unknown and possibly time-varying size. The driver
19links itself to frontswap by calling frontswap_register_ops to set the
20frontswap_ops funcs appropriately and the functions it provides must
21conform to certain policies as follows:
22
23An "init" prepares the device to receive frontswap pages associated
24with the specified swap device number (aka "type"). A "store" will
25copy the page to transcendent memory and associate it with the type and
26offset associated with the page. A "load" will copy the page, if found,
27from transcendent memory into kernel memory, but will NOT remove the page
28from transcendent memory. An "invalidate_page" will remove the page
29from transcendent memory and an "invalidate_area" will remove ALL pages
30associated with the swap type (e.g., like swapoff) and notify the "device"
31to refuse further stores with that swap type.
32
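A backend skeleton following these policies might look like the sketch below.
The exact prototypes of the ops and of frontswap_register_ops are not spelled
out here, so the signatures are assumptions chosen to match the description
above, not a reference.

    #include <linux/init.h>
    #include <linux/mm.h>
    #include <linux/frontswap.h>    /* struct frontswap_ops; prototypes assumed */

    static void my_init(unsigned type)
    {
            /* Get ready to receive pages for swap device number "type". */
    }

    static int my_store(unsigned type, pgoff_t offset, struct page *page)
    {
            /* Copy the page into the backend; 0 = accepted, nonzero = rejected. */
            return -1;    /* this skeleton rejects every page */
    }

    static int my_load(unsigned type, pgoff_t offset, struct page *page)
    {
            /* Fill the page from the backend if it was stored earlier. */
            return -1;
    }

    static void my_invalidate_page(unsigned type, pgoff_t offset)
    {
            /* Drop the single page at (type, offset). */
    }

    static void my_invalidate_area(unsigned type)
    {
            /* Drop all pages of this swap type and refuse further stores. */
    }

    static struct frontswap_ops my_frontswap_ops = {
            .init = my_init,
            .store = my_store,
            .load = my_load,
            .invalidate_page = my_invalidate_page,
            .invalidate_area = my_invalidate_area,
    };

    static int __init my_backend_init(void)
    {
            frontswap_register_ops(&my_frontswap_ops);
            return 0;
    }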
33Once a page is successfully stored, a matching load on the page will normally
34succeed. So when the kernel finds itself in a situation where it needs
35to swap out a page, it first attempts to use frontswap. If the store returns
36success, the data has been successfully saved to transcendent memory and
37a disk write and, if the data is later read back, a disk read are avoided.
38If a store returns failure, transcendent memory has rejected the data, and the
39page can be written to swap as usual.
40
41If a backend chooses, frontswap can be configured as a "writethrough
42cache" by calling frontswap_writethrough(). In this mode, the reduction
43in swap device writes is lost (and also a non-trivial performance advantage)
44in order to allow the backend to arbitrarily "reclaim" space used to
45store frontswap pages to more completely manage its memory usage.
46
47Note that if a page is stored and the page already exists in transcendent memory
48(a "duplicate" store), either the store succeeds and the data is overwritten,
49or the store fails AND the page is invalidated. This ensures stale data may
50never be obtained from frontswap.
51
52If properly configured, monitoring of frontswap is done via debugfs in
53the /sys/kernel/debug/frontswap directory. The effectiveness of
54frontswap can be measured (across all swap devices) with:
55
56failed_stores - how many store attempts have failed
57loads - how many loads were attempted (all should succeed)
58succ_stores - how many store attempts have succeeded
59invalidates - how many invalidates were attempted
60
61A backend implementation may provide additional metrics.
62
63FAQ
64
651) Where's the value?
66
67When a workload starts swapping, performance falls through the floor.
68Frontswap significantly increases performance in many such workloads by
69providing a clean, dynamic interface to read and write swap pages to
70"transcendent memory" that is otherwise not directly addressable to the kernel.
71This interface is ideal when data is transformed to a different form
72and size (such as with compression) or secretly moved (as might be
73useful for write-balancing for some RAM-like devices). Swap pages (and
74evicted page-cache pages) are a great use for this kind of slower-than-RAM-
75but-much-faster-than-disk "pseudo-RAM device" and the frontswap (and
76cleancache) interface to transcendent memory provides a nice way to read
77and write -- and indirectly "name" -- the pages.
78
79Frontswap -- and cleancache -- with a fairly small impact on the kernel,
80provides a huge amount of flexibility for more dynamic, flexible RAM
81utilization in various system configurations:
82
83In the single kernel case, aka "zcache", pages are compressed and
84stored in local memory, thus increasing the total anonymous pages
85that can be safely kept in RAM. Zcache essentially trades off CPU
86cycles used in compression/decompression for better memory utilization.
87Benchmarks have shown little or no impact when memory pressure is
88low while providing a significant performance improvement (25%+)
89on some workloads under high memory pressure.
90
91"RAMster" builds on zcache by adding "peer-to-peer" transcendent memory
92support for clustered systems. Frontswap pages are locally compressed
93as in zcache, but then "remotified" to another system's RAM. This
94allows RAM to be dynamically load-balanced back-and-forth as needed,
95i.e. when system A is overcommitted, it can swap to system B, and
96vice versa. RAMster can also be configured as a memory server so
97many servers in a cluster can swap, dynamically as needed, to a single
98server configured with a large amount of RAM... without pre-configuring
99how much of the RAM is available for each of the clients!
100
101In the virtual case, the whole point of virtualization is to statistically
102multiplex physical resources across the varying demands of multiple
103virtual machines. This is really hard to do with RAM and efforts to do
104it well with no kernel changes have essentially failed (except in some
105well-publicized special-case workloads).
106Specifically, the Xen Transcendent Memory backend allows otherwise
107"fallow" hypervisor-owned RAM to not only be "time-shared" between multiple
108virtual machines, but the pages can be compressed and deduplicated to
109optimize RAM utilization. And when guest OSes are induced to surrender
110underutilized RAM (e.g. with "selfballooning"), sudden unexpected
111memory pressure may result in swapping; frontswap allows those pages
112to be swapped to and from hypervisor RAM (if overall host system memory
113conditions allow), thus mitigating the potentially awful performance impact
114of unplanned swapping.
115
116A KVM implementation is underway and has been RFC'ed to lkml. And,
117using frontswap, investigation is also underway on the use of NVM as
118a memory extension technology.
119
1202) Sure there may be performance advantages in some situations, but
121 what's the space/time overhead of frontswap?
122
123If CONFIG_FRONTSWAP is disabled, every frontswap hook compiles into
124nothingness and the only overhead is a few extra bytes per swapon'ed
125swap device. If CONFIG_FRONTSWAP is enabled but no frontswap "backend"
126registers, the only overhead is one extra global-variable comparison against zero for
127every swap page read or written. If CONFIG_FRONTSWAP is enabled
128AND a frontswap backend registers AND the backend fails every "store"
129request (i.e. provides no memory despite claiming it might),
130CPU overhead is still negligible -- and since every frontswap fail
131precedes a swap page write-to-disk, the system is highly likely
132to be I/O bound and using a small fraction of a percent of a CPU
133will be irrelevant anyway.
134
135As for space, if CONFIG_FRONTSWAP is enabled AND a frontswap backend
136registers, one bit is allocated for every swap page for every swap
137device that is swapon'd. This is added to the EIGHT bits (which
138was sixteen until about 2.6.34) that the kernel already allocates
139for every swap page for every swap device that is swapon'd. (Hugh
140Dickins has observed that frontswap could probably steal one of
141the existing eight bits, but let's worry about that minor optimization
142later.) For very large swap disks (which are rare) on a standard
1434K pagesize, this is 1MB per 32GB swap.
144
145When swap pages are stored in transcendent memory instead of written
146out to disk, there is a side effect that this may create more memory
147pressure that can potentially outweigh the other advantages. A
148backend, such as zcache, must implement policies to carefully (but
149dynamically) manage memory limits to ensure this doesn't happen.
150
1513) OK, how about a quick overview of what this frontswap patch does
152 in terms that a kernel hacker can grok?
153
154Let's assume that a frontswap "backend" has registered during
155kernel initialization; this registration indicates that this
156frontswap backend has access to some "memory" that is not directly
157accessible by the kernel. Exactly how much memory it provides is
158entirely dynamic and random.
159
160Whenever a swap-device is swapon'd frontswap_init() is called,
161passing the swap device number (aka "type") as a parameter.
162This notifies frontswap to expect attempts to "store" swap pages
163associated with that number.
164
165Whenever the swap subsystem is readying a page to write to a swap
166device (cf. swap_writepage()), frontswap_store is called. Frontswap
167consults with the frontswap backend and if the backend says it does NOT
168have room, frontswap_store returns -1 and the kernel swaps the page
169to the swap device as normal. Note that the response from the frontswap
170backend is unpredictable to the kernel; it may choose to never accept a
171page, it could accept every ninth page, or it might accept every
172page. But if the backend does accept a page, the data from the page
173has already been copied and associated with the type and offset,
174and the backend guarantees the persistence of the data. In this case,
175frontswap sets a bit in the "frontswap_map" for the swap device
176corresponding to the page offset on the swap device to which it would
177otherwise have written the data.
178
179When the swap subsystem needs to swap-in a page (swap_readpage()),
180it first calls frontswap_load() which checks the frontswap_map to
181see if the page was earlier accepted by the frontswap backend. If
182it was, the page of data is filled from the frontswap backend and
183the swap-in is complete. If not, the normal swap-in code is
184executed to obtain the page of data from the real swap device.
185
186So every time the frontswap backend accepts a page, a swap device write
187and (potentially) a swap device read are replaced by a "frontswap backend
188store" and (possibly) a "frontswap backend load", which are presumably much
189faster.
190
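Condensed into pseudo-C, the hook placement just described looks roughly like
this; the fallback helpers stand in for the real block-I/O paths and are
hypothetical:

    /* Sketch only; not the real swap_writepage()/swap_readpage() code. */
    int swap_writepage_sketch(struct page *page)
    {
            if (frontswap_store(page) == 0)
                    return 0;    /* backend accepted: no swap-device write issued */
            return write_page_to_swap_device(page);    /* hypothetical fallback */
    }

    int swap_readpage_sketch(struct page *page)
    {
            if (frontswap_load(page) == 0)
                    return 0;    /* frontswap_map hit: page filled from the backend */
            return read_page_from_swap_device(page);   /* hypothetical fallback */
    }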
1914) Can't frontswap be configured as a "special" swap device that is
192 just higher priority than any real swap device (e.g. like zswap,
193 or maybe swap-over-nbd/NFS)?
194
195No. First, the existing swap subsystem doesn't allow for any kind of
196swap hierarchy. Perhaps it could be rewritten to accommodate a hierarchy,
197but this would require fairly drastic changes. Even if it were
198rewritten, the existing swap subsystem uses the block I/O layer which
199assumes a swap device is fixed size and any page in it is linearly
200addressable. Frontswap barely touches the existing swap subsystem,
201and works around the constraints of the block I/O subsystem to provide
202a great deal of flexibility and dynamicity.
203
204For example, the acceptance of any swap page by the frontswap backend is
205entirely unpredictable. This is critical to the definition of frontswap
206backends because it grants completely dynamic discretion to the
207backend. In zcache, one cannot know a priori how compressible a page is.
208"Poorly" compressible pages can be rejected, and "poorly" can itself be
209defined dynamically depending on current memory constraints.
210
211Further, frontswap is entirely synchronous whereas a real swap
212device is, by definition, asynchronous and uses block I/O. The
213block I/O layer is not only unnecessary, but may perform "optimizations"
214that are inappropriate for a RAM-oriented device including delaying
215the write of some pages for a significant amount of time. Synchrony is
216required to ensure the dynamicity of the backend and to avoid thorny race
217conditions that would unnecessarily and greatly complicate frontswap
218and/or the block I/O subsystem. That said, only the initial "store"
219and "load" operations need be synchronous. A separate asynchronous thread
220is free to manipulate the pages stored by frontswap. For example,
221the "remotification" thread in RAMster uses standard asynchronous
222kernel sockets to move compressed frontswap pages to a remote machine.
223Similarly, a KVM guest-side implementation could do in-guest compression
224and use "batched" hypercalls.
225
226In a virtualized environment, the dynamicity allows the hypervisor
227(or host OS) to do "intelligent overcommit". For example, it can
228choose to accept pages only until host-swapping might be imminent,
229then force guests to do their own swapping.
230
231There is a downside to the transcendent memory specifications for
232frontswap: Since any "store" might fail, there must always be a real
233slot on a real swap device to swap the page. Thus frontswap must be
234implemented as a "shadow" to every swapon'd device with the potential
235capability of holding every page that the swap device might have held
236and the possibility that it might hold no pages at all. This means
237that frontswap cannot contain more pages than the total of swapon'd
238swap devices. For example, if NO swap device is configured on some
239installation, frontswap is useless. Swapless portable devices
240can still use frontswap but a backend for such devices must configure
241some kind of "ghost" swap device and ensure that it is never used.
242
2435) Why this weird definition about "duplicate stores"? If a page
244 has been previously successfully stored, can't it always be
245 successfully overwritten?
246
247Nearly always it can, but no, sometimes it cannot. Consider an example
248where data is compressed and the original 4K page has been compressed
249to 1K. Now an attempt is made to overwrite the page with data that
250is non-compressible and so would take the entire 4K. But the backend
251has no more space. In this case, the store must be rejected. Whenever
252frontswap rejects a store that would overwrite, it also must invalidate
253the old data and ensure that it is no longer accessible. Since the
254swap subsystem then writes the new data to the real swap device,
255this is the correct course of action to ensure coherency.
256
2576) What is frontswap_shrink for?
258
259When the (non-frontswap) swap subsystem swaps out a page to a real
260swap device, that page is only taking up low-value pre-allocated disk
261space. But if frontswap has placed a page in transcendent memory, that
262page may be taking up valuable real estate. The frontswap_shrink
263routine allows code outside of the swap subsystem to force pages out
264of the memory managed by frontswap and back into kernel-addressable memory.
265For example, in RAMster, a "suction driver" thread will attempt
266to "repatriate" pages sent to a remote machine back to the local machine;
267this is driven using the frontswap_shrink mechanism when memory pressure
268subsides.
269
2707) Why does the frontswap patch create the new include file swapfile.h?
271
272The frontswap code depends on some swap-subsystem-internal data
273structures that have, over the years, moved back and forth between
274static and global. This seemed a reasonable compromise: Define
275them as global but declare them in a new include file that isn't
276included by the large number of source files that include swap.h.
277
278Dan Magenheimer, last updated April 9, 2012
diff --git a/Documentation/vm/pagemap.txt b/Documentation/vm/pagemap.txt
index 4600cbe3d6be..7587493c67f1 100644
--- a/Documentation/vm/pagemap.txt
+++ b/Documentation/vm/pagemap.txt
@@ -16,7 +16,7 @@ There are three components to pagemap:
16 * Bits 0-4 swap type if swapped 16 * Bits 0-4 swap type if swapped
17 * Bits 5-54 swap offset if swapped 17 * Bits 5-54 swap offset if swapped
18 * Bits 55-60 page shift (page size = 1<<page shift) 18 * Bits 55-60 page shift (page size = 1<<page shift)
19 * Bit 61 reserved for future use 19 * Bit 61 page is file-page or shared-anon
20 * Bit 62 page swapped 20 * Bit 62 page swapped
21 * Bit 63 page present 21 * Bit 63 page present
22 22
diff --git a/Documentation/vm/slub.txt b/Documentation/vm/slub.txt
index 6752870c4970..b0c6d1bbb434 100644
--- a/Documentation/vm/slub.txt
+++ b/Documentation/vm/slub.txt
@@ -17,7 +17,7 @@ data and perform operation on the slabs. By default slabinfo only lists
17slabs that have data in them. See "slabinfo -h" for more options when 17slabs that have data in them. See "slabinfo -h" for more options when
18running the command. slabinfo can be compiled with 18running the command. slabinfo can be compiled with
19 19
20gcc -o slabinfo tools/slub/slabinfo.c 20gcc -o slabinfo tools/vm/slabinfo.c
21 21
22Some of the modes of operation of slabinfo require that slub debugging 22Some of the modes of operation of slabinfo require that slub debugging
23be enabled on the command line. F.e. no tracking information will be 23be enabled on the command line. F.e. no tracking information will be
diff --git a/Documentation/x86/efi-stub.txt b/Documentation/x86/efi-stub.txt
new file mode 100644
index 000000000000..44e6bb6ead10
--- /dev/null
+++ b/Documentation/x86/efi-stub.txt
@@ -0,0 +1,65 @@
1 The EFI Boot Stub
2 ---------------------------
3
4On the x86 platform, a bzImage can masquerade as a PE/COFF image,
5thereby convincing EFI firmware loaders to load it as an EFI
6executable. The code that modifies the bzImage header, along with the
7EFI-specific entry point that the firmware loader jumps to, are
8collectively known as the "EFI boot stub", and live in
9arch/x86/boot/header.S and arch/x86/boot/compressed/eboot.c,
10respectively.
11
12By using the EFI boot stub it's possible to boot a Linux kernel
13without the use of a conventional EFI boot loader, such as grub or
14elilo. Since the EFI boot stub performs the jobs of a boot loader, in
15a certain sense it *IS* the boot loader.
16
17The EFI boot stub is enabled with the CONFIG_EFI_STUB kernel option.
18
19
20**** How to install bzImage.efi
21
22The bzImage located in arch/x86/boot/bzImage must be copied to the EFI
23System Partition (ESP) and renamed with the extension ".efi". Without
24the extension the EFI firmware loader will refuse to execute it. It's
25not possible to execute bzImage.efi from the usual Linux file systems
26because EFI firmware doesn't have support for them.
27
28
29**** Passing kernel parameters from the EFI shell
30
31Arguments to the kernel can be passed after bzImage.efi, e.g.
32
33 fs0:> bzImage.efi console=ttyS0 root=/dev/sda4
34
35
36**** The "initrd=" option
37
38Like most boot loaders, the EFI stub allows the user to specify
39multiple initrd files using the "initrd=" option. This is the only EFI
40stub-specific command line parameter; everything else is passed to the
41kernel when it boots.
42
43The path to the initrd file must be an absolute path from the
44beginning of the ESP, relative path names do not work. Also, the path
45is an EFI-style path and directory elements must be separated with
46backslashes (\). For example, given the following directory layout,
47
48fs0:>
49 Kernels\
50 bzImage.efi
51 initrd-large.img
52
53 Ramdisks\
54 initrd-small.img
55 initrd-medium.img
56
57to boot with the initrd-large.img file if the current working
58directory is fs0:\Kernels, the following command must be used,
59
60 fs0:\Kernels> bzImage.efi initrd=\Kernels\initrd-large.img
61
62Notice how bzImage.efi can be specified with a relative path. That's
63because the image we're executing is interpreted by the EFI shell,
64which understands relative paths, whereas the rest of the command line
65is passed to bzImage.efi.
diff --git a/MAINTAINERS b/MAINTAINERS
index 64e675d6d478..eb22272b2116 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -579,7 +579,7 @@ F: drivers/net/appletalk/
579F: net/appletalk/ 579F: net/appletalk/
580 580
581ARASAN COMPACT FLASH PATA CONTROLLER 581ARASAN COMPACT FLASH PATA CONTROLLER
582M: Viresh Kumar <viresh.kumar@st.com> 582M: Viresh Kumar <viresh.linux@gmail.com>
583L: linux-ide@vger.kernel.org 583L: linux-ide@vger.kernel.org
584S: Maintained 584S: Maintained
585F: include/linux/pata_arasan_cf_data.h 585F: include/linux/pata_arasan_cf_data.h
@@ -1077,7 +1077,7 @@ F: drivers/media/video/s5p-fimc/
1077ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT 1077ARM/SAMSUNG S5P SERIES Multi Format Codec (MFC) SUPPORT
1078M: Kyungmin Park <kyungmin.park@samsung.com> 1078M: Kyungmin Park <kyungmin.park@samsung.com>
1079M: Kamil Debski <k.debski@samsung.com> 1079M: Kamil Debski <k.debski@samsung.com>
1080M: Jeongtae Park <jtp.park@samsung.com> 1080M: Jeongtae Park <jtp.park@samsung.com>
1081L: linux-arm-kernel@lists.infradead.org 1081L: linux-arm-kernel@lists.infradead.org
1082L: linux-media@vger.kernel.org 1082L: linux-media@vger.kernel.org
1083S: Maintained 1083S: Maintained
@@ -1646,11 +1646,11 @@ S: Maintained
1646F: drivers/gpio/gpio-bt8xx.c 1646F: drivers/gpio/gpio-bt8xx.c
1647 1647
1648BTRFS FILE SYSTEM 1648BTRFS FILE SYSTEM
1649M: Chris Mason <chris.mason@oracle.com> 1649M: Chris Mason <chris.mason@fusionio.com>
1650L: linux-btrfs@vger.kernel.org 1650L: linux-btrfs@vger.kernel.org
1651W: http://btrfs.wiki.kernel.org/ 1651W: http://btrfs.wiki.kernel.org/
1652Q: http://patchwork.kernel.org/project/linux-btrfs/list/ 1652Q: http://patchwork.kernel.org/project/linux-btrfs/list/
1653T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/btrfs-unstable.git 1653T: git git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs.git
1654S: Maintained 1654S: Maintained
1655F: Documentation/filesystems/btrfs.txt 1655F: Documentation/filesystems/btrfs.txt
1656F: fs/btrfs/ 1656F: fs/btrfs/
@@ -1743,10 +1743,10 @@ F: include/linux/can/platform/
1743CAPABILITIES 1743CAPABILITIES
1744M: Serge Hallyn <serge.hallyn@canonical.com> 1744M: Serge Hallyn <serge.hallyn@canonical.com>
1745L: linux-security-module@vger.kernel.org 1745L: linux-security-module@vger.kernel.org
1746S: Supported 1746S: Supported
1747F: include/linux/capability.h 1747F: include/linux/capability.h
1748F: security/capability.c 1748F: security/capability.c
1749F: security/commoncap.c 1749F: security/commoncap.c
1750F: kernel/capability.c 1750F: kernel/capability.c
1751 1751
1752CELL BROADBAND ENGINE ARCHITECTURE 1752CELL BROADBAND ENGINE ARCHITECTURE
@@ -1800,6 +1800,9 @@ F: include/linux/cfag12864b.h
1800CFG80211 and NL80211 1800CFG80211 and NL80211
1801M: Johannes Berg <johannes@sipsolutions.net> 1801M: Johannes Berg <johannes@sipsolutions.net>
1802L: linux-wireless@vger.kernel.org 1802L: linux-wireless@vger.kernel.org
1803W: http://wireless.kernel.org/
1804T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
1805T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
1803S: Maintained 1806S: Maintained
1804F: include/linux/nl80211.h 1807F: include/linux/nl80211.h
1805F: include/net/cfg80211.h 1808F: include/net/cfg80211.h
@@ -2146,11 +2149,11 @@ S: Orphan
2146F: drivers/net/wan/pc300* 2149F: drivers/net/wan/pc300*
2147 2150
2148CYTTSP TOUCHSCREEN DRIVER 2151CYTTSP TOUCHSCREEN DRIVER
2149M: Javier Martinez Canillas <javier@dowhile0.org> 2152M: Javier Martinez Canillas <javier@dowhile0.org>
2150L: linux-input@vger.kernel.org 2153L: linux-input@vger.kernel.org
2151S: Maintained 2154S: Maintained
2152F: drivers/input/touchscreen/cyttsp* 2155F: drivers/input/touchscreen/cyttsp*
2153F: include/linux/input/cyttsp.h 2156F: include/linux/input/cyttsp.h
2154 2157
2155DAMA SLAVE for AX.25 2158DAMA SLAVE for AX.25
2156M: Joerg Reuter <jreuter@yaina.de> 2159M: Joerg Reuter <jreuter@yaina.de>
@@ -2270,7 +2273,7 @@ F: include/linux/device-mapper.h
2270F: include/linux/dm-*.h 2273F: include/linux/dm-*.h
2271 2274
2272DIOLAN U2C-12 I2C DRIVER 2275DIOLAN U2C-12 I2C DRIVER
2273M: Guenter Roeck <guenter.roeck@ericsson.com> 2276M: Guenter Roeck <linux@roeck-us.net>
2274L: linux-i2c@vger.kernel.org 2277L: linux-i2c@vger.kernel.org
2275S: Maintained 2278S: Maintained
2276F: drivers/i2c/busses/i2c-diolan-u2c.c 2279F: drivers/i2c/busses/i2c-diolan-u2c.c
@@ -2930,6 +2933,13 @@ F: Documentation/power/freezing-of-tasks.txt
2930F: include/linux/freezer.h 2933F: include/linux/freezer.h
2931F: kernel/freezer.c 2934F: kernel/freezer.c
2932 2935
2936FRONTSWAP API
2937M: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
2938L: linux-kernel@vger.kernel.org
2939S: Maintained
2940F: mm/frontswap.c
2941F: include/linux/frontswap.h
2942
2933FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS 2943FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
2934M: David Howells <dhowells@redhat.com> 2944M: David Howells <dhowells@redhat.com>
2935L: linux-cachefs@redhat.com 2945L: linux-cachefs@redhat.com
@@ -3138,7 +3148,7 @@ F: drivers/tty/hvc/
3138 3148
3139HARDWARE MONITORING 3149HARDWARE MONITORING
3140M: Jean Delvare <khali@linux-fr.org> 3150M: Jean Delvare <khali@linux-fr.org>
3141M: Guenter Roeck <guenter.roeck@ericsson.com> 3151M: Guenter Roeck <linux@roeck-us.net>
3142L: lm-sensors@lm-sensors.org 3152L: lm-sensors@lm-sensors.org
3143W: http://www.lm-sensors.org/ 3153W: http://www.lm-sensors.org/
3144T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/ 3154T: quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
@@ -4096,6 +4106,8 @@ F: drivers/scsi/53c700*
4096LED SUBSYSTEM 4106LED SUBSYSTEM
4097M: Bryan Wu <bryan.wu@canonical.com> 4107M: Bryan Wu <bryan.wu@canonical.com>
4098M: Richard Purdie <rpurdie@rpsys.net> 4108M: Richard Purdie <rpurdie@rpsys.net>
4109L: linux-leds@vger.kernel.org
4110T: git git://git.kernel.org/pub/scm/linux/kernel/git/cooloney/linux-leds.git
4099S: Maintained 4111S: Maintained
4100F: drivers/leds/ 4112F: drivers/leds/
4101F: include/linux/leds.h 4113F: include/linux/leds.h
@@ -4340,7 +4352,8 @@ MAC80211
4340M: Johannes Berg <johannes@sipsolutions.net> 4352M: Johannes Berg <johannes@sipsolutions.net>
4341L: linux-wireless@vger.kernel.org 4353L: linux-wireless@vger.kernel.org
4342W: http://linuxwireless.org/ 4354W: http://linuxwireless.org/
4343T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git 4355T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
4356T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
4344S: Maintained 4357S: Maintained
4345F: Documentation/networking/mac80211-injection.txt 4358F: Documentation/networking/mac80211-injection.txt
4346F: include/net/mac80211.h 4359F: include/net/mac80211.h
@@ -4351,7 +4364,8 @@ M: Stefano Brivio <stefano.brivio@polimi.it>
4351M: Mattias Nissler <mattias.nissler@gmx.de> 4364M: Mattias Nissler <mattias.nissler@gmx.de>
4352L: linux-wireless@vger.kernel.org 4365L: linux-wireless@vger.kernel.org
4353W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID 4366W: http://linuxwireless.org/en/developers/Documentation/mac80211/RateControl/PID
4354T: git git://git.kernel.org/pub/scm/linux/kernel/git/linville/wireless.git 4367T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
4368T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
4355S: Maintained 4369S: Maintained
4356F: net/mac80211/rc80211_pid* 4370F: net/mac80211/rc80211_pid*
4357 4371
@@ -4411,6 +4425,13 @@ S: Orphan
4411F: drivers/video/matrox/matroxfb_* 4425F: drivers/video/matrox/matroxfb_*
4412F: include/linux/matroxfb.h 4426F: include/linux/matroxfb.h
4413 4427
4428MAX16065 HARDWARE MONITOR DRIVER
4429M: Guenter Roeck <linux@roeck-us.net>
4430L: lm-sensors@lm-sensors.org
4431S: Maintained
4432F: Documentation/hwmon/max16065
4433F: drivers/hwmon/max16065.c
4434
4414MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER 4435MAX6650 HARDWARE MONITOR AND FAN CONTROLLER DRIVER
4415M: "Hans J. Koch" <hjk@hansjkoch.de> 4436M: "Hans J. Koch" <hjk@hansjkoch.de>
4416L: lm-sensors@lm-sensors.org 4437L: lm-sensors@lm-sensors.org
@@ -5149,7 +5170,7 @@ F: drivers/leds/leds-pca9532.c
5149F: include/linux/leds-pca9532.h 5170F: include/linux/leds-pca9532.h
5150 5171
5151PCA9541 I2C BUS MASTER SELECTOR DRIVER 5172PCA9541 I2C BUS MASTER SELECTOR DRIVER
5152M: Guenter Roeck <guenter.roeck@ericsson.com> 5173M: Guenter Roeck <linux@roeck-us.net>
5153L: linux-i2c@vger.kernel.org 5174L: linux-i2c@vger.kernel.org
5154S: Maintained 5175S: Maintained
5155F: drivers/i2c/muxes/i2c-mux-pca9541.c 5176F: drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -5169,7 +5190,7 @@ S: Maintained
5169F: drivers/firmware/pcdp.* 5190F: drivers/firmware/pcdp.*
5170 5191
5171PCI ERROR RECOVERY 5192PCI ERROR RECOVERY
5172M: Linas Vepstas <linasvepstas@gmail.com> 5193M: Linas Vepstas <linasvepstas@gmail.com>
5173L: linux-pci@vger.kernel.org 5194L: linux-pci@vger.kernel.org
5174S: Supported 5195S: Supported
5175F: Documentation/PCI/pci-error-recovery.txt 5196F: Documentation/PCI/pci-error-recovery.txt
@@ -5275,7 +5296,7 @@ S: Maintained
5275F: drivers/pinctrl/ 5296F: drivers/pinctrl/
5276 5297
5277PIN CONTROLLER - ST SPEAR 5298PIN CONTROLLER - ST SPEAR
5278M: Viresh Kumar <viresh.kumar@st.com> 5299M: Viresh Kumar <viresh.linux@gmail.com>
5279L: spear-devel@list.st.com 5300L: spear-devel@list.st.com
5280L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 5301L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
5281W: http://www.st.com/spear 5302W: http://www.st.com/spear
@@ -5299,7 +5320,7 @@ F: drivers/video/fb-puv3.c
5299F: drivers/rtc/rtc-puv3.c 5320F: drivers/rtc/rtc-puv3.c
5300 5321
5301PMBUS HARDWARE MONITORING DRIVERS 5322PMBUS HARDWARE MONITORING DRIVERS
5302M: Guenter Roeck <guenter.roeck@ericsson.com> 5323M: Guenter Roeck <linux@roeck-us.net>
5303L: lm-sensors@lm-sensors.org 5324L: lm-sensors@lm-sensors.org
5304W: http://www.lm-sensors.org/ 5325W: http://www.lm-sensors.org/
5305W: http://www.roeck-us.net/linux/drivers/ 5326W: http://www.roeck-us.net/linux/drivers/
@@ -5337,7 +5358,7 @@ M: David Woodhouse <dwmw2@infradead.org>
5337T: git git://git.infradead.org/battery-2.6.git 5358T: git git://git.infradead.org/battery-2.6.git
5338S: Maintained 5359S: Maintained
5339F: include/linux/power_supply.h 5360F: include/linux/power_supply.h
5340F: drivers/power/power_supply* 5361F: drivers/power/
5341 5362
5342PNP SUPPORT 5363PNP SUPPORT
5343M: Adam Belay <abelay@mit.edu> 5364M: Adam Belay <abelay@mit.edu>
@@ -5695,6 +5716,9 @@ F: include/linux/remoteproc.h
5695RFKILL 5716RFKILL
5696M: Johannes Berg <johannes@sipsolutions.net> 5717M: Johannes Berg <johannes@sipsolutions.net>
5697L: linux-wireless@vger.kernel.org 5718L: linux-wireless@vger.kernel.org
5719W: http://wireless.kernel.org/
5720T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211.git
5721T: git git://git.kernel.org/pub/scm/linux/kernel/git/jberg/mac80211-next.git
5698S: Maintained 5722S: Maintained
5699F: Documentation/rfkill.txt 5723F: Documentation/rfkill.txt
5700F: net/rfkill/ 5724F: net/rfkill/
@@ -5849,7 +5873,7 @@ S: Maintained
5849F: drivers/tty/serial 5873F: drivers/tty/serial
5850 5874
5851SYNOPSYS DESIGNWARE DMAC DRIVER 5875SYNOPSYS DESIGNWARE DMAC DRIVER
5852M: Viresh Kumar <viresh.kumar@st.com> 5876M: Viresh Kumar <viresh.linux@gmail.com>
5853S: Maintained 5877S: Maintained
5854F: include/linux/dw_dmac.h 5878F: include/linux/dw_dmac.h
5855F: drivers/dma/dw_dmac_regs.h 5879F: drivers/dma/dw_dmac_regs.h
@@ -5997,7 +6021,7 @@ S: Maintained
5997F: drivers/mmc/host/sdhci-s3c.c 6021F: drivers/mmc/host/sdhci-s3c.c
5998 6022
5999SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER 6023SECURE DIGITAL HOST CONTROLLER INTERFACE (SDHCI) ST SPEAR DRIVER
6000M: Viresh Kumar <viresh.kumar@st.com> 6024M: Viresh Kumar <viresh.linux@gmail.com>
6001L: spear-devel@list.st.com 6025L: spear-devel@list.st.com
6002L: linux-mmc@vger.kernel.org 6026L: linux-mmc@vger.kernel.org
6003S: Maintained 6027S: Maintained
@@ -6353,7 +6377,7 @@ S: Maintained
6353F: include/linux/compiler.h 6377F: include/linux/compiler.h
6354 6378
6355SPEAR PLATFORM SUPPORT 6379SPEAR PLATFORM SUPPORT
6356M: Viresh Kumar <viresh.kumar@st.com> 6380M: Viresh Kumar <viresh.linux@gmail.com>
6357M: Shiraz Hashim <shiraz.hashim@st.com> 6381M: Shiraz Hashim <shiraz.hashim@st.com>
6358L: spear-devel@list.st.com 6382L: spear-devel@list.st.com
6359L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6383L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6362,7 +6386,7 @@ S: Maintained
6362F: arch/arm/plat-spear/ 6386F: arch/arm/plat-spear/
6363 6387
6364SPEAR13XX MACHINE SUPPORT 6388SPEAR13XX MACHINE SUPPORT
6365M: Viresh Kumar <viresh.kumar@st.com> 6389M: Viresh Kumar <viresh.linux@gmail.com>
6366M: Shiraz Hashim <shiraz.hashim@st.com> 6390M: Shiraz Hashim <shiraz.hashim@st.com>
6367L: spear-devel@list.st.com 6391L: spear-devel@list.st.com
6368L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6392L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6371,7 +6395,7 @@ S: Maintained
6371F: arch/arm/mach-spear13xx/ 6395F: arch/arm/mach-spear13xx/
6372 6396
6373SPEAR3XX MACHINE SUPPORT 6397SPEAR3XX MACHINE SUPPORT
6374M: Viresh Kumar <viresh.kumar@st.com> 6398M: Viresh Kumar <viresh.linux@gmail.com>
6375M: Shiraz Hashim <shiraz.hashim@st.com> 6399M: Shiraz Hashim <shiraz.hashim@st.com>
6376L: spear-devel@list.st.com 6400L: spear-devel@list.st.com
6377L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6401L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -6382,7 +6406,7 @@ F: arch/arm/mach-spear3xx/
6382SPEAR6XX MACHINE SUPPORT 6406SPEAR6XX MACHINE SUPPORT
6383M: Rajeev Kumar <rajeev-dlh.kumar@st.com> 6407M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
6384M: Shiraz Hashim <shiraz.hashim@st.com> 6408M: Shiraz Hashim <shiraz.hashim@st.com>
6385M: Viresh Kumar <viresh.kumar@st.com> 6409M: Viresh Kumar <viresh.linux@gmail.com>
6386L: spear-devel@list.st.com 6410L: spear-devel@list.st.com
6387L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6411L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6388W: http://www.st.com/spear 6412W: http://www.st.com/spear
@@ -6390,7 +6414,7 @@ S: Maintained
6390F: arch/arm/mach-spear6xx/ 6414F: arch/arm/mach-spear6xx/
6391 6415
6392SPEAR CLOCK FRAMEWORK SUPPORT 6416SPEAR CLOCK FRAMEWORK SUPPORT
6393M: Viresh Kumar <viresh.kumar@st.com> 6417M: Viresh Kumar <viresh.linux@gmail.com>
6394L: spear-devel@list.st.com 6418L: spear-devel@list.st.com
6395L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6419L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6396W: http://www.st.com/spear 6420W: http://www.st.com/spear
@@ -6657,7 +6681,7 @@ F: include/linux/taskstats*
6657F: kernel/taskstats.c 6681F: kernel/taskstats.c
6658 6682
6659TC CLASSIFIER 6683TC CLASSIFIER
6660M: Jamal Hadi Salim <hadi@cyberus.ca> 6684M: Jamal Hadi Salim <jhs@mojatatu.com>
6661L: netdev@vger.kernel.org 6685L: netdev@vger.kernel.org
6662S: Maintained 6686S: Maintained
6663F: include/linux/pkt_cls.h 6687F: include/linux/pkt_cls.h
@@ -7291,11 +7315,11 @@ F: Documentation/DocBook/uio-howto.tmpl
7291F: drivers/uio/ 7315F: drivers/uio/
7292F: include/linux/uio*.h 7316F: include/linux/uio*.h
7293 7317
7294UTIL-LINUX-NG PACKAGE 7318UTIL-LINUX PACKAGE
7295M: Karel Zak <kzak@redhat.com> 7319M: Karel Zak <kzak@redhat.com>
7296L: util-linux-ng@vger.kernel.org 7320L: util-linux@vger.kernel.org
7297W: http://kernel.org/~kzak/util-linux-ng/ 7321W: http://en.wikipedia.org/wiki/Util-linux
7298T: git git://git.kernel.org/pub/scm/utils/util-linux-ng/util-linux-ng.git 7322T: git git://git.kernel.org/pub/scm/utils/util-linux/util-linux.git
7299S: Maintained 7323S: Maintained
7300 7324
7301UVESAFB DRIVER 7325UVESAFB DRIVER
@@ -7397,7 +7421,7 @@ F: include/linux/vlynq.h
7397 7421
7398VME SUBSYSTEM 7422VME SUBSYSTEM
7399M: Martyn Welch <martyn.welch@ge.com> 7423M: Martyn Welch <martyn.welch@ge.com>
7400M: Manohar Vanga <manohar.vanga@cern.ch> 7424M: Manohar Vanga <manohar.vanga@gmail.com>
7401M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> 7425M: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
7402L: devel@driverdev.osuosl.org 7426L: devel@driverdev.osuosl.org
7403S: Maintained 7427S: Maintained
diff --git a/Makefile b/Makefile
index dda21c3efc7b..3fdfde2c1b7d 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
1VERSION = 3 1VERSION = 3
2PATCHLEVEL = 4 2PATCHLEVEL = 5
3SUBLEVEL = 0 3SUBLEVEL = 0
4EXTRAVERSION = 4EXTRAVERSION = -rc4
5NAME = Saber-toothed Squirrel 5NAME = Saber-toothed Squirrel
6 6
7# *DOCUMENTATION* 7# *DOCUMENTATION*
@@ -561,6 +561,8 @@ else
561KBUILD_CFLAGS += -O2 561KBUILD_CFLAGS += -O2
562endif 562endif
563 563
564include $(srctree)/arch/$(SRCARCH)/Makefile
565
564ifdef CONFIG_READABLE_ASM 566ifdef CONFIG_READABLE_ASM
565# Disable optimizations that make assembler listings hard to read. 567# Disable optimizations that make assembler listings hard to read.
566# reorder blocks reorders the control in the function 568# reorder blocks reorders the control in the function
@@ -571,8 +573,6 @@ KBUILD_CFLAGS += $(call cc-option,-fno-reorder-blocks,) \
571 $(call cc-option,-fno-partial-inlining) 573 $(call cc-option,-fno-partial-inlining)
572endif 574endif
573 575
574include $(srctree)/arch/$(SRCARCH)/Makefile
575
576ifneq ($(CONFIG_FRAME_WARN),0) 576ifneq ($(CONFIG_FRAME_WARN),0)
577KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN}) 577KBUILD_CFLAGS += $(call cc-option,-Wframe-larger-than=${CONFIG_FRAME_WARN})
578endif 578endif
diff --git a/arch/alpha/include/asm/posix_types.h b/arch/alpha/include/asm/posix_types.h
index 24779fc95994..5a8a48320efe 100644
--- a/arch/alpha/include/asm/posix_types.h
+++ b/arch/alpha/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned int __kernel_ino_t; 10typedef unsigned int __kernel_ino_t;
11#define __kernel_ino_t __kernel_ino_t 11#define __kernel_ino_t __kernel_ino_t
12 12
13typedef unsigned int __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 13typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
17 14
18#include <asm-generic/posix_types.h> 15#include <asm-generic/posix_types.h>
diff --git a/arch/alpha/kernel/signal.c b/arch/alpha/kernel/signal.c
index 10ab2d74ecbb..a8c97d42ec8e 100644
--- a/arch/alpha/kernel/signal.c
+++ b/arch/alpha/kernel/signal.c
@@ -226,7 +226,6 @@ do_sigreturn(struct sigcontext __user *sc, struct pt_regs *regs,
226 if (__get_user(set.sig[0], &sc->sc_mask)) 226 if (__get_user(set.sig[0], &sc->sc_mask))
227 goto give_sigsegv; 227 goto give_sigsegv;
228 228
229 sigdelsetmask(&set, ~_BLOCKABLE);
230 set_current_blocked(&set); 229 set_current_blocked(&set);
231 230
232 if (restore_sigcontext(sc, regs, sw)) 231 if (restore_sigcontext(sc, regs, sw))
@@ -261,7 +260,6 @@ do_rt_sigreturn(struct rt_sigframe __user *frame, struct pt_regs *regs,
261 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 260 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
262 goto give_sigsegv; 261 goto give_sigsegv;
263 262
264 sigdelsetmask(&set, ~_BLOCKABLE);
265 set_current_blocked(&set); 263 set_current_blocked(&set);
266 264
267 if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw)) 265 if (restore_sigcontext(&frame->uc.uc_mcontext, regs, sw))
@@ -468,12 +466,9 @@ static inline void
468handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, 466handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
469 struct pt_regs * regs, struct switch_stack *sw) 467 struct pt_regs * regs, struct switch_stack *sw)
470{ 468{
471 sigset_t *oldset = &current->blocked; 469 sigset_t *oldset = sigmask_to_save();
472 int ret; 470 int ret;
473 471
474 if (test_thread_flag(TIF_RESTORE_SIGMASK))
475 oldset = &current->saved_sigmask;
476
477 if (ka->sa.sa_flags & SA_SIGINFO) 472 if (ka->sa.sa_flags & SA_SIGINFO)
478 ret = setup_rt_frame(sig, ka, info, oldset, regs, sw); 473 ret = setup_rt_frame(sig, ka, info, oldset, regs, sw);
479 else 474 else
@@ -483,12 +478,7 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
483 force_sigsegv(sig, current); 478 force_sigsegv(sig, current);
484 return; 479 return;
485 } 480 }
486 block_sigmask(ka, sig); 481 signal_delivered(sig, info, ka, regs, 0);
487 /* A signal was successfully delivered, and the
488 saved sigmask was stored on the signal frame,
489 and will be restored by sigreturn. So we can
490 simply clear the restore sigmask flag. */
491 clear_thread_flag(TIF_RESTORE_SIGMASK);
492} 482}
493 483
494static inline void 484static inline void
@@ -572,9 +562,7 @@ do_signal(struct pt_regs * regs, struct switch_stack * sw,
572 } 562 }
573 563
574 /* If there's no signal to deliver, we just restore the saved mask. */ 564 /* If there's no signal to deliver, we just restore the saved mask. */
575 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) 565 restore_saved_sigmask();
576 set_current_blocked(&current->saved_sigmask);
577
578 if (single_stepping) 566 if (single_stepping)
579 ptrace_set_bpt(current); /* re-set breakpoint */ 567 ptrace_set_bpt(current); /* re-set breakpoint */
580} 568}
@@ -590,7 +578,5 @@ do_notify_resume(struct pt_regs *regs, struct switch_stack *sw,
590 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 578 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
591 clear_thread_flag(TIF_NOTIFY_RESUME); 579 clear_thread_flag(TIF_NOTIFY_RESUME);
592 tracehook_notify_resume(regs); 580 tracehook_notify_resume(regs);
593 if (current->replacement_session_keyring)
594 key_replace_session_keyring();
595 } 581 }
596} 582}
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5e7601301b41..a91009c61870 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -7,7 +7,6 @@ config ARM
7 select HAVE_IDE if PCI || ISA || PCMCIA 7 select HAVE_IDE if PCI || ISA || PCMCIA
8 select HAVE_DMA_ATTRS 8 select HAVE_DMA_ATTRS
9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7) 9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
10 select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
11 select HAVE_MEMBLOCK 10 select HAVE_MEMBLOCK
12 select RTC_LIB 11 select RTC_LIB
13 select SYS_SUPPORTS_APM_EMULATION 12 select SYS_SUPPORTS_APM_EMULATION
@@ -294,6 +293,7 @@ config ARCH_VERSATILE
294 select ICST 293 select ICST
295 select GENERIC_CLOCKEVENTS 294 select GENERIC_CLOCKEVENTS
296 select ARCH_WANT_OPTIONAL_GPIOLIB 295 select ARCH_WANT_OPTIONAL_GPIOLIB
296 select NEED_MACH_IO_H if PCI
297 select PLAT_VERSATILE 297 select PLAT_VERSATILE
298 select PLAT_VERSATILE_CLCD 298 select PLAT_VERSATILE_CLCD
299 select PLAT_VERSATILE_FPGA_IRQ 299 select PLAT_VERSATILE_FPGA_IRQ
@@ -525,7 +525,7 @@ config ARCH_IXP4XX
525 select ARCH_HAS_DMA_SET_COHERENT_MASK 525 select ARCH_HAS_DMA_SET_COHERENT_MASK
526 select CLKSRC_MMIO 526 select CLKSRC_MMIO
527 select CPU_XSCALE 527 select CPU_XSCALE
528 select GENERIC_GPIO 528 select ARCH_REQUIRE_GPIOLIB
529 select GENERIC_CLOCKEVENTS 529 select GENERIC_CLOCKEVENTS
530 select MIGHT_HAVE_PCI 530 select MIGHT_HAVE_PCI
531 select NEED_MACH_IO_H 531 select NEED_MACH_IO_H
@@ -589,6 +589,7 @@ config ARCH_ORION5X
589 select PCI 589 select PCI
590 select ARCH_REQUIRE_GPIOLIB 590 select ARCH_REQUIRE_GPIOLIB
591 select GENERIC_CLOCKEVENTS 591 select GENERIC_CLOCKEVENTS
592 select NEED_MACH_IO_H
592 select PLAT_ORION 593 select PLAT_ORION
593 help 594 help
594 Support for the following Marvell Orion 5x series SoCs: 595 Support for the following Marvell Orion 5x series SoCs:
diff --git a/arch/arm/boot/dts/db8500.dtsi b/arch/arm/boot/dts/db8500.dtsi
index 881bc3987844..4ad5160018cb 100644
--- a/arch/arm/boot/dts/db8500.dtsi
+++ b/arch/arm/boot/dts/db8500.dtsi
@@ -58,6 +58,8 @@
58 "st,nomadik-gpio"; 58 "st,nomadik-gpio";
59 reg = <0x8012e000 0x80>; 59 reg = <0x8012e000 0x80>;
60 interrupts = <0 119 0x4>; 60 interrupts = <0 119 0x4>;
61 interrupt-controller;
62 #interrupt-cells = <2>;
61 supports-sleepmode; 63 supports-sleepmode;
62 gpio-controller; 64 gpio-controller;
63 #gpio-cells = <2>; 65 #gpio-cells = <2>;
@@ -69,6 +71,8 @@
69 "st,nomadik-gpio"; 71 "st,nomadik-gpio";
70 reg = <0x8012e080 0x80>; 72 reg = <0x8012e080 0x80>;
71 interrupts = <0 120 0x4>; 73 interrupts = <0 120 0x4>;
74 interrupt-controller;
75 #interrupt-cells = <2>;
72 supports-sleepmode; 76 supports-sleepmode;
73 gpio-controller; 77 gpio-controller;
74 #gpio-cells = <2>; 78 #gpio-cells = <2>;
@@ -80,6 +84,8 @@
80 "st,nomadik-gpio"; 84 "st,nomadik-gpio";
81 reg = <0x8000e000 0x80>; 85 reg = <0x8000e000 0x80>;
82 interrupts = <0 121 0x4>; 86 interrupts = <0 121 0x4>;
87 interrupt-controller;
88 #interrupt-cells = <2>;
83 supports-sleepmode; 89 supports-sleepmode;
84 gpio-controller; 90 gpio-controller;
85 #gpio-cells = <2>; 91 #gpio-cells = <2>;
@@ -91,6 +97,8 @@
91 "st,nomadik-gpio"; 97 "st,nomadik-gpio";
92 reg = <0x8000e080 0x80>; 98 reg = <0x8000e080 0x80>;
93 interrupts = <0 122 0x4>; 99 interrupts = <0 122 0x4>;
100 interrupt-controller;
101 #interrupt-cells = <2>;
94 supports-sleepmode; 102 supports-sleepmode;
95 gpio-controller; 103 gpio-controller;
96 #gpio-cells = <2>; 104 #gpio-cells = <2>;
@@ -102,6 +110,8 @@
102 "st,nomadik-gpio"; 110 "st,nomadik-gpio";
103 reg = <0x8000e100 0x80>; 111 reg = <0x8000e100 0x80>;
104 interrupts = <0 123 0x4>; 112 interrupts = <0 123 0x4>;
113 interrupt-controller;
114 #interrupt-cells = <2>;
105 supports-sleepmode; 115 supports-sleepmode;
106 gpio-controller; 116 gpio-controller;
107 #gpio-cells = <2>; 117 #gpio-cells = <2>;
@@ -113,6 +123,8 @@
113 "st,nomadik-gpio"; 123 "st,nomadik-gpio";
114 reg = <0x8000e180 0x80>; 124 reg = <0x8000e180 0x80>;
115 interrupts = <0 124 0x4>; 125 interrupts = <0 124 0x4>;
126 interrupt-controller;
127 #interrupt-cells = <2>;
116 supports-sleepmode; 128 supports-sleepmode;
117 gpio-controller; 129 gpio-controller;
118 #gpio-cells = <2>; 130 #gpio-cells = <2>;
@@ -124,6 +136,8 @@
124 "st,nomadik-gpio"; 136 "st,nomadik-gpio";
125 reg = <0x8011e000 0x80>; 137 reg = <0x8011e000 0x80>;
126 interrupts = <0 125 0x4>; 138 interrupts = <0 125 0x4>;
139 interrupt-controller;
140 #interrupt-cells = <2>;
127 supports-sleepmode; 141 supports-sleepmode;
128 gpio-controller; 142 gpio-controller;
129 #gpio-cells = <2>; 143 #gpio-cells = <2>;
@@ -135,6 +149,8 @@
135 "st,nomadik-gpio"; 149 "st,nomadik-gpio";
136 reg = <0x8011e080 0x80>; 150 reg = <0x8011e080 0x80>;
137 interrupts = <0 126 0x4>; 151 interrupts = <0 126 0x4>;
152 interrupt-controller;
153 #interrupt-cells = <2>;
138 supports-sleepmode; 154 supports-sleepmode;
139 gpio-controller; 155 gpio-controller;
140 #gpio-cells = <2>; 156 #gpio-cells = <2>;
@@ -146,12 +162,18 @@
146 "st,nomadik-gpio"; 162 "st,nomadik-gpio";
147 reg = <0xa03fe000 0x80>; 163 reg = <0xa03fe000 0x80>;
148 interrupts = <0 127 0x4>; 164 interrupts = <0 127 0x4>;
165 interrupt-controller;
166 #interrupt-cells = <2>;
149 supports-sleepmode; 167 supports-sleepmode;
150 gpio-controller; 168 gpio-controller;
151 #gpio-cells = <2>; 169 #gpio-cells = <2>;
152 gpio-bank = <8>; 170 gpio-bank = <8>;
153 }; 171 };
154 172
173 pinctrl {
174 compatible = "stericsson,nmk_pinctrl";
175 };
176
155 usb@a03e0000 { 177 usb@a03e0000 {
156 compatible = "stericsson,db8500-musb", 178 compatible = "stericsson,db8500-musb",
157 "mentor,musb"; 179 "mentor,musb";
@@ -169,20 +191,195 @@
169 prcmu@80157000 { 191 prcmu@80157000 {
170 compatible = "stericsson,db8500-prcmu"; 192 compatible = "stericsson,db8500-prcmu";
171 reg = <0x80157000 0x1000>; 193 reg = <0x80157000 0x1000>;
172 interrupts = <46 47>; 194 interrupts = <0 47 0x4>;
173 #address-cells = <1>; 195 #address-cells = <1>;
174 #size-cells = <1>; 196 #size-cells = <1>;
175 ranges; 197 ranges;
176 198
177 prcmu-timer-4@80157450 { 199 prcmu-timer-4@80157450 {
178 compatible = "stericsson,db8500-prcmu-timer-4"; 200 compatible = "stericsson,db8500-prcmu-timer-4";
179 reg = <0x80157450 0xC>; 201 reg = <0x80157450 0xC>;
180 }; 202 };
181 203
204 db8500-prcmu-regulators {
205 compatible = "stericsson,db8500-prcmu-regulator";
206
207 // DB8500_REGULATOR_VAPE
208 db8500_vape_reg: db8500_vape {
209 regulator-name = "db8500-vape";
210 regulator-always-on;
211 };
212
213 // DB8500_REGULATOR_VARM
214 db8500_varm_reg: db8500_varm {
215 regulator-name = "db8500-varm";
216 };
217
218 // DB8500_REGULATOR_VMODEM
219 db8500_vmodem_reg: db8500_vmodem {
220 regulator-name = "db8500-vmodem";
221 };
222
223 // DB8500_REGULATOR_VPLL
224 db8500_vpll_reg: db8500_vpll {
225 regulator-name = "db8500-vpll";
226 };
227
228 // DB8500_REGULATOR_VSMPS1
229 db8500_vsmps1_reg: db8500_vsmps1 {
230 regulator-name = "db8500-vsmps1";
231 };
232
233 // DB8500_REGULATOR_VSMPS2
234 db8500_vsmps2_reg: db8500_vsmps2 {
235 regulator-name = "db8500-vsmps2";
236 };
237
238 // DB8500_REGULATOR_VSMPS3
239 db8500_vsmps3_reg: db8500_vsmps3 {
240 regulator-name = "db8500-vsmps3";
241 };
242
243 // DB8500_REGULATOR_VRF1
244 db8500_vrf1_reg: db8500_vrf1 {
245 regulator-name = "db8500-vrf1";
246 };
247
248 // DB8500_REGULATOR_SWITCH_SVAMMDSP
249 db8500_sva_mmdsp_reg: db8500_sva_mmdsp {
250 regulator-name = "db8500-sva-mmdsp";
251 };
252
253 // DB8500_REGULATOR_SWITCH_SVAMMDSPRET
254 db8500_sva_mmdsp_ret_reg: db8500_sva_mmdsp_ret {
255 regulator-name = "db8500-sva-mmdsp-ret";
256 };
257
258 // DB8500_REGULATOR_SWITCH_SVAPIPE
259 db8500_sva_pipe_reg: db8500_sva_pipe {
260 regulator-name = "db8500_sva_pipe";
261 };
262
263 // DB8500_REGULATOR_SWITCH_SIAMMDSP
264 db8500_sia_mmdsp_reg: db8500_sia_mmdsp {
265 regulator-name = "db8500_sia_mmdsp";
266 };
267
268 // DB8500_REGULATOR_SWITCH_SIAMMDSPRET
269 db8500_sia_mmdsp_ret_reg: db8500_sia_mmdsp_ret {
270 regulator-name = "db8500-sia-mmdsp-ret";
271 };
272
273 // DB8500_REGULATOR_SWITCH_SIAPIPE
274 db8500_sia_pipe_reg: db8500_sia_pipe {
275 regulator-name = "db8500-sia-pipe";
276 };
277
278 // DB8500_REGULATOR_SWITCH_SGA
279 db8500_sga_reg: db8500_sga {
280 regulator-name = "db8500-sga";
281 vin-supply = <&db8500_vape_reg>;
282 };
283
284 // DB8500_REGULATOR_SWITCH_B2R2_MCDE
285 db8500_b2r2_mcde_reg: db8500_b2r2_mcde {
286 regulator-name = "db8500-b2r2-mcde";
287 vin-supply = <&db8500_vape_reg>;
288 };
289
290 // DB8500_REGULATOR_SWITCH_ESRAM12
291 db8500_esram12_reg: db8500_esram12 {
292 regulator-name = "db8500-esram12";
293 };
294
295 // DB8500_REGULATOR_SWITCH_ESRAM12RET
296 db8500_esram12_ret_reg: db8500_esram12_ret {
297 regulator-name = "db8500-esram12-ret";
298 };
299
300 // DB8500_REGULATOR_SWITCH_ESRAM34
301 db8500_esram34_reg: db8500_esram34 {
302 regulator-name = "db8500-esram34";
303 };
304
305 // DB8500_REGULATOR_SWITCH_ESRAM34RET
306 db8500_esram34_ret_reg: db8500_esram34_ret {
307 regulator-name = "db8500-esram34-ret";
308 };
309 };
310
182 ab8500@5 { 311 ab8500@5 {
183 compatible = "stericsson,ab8500"; 312 compatible = "stericsson,ab8500";
184 reg = <5>; /* mailbox 5 is i2c */ 313 reg = <5>; /* mailbox 5 is i2c */
185 interrupts = <0 40 0x4>; 314 interrupts = <0 40 0x4>;
315
316 ab8500-regulators {
317 compatible = "stericsson,ab8500-regulator";
318
319 // supplies to the display/camera
320 ab8500_ldo_aux1_reg: ab8500_ldo_aux1 {
321 regulator-name = "V-DISPLAY";
322 regulator-min-microvolt = <2500000>;
323 regulator-max-microvolt = <2900000>;
324 regulator-boot-on;
325 /* BUG: If turned off MMC will be affected. */
326 regulator-always-on;
327 };
328
329 // supplies to the on-board eMMC
330 ab8500_ldo_aux2_reg: ab8500_ldo_aux2 {
331 regulator-name = "V-eMMC1";
332 regulator-min-microvolt = <1100000>;
333 regulator-max-microvolt = <3300000>;
334 };
335
336 // supply for VAUX3; SDcard slots
337 ab8500_ldo_aux3_reg: ab8500_ldo_aux3 {
338 regulator-name = "V-MMC-SD";
339 regulator-min-microvolt = <1100000>;
340 regulator-max-microvolt = <3300000>;
341 };
342
343 // supply for v-intcore12; VINTCORE12 LDO
344 ab8500_ldo_initcore_reg: ab8500_ldo_initcore {
345 regulator-name = "V-INTCORE";
346 };
347
348 // supply for tvout; gpadc; TVOUT LDO
349 ab8500_ldo_tvout_reg: ab8500_ldo_tvout {
350 regulator-name = "V-TVOUT";
351 };
352
353 // supply for ab8500-usb; USB LDO
354 ab8500_ldo_usb_reg: ab8500_ldo_usb {
355 regulator-name = "dummy";
356 };
357
358 // supply for ab8500-vaudio; VAUDIO LDO
359 ab8500_ldo_audio_reg: ab8500_ldo_audio {
360 regulator-name = "V-AUD";
361 };
362
363 // supply for v-anamic1 VAMic1-LDO
364 ab8500_ldo_anamic1_reg: ab8500_ldo_anamic1 {
365 regulator-name = "V-AMIC1";
366 };
367
368 // supply for v-amic2; VAMIC2 LDO; reuse constants for AMIC1
369 ab8500_ldo_amamic2_reg: ab8500_ldo_amamic2 {
370 regulator-name = "V-AMIC2";
371 };
372
373 // supply for v-dmic; VDMIC LDO
374 ab8500_ldo_dmic_reg: ab8500_ldo_dmic {
375 regulator-name = "V-DMIC";
376 };
377
378 // supply for U8500 CSI/DSI; VANA LDO
379 ab8500_ldo_ana_reg: ab8500_ldo_ana {
380 regulator-name = "V-CSI/DSI";
381 };
382 };
186 }; 383 };
187 }; 384 };
188 385
@@ -235,7 +432,8 @@
235 status = "disabled"; 432 status = "disabled";
236 433
237 // Add one of these for each child device 434 // Add one of these for each child device
238 cs-gpios = <&gpio0 31 &gpio4 14 &gpio4 16 &gpio6 22 &gpio7 0>; 435 cs-gpios = <&gpio0 31 0x4 &gpio4 14 0x4 &gpio4 16 0x4
436 &gpio6 22 0x4 &gpio7 0 0x4>;
239 437
240 }; 438 };
241 439
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index 5ca0cdb76413..4272b2949228 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -30,6 +30,22 @@
30 reg = <0x10481000 0x1000>, <0x10482000 0x2000>; 30 reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
31 }; 31 };
32 32
33 combiner:interrupt-controller@10440000 {
34 compatible = "samsung,exynos4210-combiner";
35 #interrupt-cells = <2>;
36 interrupt-controller;
37 samsung,combiner-nr = <32>;
38 reg = <0x10440000 0x1000>;
39 interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
40 <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
41 <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
42 <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>,
43 <0 16 0>, <0 17 0>, <0 18 0>, <0 19 0>,
44 <0 20 0>, <0 21 0>, <0 22 0>, <0 23 0>,
45 <0 24 0>, <0 25 0>, <0 26 0>, <0 27 0>,
46 <0 28 0>, <0 29 0>, <0 30 0>, <0 31 0>;
47 };
48
33 watchdog { 49 watchdog {
34 compatible = "samsung,s3c2410-wdt"; 50 compatible = "samsung,s3c2410-wdt";
35 reg = <0x101D0000 0x100>; 51 reg = <0x101D0000 0x100>;
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index 2b1a166d41f9..386c769c38d1 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -213,5 +213,14 @@
213 status = "disabled"; 213 status = "disabled";
214 }; 214 };
215 }; 215 };
216 nand@d8000000 {
217 #address-cells = <1>;
218 #size-cells = <1>;
219
220 compatible = "fsl,imx27-nand";
221 reg = <0xd8000000 0x1000>;
222 interrupts = <29>;
223 status = "disabled";
224 };
216 }; 225 };
217}; 226};
diff --git a/arch/arm/boot/dts/lpc32xx.dtsi b/arch/arm/boot/dts/lpc32xx.dtsi
index 2d696866f71c..3f5dad801a98 100644
--- a/arch/arm/boot/dts/lpc32xx.dtsi
+++ b/arch/arm/boot/dts/lpc32xx.dtsi
@@ -215,45 +215,8 @@
215 gpio: gpio@40028000 { 215 gpio: gpio@40028000 {
216 compatible = "nxp,lpc3220-gpio"; 216 compatible = "nxp,lpc3220-gpio";
217 reg = <0x40028000 0x1000>; 217 reg = <0x40028000 0x1000>;
218 /* create a private address space for enumeration */ 218 gpio-controller;
219 #address-cells = <1>; 219 #gpio-cells = <3>; /* bank, pin, flags */
220 #size-cells = <0>;
221
222 gpio_p0: gpio-bank@0 {
223 gpio-controller;
224 #gpio-cells = <2>;
225 reg = <0>;
226 };
227
228 gpio_p1: gpio-bank@1 {
229 gpio-controller;
230 #gpio-cells = <2>;
231 reg = <1>;
232 };
233
234 gpio_p2: gpio-bank@2 {
235 gpio-controller;
236 #gpio-cells = <2>;
237 reg = <2>;
238 };
239
240 gpio_p3: gpio-bank@3 {
241 gpio-controller;
242 #gpio-cells = <2>;
243 reg = <3>;
244 };
245
246 gpi_p3: gpio-bank@4 {
247 gpio-controller;
248 #gpio-cells = <2>;
249 reg = <4>;
250 };
251
252 gpo_p3: gpio-bank@5 {
253 gpio-controller;
254 #gpio-cells = <2>;
255 reg = <5>;
256 };
257 }; 220 };
258 221
259 watchdog@4003C000 { 222 watchdog@4003C000 {
diff --git a/arch/arm/boot/dts/mmp2-brownstone.dts b/arch/arm/boot/dts/mmp2-brownstone.dts
index 153a4b2d12b5..c9b4f27d191e 100644
--- a/arch/arm/boot/dts/mmp2-brownstone.dts
+++ b/arch/arm/boot/dts/mmp2-brownstone.dts
@@ -11,7 +11,7 @@
11/include/ "mmp2.dtsi" 11/include/ "mmp2.dtsi"
12 12
13/ { 13/ {
14 model = "Marvell MMP2 Aspenite Development Board"; 14 model = "Marvell MMP2 Brownstone Development Board";
15 compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2"; 15 compatible = "mrvl,mmp2-brownstone", "mrvl,mmp2";
16 16
17 chosen { 17 chosen {
@@ -19,7 +19,7 @@
19 }; 19 };
20 20
21 memory { 21 memory {
22 reg = <0x00000000 0x04000000>; 22 reg = <0x00000000 0x08000000>;
23 }; 23 };
24 24
25 soc { 25 soc {
diff --git a/arch/arm/boot/dts/omap2.dtsi b/arch/arm/boot/dts/omap2.dtsi
index f2ab4ea7cc0e..581cb081cb0f 100644
--- a/arch/arm/boot/dts/omap2.dtsi
+++ b/arch/arm/boot/dts/omap2.dtsi
@@ -44,6 +44,8 @@
44 compatible = "ti,omap2-intc"; 44 compatible = "ti,omap2-intc";
45 interrupt-controller; 45 interrupt-controller;
46 #interrupt-cells = <1>; 46 #interrupt-cells = <1>;
47 ti,intc-size = <96>;
48 reg = <0x480FE000 0x1000>;
47 }; 49 };
48 50
49 uart1: serial@4806a000 { 51 uart1: serial@4806a000 {
diff --git a/arch/arm/boot/dts/phy3250.dts b/arch/arm/boot/dts/phy3250.dts
index 0167e86314c0..c4ff6d1a018b 100644
--- a/arch/arm/boot/dts/phy3250.dts
+++ b/arch/arm/boot/dts/phy3250.dts
@@ -131,13 +131,13 @@
131 compatible = "gpio-leds"; 131 compatible = "gpio-leds";
132 132
133 led0 { 133 led0 {
134 gpios = <&gpo_p3 1 1>; /* GPO_P3 1, GPIO 80, active low */ 134 gpios = <&gpio 5 1 1>; /* GPO_P3 1, GPIO 80, active low */
135 linux,default-trigger = "heartbeat"; 135 linux,default-trigger = "heartbeat";
136 default-state = "off"; 136 default-state = "off";
137 }; 137 };
138 138
139 led1 { 139 led1 {
140 gpios = <&gpo_p3 14 1>; /* GPO_P3 14, GPIO 93, active low */ 140 gpios = <&gpio 5 14 1>; /* GPO_P3 14, GPIO 93, active low */
141 linux,default-trigger = "timer"; 141 linux,default-trigger = "timer";
142 default-state = "off"; 142 default-state = "off";
143 }; 143 };
diff --git a/arch/arm/boot/dts/snowball.dts b/arch/arm/boot/dts/snowball.dts
index d99dc04f0d91..ec3c33975110 100644
--- a/arch/arm/boot/dts/snowball.dts
+++ b/arch/arm/boot/dts/snowball.dts
@@ -20,6 +20,16 @@
20 reg = <0x00000000 0x20000000>; 20 reg = <0x00000000 0x20000000>;
21 }; 21 };
22 22
23 en_3v3_reg: en_3v3 {
24 compatible = "regulator-fixed";
25 regulator-name = "en-3v3-fixed-supply";
26 regulator-min-microvolt = <3300000>;
27 regulator-max-microvolt = <3300000>;
28 gpios = <&gpio0 26 0x4>; // 26
29 startup-delay-us = <5000>;
30 enable-active-high;
31 };
32
23 gpio_keys { 33 gpio_keys {
24 compatible = "gpio-keys"; 34 compatible = "gpio-keys";
25 #address-cells = <1>; 35 #address-cells = <1>;
@@ -30,35 +40,35 @@
30 wakeup = <1>; 40 wakeup = <1>;
31 linux,code = <2>; 41 linux,code = <2>;
32 label = "userpb"; 42 label = "userpb";
33 gpios = <&gpio1 0 0>; 43 gpios = <&gpio1 0 0x4>;
34 }; 44 };
35 button@2 { 45 button@2 {
36 debounce_interval = <50>; 46 debounce_interval = <50>;
37 wakeup = <1>; 47 wakeup = <1>;
38 linux,code = <3>; 48 linux,code = <3>;
39 label = "extkb1"; 49 label = "extkb1";
40 gpios = <&gpio4 23 0>; 50 gpios = <&gpio4 23 0x4>;
41 }; 51 };
42 button@3 { 52 button@3 {
43 debounce_interval = <50>; 53 debounce_interval = <50>;
44 wakeup = <1>; 54 wakeup = <1>;
45 linux,code = <4>; 55 linux,code = <4>;
46 label = "extkb2"; 56 label = "extkb2";
47 gpios = <&gpio4 24 0>; 57 gpios = <&gpio4 24 0x4>;
48 }; 58 };
49 button@4 { 59 button@4 {
50 debounce_interval = <50>; 60 debounce_interval = <50>;
51 wakeup = <1>; 61 wakeup = <1>;
52 linux,code = <5>; 62 linux,code = <5>;
53 label = "extkb3"; 63 label = "extkb3";
54 gpios = <&gpio5 1 0>; 64 gpios = <&gpio5 1 0x4>;
55 }; 65 };
56 button@5 { 66 button@5 {
57 debounce_interval = <50>; 67 debounce_interval = <50>;
58 wakeup = <1>; 68 wakeup = <1>;
59 linux,code = <6>; 69 linux,code = <6>;
60 label = "extkb4"; 70 label = "extkb4";
61 gpios = <&gpio5 2 0>; 71 gpios = <&gpio5 2 0x4>;
62 }; 72 };
63 }; 73 };
64 74
@@ -66,12 +76,11 @@
66 compatible = "gpio-leds"; 76 compatible = "gpio-leds";
67 used-led { 77 used-led {
68 label = "user_led"; 78 label = "user_led";
69 gpios = <&gpio4 14>; 79 gpios = <&gpio4 14 0x4>;
70 }; 80 };
71 }; 81 };
72 82
73 soc-u9500 { 83 soc-u9500 {
74
75 external-bus@50000000 { 84 external-bus@50000000 {
76 status = "okay"; 85 status = "okay";
77 86
@@ -80,6 +89,9 @@
80 reg = <0 0x10000>; 89 reg = <0 0x10000>;
81 interrupts = <12 0x1>; 90 interrupts = <12 0x1>;
82 interrupt-parent = <&gpio4>; 91 interrupt-parent = <&gpio4>;
92 vdd33a-supply = <&en_3v3_reg>;
93 vddvario-supply = <&db8500_vape_reg>;
94
83 95
84 reg-shift = <1>; 96 reg-shift = <1>;
85 reg-io-width = <2>; 97 reg-io-width = <2>;
@@ -91,11 +103,13 @@
91 103
92 sdi@80126000 { 104 sdi@80126000 {
93 status = "enabled"; 105 status = "enabled";
94 cd-gpios = <&gpio6 26>; 106 vmmc-supply = <&ab8500_ldo_aux3_reg>;
107 cd-gpios = <&gpio6 26 0x4>; // 218
95 }; 108 };
96 109
97 sdi@80114000 { 110 sdi@80114000 {
98 status = "enabled"; 111 status = "enabled";
112 vmmc-supply = <&ab8500_ldo_aux2_reg>;
99 }; 113 };
100 114
101 uart@80120000 { 115 uart@80120000 {
@@ -114,7 +128,7 @@
114 tc3589x@42 { 128 tc3589x@42 {
115 //compatible = "tc3589x"; 129 //compatible = "tc3589x";
116 reg = <0x42>; 130 reg = <0x42>;
117 interrupts = <25>; 131 gpios = <&gpio6 25 0x4>;
118 interrupt-parent = <&gpio6>; 132 interrupt-parent = <&gpio6>;
119 }; 133 };
120 tps61052@33 { 134 tps61052@33 {
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
index 8314e4171884..dd4358bc26e2 100644
--- a/arch/arm/boot/dts/spear1310-evb.dts
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
 2 * DTS file for SPEAr1310 Evaluation Board 2 * DTS file for SPEAr1310 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index 9e61da404d57..419ea7413d23 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr1310 SoCs 2 * DTS file for all SPEAr1310 SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
index 0d8472e5ab9f..c9a54e06fb68 100644
--- a/arch/arm/boot/dts/spear1340-evb.dts
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
 2 * DTS file for SPEAr1340 Evaluation Board 2 * DTS file for SPEAr1340 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index a26fc47a55e8..d71fe2a68f09 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr1340 SoCs 2 * DTS file for all SPEAr1340 SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
index 1f8e1e1481df..10dcec7e7321 100644
--- a/arch/arm/boot/dts/spear13xx.dtsi
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr13xx SoCs 2 * DTS file for all SPEAr13xx SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index fc82b1a26458..d71b8d581e3d 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
 2 * DTS file for SPEAr300 Evaluation Board 2 * DTS file for SPEAr300 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear300.dtsi b/arch/arm/boot/dts/spear300.dtsi
index 01c5e358fdb2..ed3627c116cc 100644
--- a/arch/arm/boot/dts/spear300.dtsi
+++ b/arch/arm/boot/dts/spear300.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr300 SoC 2 * DTS file for SPEAr300 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index dc5e2d445a93..b00544e0cd5d 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
 2 * DTS file for SPEAr310 Evaluation Board 2 * DTS file for SPEAr310 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear310.dtsi b/arch/arm/boot/dts/spear310.dtsi
index e47081c494d9..62fc4fb3e5f9 100644
--- a/arch/arm/boot/dts/spear310.dtsi
+++ b/arch/arm/boot/dts/spear310.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr310 SoC 2 * DTS file for SPEAr310 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 6308fa3bec1e..c13fd1f3b09f 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -1,7 +1,7 @@
1/* 1/*
 2 * DTS file for SPEAr320 Evaluation Board 2 * DTS file for SPEAr320 Evaluation Board
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear320.dtsi b/arch/arm/boot/dts/spear320.dtsi
index 5372ca399b1f..1f49d69595a0 100644
--- a/arch/arm/boot/dts/spear320.dtsi
+++ b/arch/arm/boot/dts/spear320.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for SPEAr320 SoC 2 * DTS file for SPEAr320 SoC
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 91072553963f..3a8bb5736928 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -1,7 +1,7 @@
1/* 1/*
2 * DTS file for all SPEAr3xx SoCs 2 * DTS file for all SPEAr3xx SoCs
3 * 3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com> 4 * Copyright 2012 Viresh Kumar <viresh.linux@gmail.com>
5 * 5 *
6 * The code contained herein is licensed under the GNU General Public 6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License 7 * License. You may obtain a copy of the GNU General Public License
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
index 941b161ab78c..7e1091d91af8 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca15-tc1.dts
@@ -73,7 +73,10 @@
73 #address-cells = <0>; 73 #address-cells = <0>;
74 interrupt-controller; 74 interrupt-controller;
75 reg = <0x2c001000 0x1000>, 75 reg = <0x2c001000 0x1000>,
76 <0x2c002000 0x100>; 76 <0x2c002000 0x1000>,
77 <0x2c004000 0x2000>,
78 <0x2c006000 0x2000>;
79 interrupts = <1 9 0xf04>;
77 }; 80 };
78 81
79 memory-controller@7ffd0000 { 82 memory-controller@7ffd0000 {
@@ -93,6 +96,14 @@
93 <0 91 4>; 96 <0 91 4>;
94 }; 97 };
95 98
99 timer {
100 compatible = "arm,armv7-timer";
101 interrupts = <1 13 0xf08>,
102 <1 14 0xf08>,
103 <1 11 0xf08>,
104 <1 10 0xf08>;
105 };
106
96 pmu { 107 pmu {
97 compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu"; 108 compatible = "arm,cortex-a15-pmu", "arm,cortex-a9-pmu";
98 interrupts = <0 68 4>, 109 interrupts = <0 68 4>,
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
index 6905e66d4748..18917a0f8604 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca5s.dts
@@ -77,13 +77,18 @@
77 77
78 timer@2c000600 { 78 timer@2c000600 {
79 compatible = "arm,cortex-a5-twd-timer"; 79 compatible = "arm,cortex-a5-twd-timer";
80 reg = <0x2c000600 0x38>; 80 reg = <0x2c000600 0x20>;
81 interrupts = <1 2 0x304>, 81 interrupts = <1 13 0x304>;
82 <1 3 0x304>; 82 };
83
84 watchdog@2c000620 {
85 compatible = "arm,cortex-a5-twd-wdt";
86 reg = <0x2c000620 0x20>;
87 interrupts = <1 14 0x304>;
83 }; 88 };
84 89
85 gic: interrupt-controller@2c001000 { 90 gic: interrupt-controller@2c001000 {
86 compatible = "arm,corex-a5-gic", "arm,cortex-a9-gic"; 91 compatible = "arm,cortex-a5-gic", "arm,cortex-a9-gic";
87 #interrupt-cells = <3>; 92 #interrupt-cells = <3>;
88 #address-cells = <0>; 93 #address-cells = <0>;
89 interrupt-controller; 94 interrupt-controller;
diff --git a/arch/arm/boot/dts/vexpress-v2p-ca9.dts b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
index da778693be54..3f0c736d31d6 100644
--- a/arch/arm/boot/dts/vexpress-v2p-ca9.dts
+++ b/arch/arm/boot/dts/vexpress-v2p-ca9.dts
@@ -105,8 +105,13 @@
105 timer@1e000600 { 105 timer@1e000600 {
106 compatible = "arm,cortex-a9-twd-timer"; 106 compatible = "arm,cortex-a9-twd-timer";
107 reg = <0x1e000600 0x20>; 107 reg = <0x1e000600 0x20>;
108 interrupts = <1 2 0xf04>, 108 interrupts = <1 13 0xf04>;
109 <1 3 0xf04>; 109 };
110
111 watchdog@1e000620 {
112 compatible = "arm,cortex-a9-twd-wdt";
113 reg = <0x1e000620 0x20>;
114 interrupts = <1 14 0xf04>;
110 }; 115 };
111 116
112 gic: interrupt-controller@1e001000 { 117 gic: interrupt-controller@1e001000 {
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 9d7eb530f95f..aa07f5938f05 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -366,8 +366,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
366 struct safe_buffer *buf; 366 struct safe_buffer *buf;
367 unsigned long off; 367 unsigned long off;
368 368
369 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 369 dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
370 __func__, addr, off, sz, dir); 370 __func__, addr, sz, dir);
371 371
372 buf = find_safe_buffer_dev(dev, addr, __func__); 372 buf = find_safe_buffer_dev(dev, addr, __func__);
373 if (!buf) 373 if (!buf)
@@ -377,8 +377,8 @@ static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
377 377
378 BUG_ON(buf->direction != dir); 378 BUG_ON(buf->direction != dir);
379 379
380 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 380 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
381 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), 381 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
382 buf->safe, buf->safe_dma_addr); 382 buf->safe, buf->safe_dma_addr);
383 383
384 DO_STATS(dev->archdata.dmabounce->bounce_count++); 384 DO_STATS(dev->archdata.dmabounce->bounce_count++);
@@ -406,8 +406,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
406 struct safe_buffer *buf; 406 struct safe_buffer *buf;
407 unsigned long off; 407 unsigned long off;
408 408
409 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 409 dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
410 __func__, addr, off, sz, dir); 410 __func__, addr, sz, dir);
411 411
412 buf = find_safe_buffer_dev(dev, addr, __func__); 412 buf = find_safe_buffer_dev(dev, addr, __func__);
413 if (!buf) 413 if (!buf)
@@ -417,8 +417,8 @@ static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
417 417
418 BUG_ON(buf->direction != dir); 418 BUG_ON(buf->direction != dir);
419 419
420 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 420 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
421 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), 421 __func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
422 buf->safe, buf->safe_dma_addr); 422 buf->safe, buf->safe_dma_addr);
423 423
424 DO_STATS(dev->archdata.dmabounce->bounce_count++); 424 DO_STATS(dev->archdata.dmabounce->bounce_count++);
diff --git a/arch/arm/configs/u8500_defconfig b/arch/arm/configs/u8500_defconfig
index 7e84f453e8a6..2d4f661d1cf6 100644
--- a/arch/arm/configs/u8500_defconfig
+++ b/arch/arm/configs/u8500_defconfig
@@ -75,6 +75,7 @@ CONFIG_AB5500_CORE=y
75CONFIG_AB8500_CORE=y 75CONFIG_AB8500_CORE=y
76CONFIG_REGULATOR=y 76CONFIG_REGULATOR=y
77CONFIG_REGULATOR_AB8500=y 77CONFIG_REGULATOR_AB8500=y
78CONFIG_REGULATOR_FIXED_VOLTAGE=y
78# CONFIG_HID_SUPPORT is not set 79# CONFIG_HID_SUPPORT is not set
79CONFIG_USB_GADGET=y 80CONFIG_USB_GADGET=y
80CONFIG_AB8500_USB=y 81CONFIG_AB8500_USB=y
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index 7be54690aeec..e42cf597f6e6 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -19,6 +19,7 @@
19 " .long 1b, 4f, 2b, 4f\n" \ 19 " .long 1b, 4f, 2b, 4f\n" \
20 " .popsection\n" \ 20 " .popsection\n" \
21 " .pushsection .fixup,\"ax\"\n" \ 21 " .pushsection .fixup,\"ax\"\n" \
22 " .align 2\n" \
22 "4: mov %0, " err_reg "\n" \ 23 "4: mov %0, " err_reg "\n" \
23 " b 3b\n" \ 24 " b 3b\n" \
24 " .popsection" 25 " .popsection"
diff --git a/arch/arm/include/asm/hardware/sp810.h b/arch/arm/include/asm/hardware/sp810.h
index e0d1c0cfa548..6b9b077d86b3 100644
--- a/arch/arm/include/asm/hardware/sp810.h
+++ b/arch/arm/include/asm/hardware/sp810.h
@@ -4,7 +4,7 @@
4 * ARM PrimeXsys System Controller SP810 header file 4 * ARM PrimeXsys System Controller SP810 header file
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/include/asm/posix_types.h b/arch/arm/include/asm/posix_types.h
index efdf99045d87..d2de9cbbcd9b 100644
--- a/arch/arm/include/asm/posix_types.h
+++ b/arch/arm/include/asm/posix_types.h
@@ -22,9 +22,6 @@
22typedef unsigned short __kernel_mode_t; 22typedef unsigned short __kernel_mode_t;
23#define __kernel_mode_t __kernel_mode_t 23#define __kernel_mode_t __kernel_mode_t
24 24
25typedef unsigned short __kernel_nlink_t;
26#define __kernel_nlink_t __kernel_nlink_t
27
28typedef unsigned short __kernel_ipc_pid_t; 25typedef unsigned short __kernel_ipc_pid_t;
29#define __kernel_ipc_pid_t __kernel_ipc_pid_t 26#define __kernel_ipc_pid_t __kernel_ipc_pid_t
30 27
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 437f0c426517..0d1851ca6eb9 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -495,6 +495,7 @@ ENDPROC(__und_usr)
495 * The out of line fixup for the ldrt above. 495 * The out of line fixup for the ldrt above.
496 */ 496 */
497 .pushsection .fixup, "ax" 497 .pushsection .fixup, "ax"
498 .align 2
4984: mov pc, r9 4994: mov pc, r9
499 .popsection 500 .popsection
500 .pushsection __ex_table,"a" 501 .pushsection __ex_table,"a"
diff --git a/arch/arm/kernel/kprobes-thumb.c b/arch/arm/kernel/kprobes-thumb.c
index 8f96ec778e8d..6123daf397a7 100644
--- a/arch/arm/kernel/kprobes-thumb.c
+++ b/arch/arm/kernel/kprobes-thumb.c
@@ -660,7 +660,7 @@ static const union decode_item t32_table_1111_100x[] = {
660 /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */ 660 /* LDRSB (literal) 1111 1001 x001 1111 xxxx xxxx xxxx xxxx */
661 /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */ 661 /* LDRH (literal) 1111 1000 x011 1111 xxxx xxxx xxxx xxxx */
662 /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */ 662 /* LDRSH (literal) 1111 1001 x011 1111 xxxx xxxx xxxx xxxx */
663 DECODE_EMULATEX (0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal, 663 DECODE_SIMULATEX(0xfe5f0000, 0xf81f0000, t32_simulate_ldr_literal,
664 REGS(PC, NOSPPCX, 0, 0, 0)), 664 REGS(PC, NOSPPCX, 0, 0, 0)),
665 665
666 /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */ 666 /* STRB (immediate) 1111 1000 0000 xxxx xxxx 1xxx xxxx xxxx */
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 17fc36c41cff..fd2392a17ac1 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -22,8 +22,6 @@
22 22
23#include "signal.h" 23#include "signal.h"
24 24
25#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
26
27/* 25/*
28 * For ARM syscalls, we encode the syscall number into the instruction. 26 * For ARM syscalls, we encode the syscall number into the instruction.
29 */ 27 */
@@ -210,10 +208,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
210 int err; 208 int err;
211 209
212 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); 210 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
213 if (err == 0) { 211 if (err == 0)
214 sigdelsetmask(&set, ~_BLOCKABLE);
215 set_current_blocked(&set); 212 set_current_blocked(&set);
216 }
217 213
218 __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err); 214 __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
219 __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err); 215 __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
@@ -528,13 +524,13 @@ setup_rt_frame(int usig, struct k_sigaction *ka, siginfo_t *info,
528/* 524/*
529 * OK, we're invoking a handler 525 * OK, we're invoking a handler
530 */ 526 */
531static int 527static void
532handle_signal(unsigned long sig, struct k_sigaction *ka, 528handle_signal(unsigned long sig, struct k_sigaction *ka,
533 siginfo_t *info, sigset_t *oldset, 529 siginfo_t *info, struct pt_regs *regs)
534 struct pt_regs * regs)
535{ 530{
536 struct thread_info *thread = current_thread_info(); 531 struct thread_info *thread = current_thread_info();
537 struct task_struct *tsk = current; 532 struct task_struct *tsk = current;
533 sigset_t *oldset = sigmask_to_save();
538 int usig = sig; 534 int usig = sig;
539 int ret; 535 int ret;
540 536
@@ -559,17 +555,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
559 555
560 if (ret != 0) { 556 if (ret != 0) {
561 force_sigsegv(sig, tsk); 557 force_sigsegv(sig, tsk);
562 return ret; 558 return;
563 } 559 }
564 560 signal_delivered(sig, info, ka, regs, 0);
565 /*
566 * Block the signal if we were successful.
567 */
568 block_sigmask(ka, sig);
569
570 tracehook_signal_handler(sig, info, ka, regs, 0);
571
572 return 0;
573} 561}
574 562
575/* 563/*
@@ -617,8 +605,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
617 */ 605 */
618 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 606 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
619 if (signr > 0) { 607 if (signr > 0) {
620 sigset_t *oldset;
621
622 /* 608 /*
623 * Depending on the signal settings we may need to revert the 609 * Depending on the signal settings we may need to revert the
624 * decision to restart the system call. But skip this if a 610 * decision to restart the system call. But skip this if a
@@ -635,20 +621,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
635 clear_thread_flag(TIF_SYSCALL_RESTARTSYS); 621 clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
636 } 622 }
637 623
638 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 624 handle_signal(signr, &ka, &info, regs);
639 oldset = &current->saved_sigmask;
640 else
641 oldset = &current->blocked;
642 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) {
643 /*
644 * A signal was successfully delivered; the saved
645 * sigmask will have been stored in the signal frame,
646 * and will be restored by sigreturn, so we can simply
647 * clear the TIF_RESTORE_SIGMASK flag.
648 */
649 if (test_thread_flag(TIF_RESTORE_SIGMASK))
650 clear_thread_flag(TIF_RESTORE_SIGMASK);
651 }
652 return; 625 return;
653 } 626 }
654 627
@@ -663,11 +636,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
663 set_thread_flag(TIF_SYSCALL_RESTARTSYS); 636 set_thread_flag(TIF_SYSCALL_RESTARTSYS);
664 } 637 }
665 638
666 /* If there's no signal to deliver, we just put the saved sigmask 639 restore_saved_sigmask();
667 * back.
668 */
669 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
670 set_current_blocked(&current->saved_sigmask);
671} 640}
672 641
673asmlinkage void 642asmlinkage void
@@ -679,7 +648,5 @@ do_notify_resume(struct pt_regs *regs, unsigned int thread_flags, int syscall)
679 if (thread_flags & _TIF_NOTIFY_RESUME) { 648 if (thread_flags & _TIF_NOTIFY_RESUME) {
680 clear_thread_flag(TIF_NOTIFY_RESUME); 649 clear_thread_flag(TIF_NOTIFY_RESUME);
681 tracehook_notify_resume(regs); 650 tracehook_notify_resume(regs);
682 if (current->replacement_session_keyring)
683 key_replace_session_keyring();
684 } 651 }
685} 652}
diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
index b735521a4a54..2c7217d971db 100644
--- a/arch/arm/kernel/smp.c
+++ b/arch/arm/kernel/smp.c
@@ -109,7 +109,6 @@ static void percpu_timer_stop(void);
109int __cpu_disable(void) 109int __cpu_disable(void)
110{ 110{
111 unsigned int cpu = smp_processor_id(); 111 unsigned int cpu = smp_processor_id();
112 struct task_struct *p;
113 int ret; 112 int ret;
114 113
115 ret = platform_cpu_disable(cpu); 114 ret = platform_cpu_disable(cpu);
@@ -139,12 +138,7 @@ int __cpu_disable(void)
139 flush_cache_all(); 138 flush_cache_all();
140 local_flush_tlb_all(); 139 local_flush_tlb_all();
141 140
142 read_lock(&tasklist_lock); 141 clear_tasks_mm_cpumask(cpu);
143 for_each_process(p) {
144 if (p->mm)
145 cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
146 }
147 read_unlock(&tasklist_lock);
148 142
149 return 0; 143 return 0;
150} 144}
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index eb282378fa78..01abd3516a77 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -82,8 +82,6 @@ static int snappercl15_nand_dev_ready(struct mtd_info *mtd)
82 return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY); 82 return !!(__raw_readw(NAND_CTRL_ADDR(chip)) & SNAPPERCL15_NAND_RDY);
83} 83}
84 84
85static const char *snappercl15_nand_part_probes[] = {"cmdlinepart", NULL};
86
87static struct mtd_partition snappercl15_nand_parts[] = { 85static struct mtd_partition snappercl15_nand_parts[] = {
88 { 86 {
89 .name = "Kernel", 87 .name = "Kernel",
@@ -100,10 +98,8 @@ static struct mtd_partition snappercl15_nand_parts[] = {
100static struct platform_nand_data snappercl15_nand_data = { 98static struct platform_nand_data snappercl15_nand_data = {
101 .chip = { 99 .chip = {
102 .nr_chips = 1, 100 .nr_chips = 1,
103 .part_probe_types = snappercl15_nand_part_probes,
104 .partitions = snappercl15_nand_parts, 101 .partitions = snappercl15_nand_parts,
105 .nr_partitions = ARRAY_SIZE(snappercl15_nand_parts), 102 .nr_partitions = ARRAY_SIZE(snappercl15_nand_parts),
106 .options = NAND_NO_AUTOINCR,
107 .chip_delay = 25, 103 .chip_delay = 25,
108 }, 104 },
109 .ctrl = { 105 .ctrl = {
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index d4ef339d961e..75cab2d7ec73 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -105,8 +105,6 @@ static int ts72xx_nand_device_ready(struct mtd_info *mtd)
105 return !!(__raw_readb(addr) & 0x20); 105 return !!(__raw_readb(addr) & 0x20);
106} 106}
107 107
108static const char *ts72xx_nand_part_probes[] = { "cmdlinepart", NULL };
109
110#define TS72XX_BOOTROM_PART_SIZE (SZ_16K) 108#define TS72XX_BOOTROM_PART_SIZE (SZ_16K)
111#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M) 109#define TS72XX_REDBOOT_PART_SIZE (SZ_2M + SZ_1M)
112 110
@@ -134,7 +132,6 @@ static struct platform_nand_data ts72xx_nand_data = {
134 .nr_chips = 1, 132 .nr_chips = 1,
135 .chip_offset = 0, 133 .chip_offset = 0,
136 .chip_delay = 15, 134 .chip_delay = 15,
137 .part_probe_types = ts72xx_nand_part_probes,
138 .partitions = ts72xx_nand_parts, 135 .partitions = ts72xx_nand_parts,
139 .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts), 136 .nr_partitions = ARRAY_SIZE(ts72xx_nand_parts),
140 }, 137 },
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 43ebe9094411..573be57d3d28 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -62,6 +62,8 @@ config SOC_EXYNOS5250
62 default y 62 default y
63 depends on ARCH_EXYNOS5 63 depends on ARCH_EXYNOS5
64 select SAMSUNG_DMADEV 64 select SAMSUNG_DMADEV
65 select S5P_PM if PM
66 select S5P_SLEEP if PM
65 help 67 help
66 Enable EXYNOS5250 SoC support 68 Enable EXYNOS5250 SoC support
67 69
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 440a637c76f1..9b58024f7d43 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -22,7 +22,7 @@ obj-$(CONFIG_PM) += pm.o
22obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o 22obj-$(CONFIG_PM_GENERIC_DOMAINS) += pm_domains.o
23obj-$(CONFIG_CPU_IDLE) += cpuidle.o 23obj-$(CONFIG_CPU_IDLE) += cpuidle.o
24 24
25obj-$(CONFIG_ARCH_EXYNOS4) += pmu.o 25obj-$(CONFIG_ARCH_EXYNOS) += pmu.o
26 26
27obj-$(CONFIG_SMP) += platsmp.o headsmp.o 27obj-$(CONFIG_SMP) += platsmp.o headsmp.o
28 28
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 5aa460b01fdf..fefa336be2b4 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -30,7 +30,56 @@
30 30
31#ifdef CONFIG_PM_SLEEP 31#ifdef CONFIG_PM_SLEEP
32static struct sleep_save exynos5_clock_save[] = { 32static struct sleep_save exynos5_clock_save[] = {
33 /* will be implemented */ 33 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_TOP),
34 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_GSCL),
35 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_DISP1_0),
36 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_FSYS),
37 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_MAUDIO),
38 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC0),
39 SAVE_ITEM(EXYNOS5_CLKSRC_MASK_PERIC1),
40 SAVE_ITEM(EXYNOS5_CLKGATE_IP_GSCL),
41 SAVE_ITEM(EXYNOS5_CLKGATE_IP_DISP1),
42 SAVE_ITEM(EXYNOS5_CLKGATE_IP_MFC),
43 SAVE_ITEM(EXYNOS5_CLKGATE_IP_G3D),
44 SAVE_ITEM(EXYNOS5_CLKGATE_IP_GEN),
45 SAVE_ITEM(EXYNOS5_CLKGATE_IP_FSYS),
46 SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIC),
47 SAVE_ITEM(EXYNOS5_CLKGATE_IP_PERIS),
48 SAVE_ITEM(EXYNOS5_CLKGATE_BLOCK),
49 SAVE_ITEM(EXYNOS5_CLKDIV_TOP0),
50 SAVE_ITEM(EXYNOS5_CLKDIV_TOP1),
51 SAVE_ITEM(EXYNOS5_CLKDIV_GSCL),
52 SAVE_ITEM(EXYNOS5_CLKDIV_DISP1_0),
53 SAVE_ITEM(EXYNOS5_CLKDIV_GEN),
54 SAVE_ITEM(EXYNOS5_CLKDIV_MAUDIO),
55 SAVE_ITEM(EXYNOS5_CLKDIV_FSYS0),
56 SAVE_ITEM(EXYNOS5_CLKDIV_FSYS1),
57 SAVE_ITEM(EXYNOS5_CLKDIV_FSYS2),
58 SAVE_ITEM(EXYNOS5_CLKDIV_FSYS3),
59 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC0),
60 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC1),
61 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC2),
62 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC3),
63 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC4),
64 SAVE_ITEM(EXYNOS5_CLKDIV_PERIC5),
65 SAVE_ITEM(EXYNOS5_SCLK_DIV_ISP),
66 SAVE_ITEM(EXYNOS5_CLKSRC_TOP0),
67 SAVE_ITEM(EXYNOS5_CLKSRC_TOP1),
68 SAVE_ITEM(EXYNOS5_CLKSRC_TOP2),
69 SAVE_ITEM(EXYNOS5_CLKSRC_TOP3),
70 SAVE_ITEM(EXYNOS5_CLKSRC_GSCL),
71 SAVE_ITEM(EXYNOS5_CLKSRC_DISP1_0),
72 SAVE_ITEM(EXYNOS5_CLKSRC_MAUDIO),
73 SAVE_ITEM(EXYNOS5_CLKSRC_FSYS),
74 SAVE_ITEM(EXYNOS5_CLKSRC_PERIC0),
75 SAVE_ITEM(EXYNOS5_CLKSRC_PERIC1),
76 SAVE_ITEM(EXYNOS5_SCLK_SRC_ISP),
77 SAVE_ITEM(EXYNOS5_EPLL_CON0),
78 SAVE_ITEM(EXYNOS5_EPLL_CON1),
79 SAVE_ITEM(EXYNOS5_EPLL_CON2),
80 SAVE_ITEM(EXYNOS5_VPLL_CON0),
81 SAVE_ITEM(EXYNOS5_VPLL_CON1),
82 SAVE_ITEM(EXYNOS5_VPLL_CON2),
34}; 83};
35#endif 84#endif
36 85
diff --git a/arch/arm/mach-exynos/cpuidle.c b/arch/arm/mach-exynos/cpuidle.c
index 26dac2893b8e..cff0595d0d35 100644
--- a/arch/arm/mach-exynos/cpuidle.c
+++ b/arch/arm/mach-exynos/cpuidle.c
@@ -100,7 +100,7 @@ static int exynos4_enter_core0_aftr(struct cpuidle_device *dev,
100 exynos4_set_wakeupmask(); 100 exynos4_set_wakeupmask();
101 101
102 /* Set value of power down register for aftr mode */ 102 /* Set value of power down register for aftr mode */
103 exynos4_sys_powerdown_conf(SYS_AFTR); 103 exynos_sys_powerdown_conf(SYS_AFTR);
104 104
105 __raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR); 105 __raw_writel(virt_to_phys(s3c_cpu_resume), REG_DIRECTGO_ADDR);
106 __raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG); 106 __raw_writel(S5P_CHECK_AFTR, REG_DIRECTGO_FLAG);
diff --git a/arch/arm/mach-exynos/include/mach/pm-core.h b/arch/arm/mach-exynos/include/mach/pm-core.h
index 9d8da51e35ca..a67ecfaf1216 100644
--- a/arch/arm/mach-exynos/include/mach/pm-core.h
+++ b/arch/arm/mach-exynos/include/mach/pm-core.h
@@ -33,7 +33,7 @@ static inline void s3c_pm_arch_prepare_irqs(void)
33 __raw_writel(tmp, S5P_WAKEUP_MASK); 33 __raw_writel(tmp, S5P_WAKEUP_MASK);
34 34
35 __raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK); 35 __raw_writel(s3c_irqwake_intmask, S5P_WAKEUP_MASK);
36 __raw_writel(s3c_irqwake_eintmask, S5P_EINT_WAKEUP_MASK); 36 __raw_writel(s3c_irqwake_eintmask & 0xFFFFFFFE, S5P_EINT_WAKEUP_MASK);
37} 37}
38 38
39static inline void s3c_pm_arch_stop_clocks(void) 39static inline void s3c_pm_arch_stop_clocks(void)
diff --git a/arch/arm/mach-exynos/include/mach/pmu.h b/arch/arm/mach-exynos/include/mach/pmu.h
index e76b7faba66b..7c27c2d4bf44 100644
--- a/arch/arm/mach-exynos/include/mach/pmu.h
+++ b/arch/arm/mach-exynos/include/mach/pmu.h
@@ -23,12 +23,12 @@ enum sys_powerdown {
23}; 23};
24 24
25extern unsigned long l2x0_regs_phys; 25extern unsigned long l2x0_regs_phys;
26struct exynos4_pmu_conf { 26struct exynos_pmu_conf {
27 void __iomem *reg; 27 void __iomem *reg;
28 unsigned int val[NUM_SYS_POWERDOWN]; 28 unsigned int val[NUM_SYS_POWERDOWN];
29}; 29};
30 30
31extern void exynos4_sys_powerdown_conf(enum sys_powerdown mode); 31extern void exynos_sys_powerdown_conf(enum sys_powerdown mode);
32extern void s3c_cpu_resume(void); 32extern void s3c_cpu_resume(void);
33 33
34#endif /* __ASM_ARCH_PMU_H */ 34#endif /* __ASM_ARCH_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index b78b5f3ad9c0..8c9b38c9c504 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -274,36 +274,51 @@
274 274
275#define EXYNOS5_CLKDIV_ACP EXYNOS_CLKREG(0x08500) 275#define EXYNOS5_CLKDIV_ACP EXYNOS_CLKREG(0x08500)
276 276
277#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
278#define EXYNOS5_EPLL_CON0 EXYNOS_CLKREG(0x10130) 277#define EXYNOS5_EPLL_CON0 EXYNOS_CLKREG(0x10130)
279#define EXYNOS5_EPLL_CON1 EXYNOS_CLKREG(0x10134) 278#define EXYNOS5_EPLL_CON1 EXYNOS_CLKREG(0x10134)
279#define EXYNOS5_EPLL_CON2 EXYNOS_CLKREG(0x10138)
280#define EXYNOS5_VPLL_CON0 EXYNOS_CLKREG(0x10140) 280#define EXYNOS5_VPLL_CON0 EXYNOS_CLKREG(0x10140)
281#define EXYNOS5_VPLL_CON1 EXYNOS_CLKREG(0x10144) 281#define EXYNOS5_VPLL_CON1 EXYNOS_CLKREG(0x10144)
282#define EXYNOS5_VPLL_CON2 EXYNOS_CLKREG(0x10148)
282#define EXYNOS5_CPLL_CON0 EXYNOS_CLKREG(0x10120) 283#define EXYNOS5_CPLL_CON0 EXYNOS_CLKREG(0x10120)
283 284
284#define EXYNOS5_CLKSRC_TOP0 EXYNOS_CLKREG(0x10210) 285#define EXYNOS5_CLKSRC_TOP0 EXYNOS_CLKREG(0x10210)
286#define EXYNOS5_CLKSRC_TOP1 EXYNOS_CLKREG(0x10214)
287#define EXYNOS5_CLKSRC_TOP2 EXYNOS_CLKREG(0x10218)
285#define EXYNOS5_CLKSRC_TOP3 EXYNOS_CLKREG(0x1021C) 288#define EXYNOS5_CLKSRC_TOP3 EXYNOS_CLKREG(0x1021C)
286#define EXYNOS5_CLKSRC_GSCL EXYNOS_CLKREG(0x10220) 289#define EXYNOS5_CLKSRC_GSCL EXYNOS_CLKREG(0x10220)
287#define EXYNOS5_CLKSRC_DISP1_0 EXYNOS_CLKREG(0x1022C) 290#define EXYNOS5_CLKSRC_DISP1_0 EXYNOS_CLKREG(0x1022C)
291#define EXYNOS5_CLKSRC_MAUDIO EXYNOS_CLKREG(0x10240)
288#define EXYNOS5_CLKSRC_FSYS EXYNOS_CLKREG(0x10244) 292#define EXYNOS5_CLKSRC_FSYS EXYNOS_CLKREG(0x10244)
289#define EXYNOS5_CLKSRC_PERIC0 EXYNOS_CLKREG(0x10250) 293#define EXYNOS5_CLKSRC_PERIC0 EXYNOS_CLKREG(0x10250)
294#define EXYNOS5_CLKSRC_PERIC1 EXYNOS_CLKREG(0x10254)
295#define EXYNOS5_SCLK_SRC_ISP EXYNOS_CLKREG(0x10270)
290 296
291#define EXYNOS5_CLKSRC_MASK_TOP EXYNOS_CLKREG(0x10310) 297#define EXYNOS5_CLKSRC_MASK_TOP EXYNOS_CLKREG(0x10310)
292#define EXYNOS5_CLKSRC_MASK_GSCL EXYNOS_CLKREG(0x10320) 298#define EXYNOS5_CLKSRC_MASK_GSCL EXYNOS_CLKREG(0x10320)
293#define EXYNOS5_CLKSRC_MASK_DISP1_0 EXYNOS_CLKREG(0x1032C) 299#define EXYNOS5_CLKSRC_MASK_DISP1_0 EXYNOS_CLKREG(0x1032C)
300#define EXYNOS5_CLKSRC_MASK_MAUDIO EXYNOS_CLKREG(0x10334)
294#define EXYNOS5_CLKSRC_MASK_FSYS EXYNOS_CLKREG(0x10340) 301#define EXYNOS5_CLKSRC_MASK_FSYS EXYNOS_CLKREG(0x10340)
295#define EXYNOS5_CLKSRC_MASK_PERIC0 EXYNOS_CLKREG(0x10350) 302#define EXYNOS5_CLKSRC_MASK_PERIC0 EXYNOS_CLKREG(0x10350)
303#define EXYNOS5_CLKSRC_MASK_PERIC1 EXYNOS_CLKREG(0x10354)
296 304
297#define EXYNOS5_CLKDIV_TOP0 EXYNOS_CLKREG(0x10510) 305#define EXYNOS5_CLKDIV_TOP0 EXYNOS_CLKREG(0x10510)
298#define EXYNOS5_CLKDIV_TOP1 EXYNOS_CLKREG(0x10514) 306#define EXYNOS5_CLKDIV_TOP1 EXYNOS_CLKREG(0x10514)
299#define EXYNOS5_CLKDIV_GSCL EXYNOS_CLKREG(0x10520) 307#define EXYNOS5_CLKDIV_GSCL EXYNOS_CLKREG(0x10520)
300#define EXYNOS5_CLKDIV_DISP1_0 EXYNOS_CLKREG(0x1052C) 308#define EXYNOS5_CLKDIV_DISP1_0 EXYNOS_CLKREG(0x1052C)
301#define EXYNOS5_CLKDIV_GEN EXYNOS_CLKREG(0x1053C) 309#define EXYNOS5_CLKDIV_GEN EXYNOS_CLKREG(0x1053C)
310#define EXYNOS5_CLKDIV_MAUDIO EXYNOS_CLKREG(0x10544)
302#define EXYNOS5_CLKDIV_FSYS0 EXYNOS_CLKREG(0x10548) 311#define EXYNOS5_CLKDIV_FSYS0 EXYNOS_CLKREG(0x10548)
303#define EXYNOS5_CLKDIV_FSYS1 EXYNOS_CLKREG(0x1054C) 312#define EXYNOS5_CLKDIV_FSYS1 EXYNOS_CLKREG(0x1054C)
304#define EXYNOS5_CLKDIV_FSYS2 EXYNOS_CLKREG(0x10550) 313#define EXYNOS5_CLKDIV_FSYS2 EXYNOS_CLKREG(0x10550)
305#define EXYNOS5_CLKDIV_FSYS3 EXYNOS_CLKREG(0x10554) 314#define EXYNOS5_CLKDIV_FSYS3 EXYNOS_CLKREG(0x10554)
306#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558) 315#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
316#define EXYNOS5_CLKDIV_PERIC1 EXYNOS_CLKREG(0x1055C)
317#define EXYNOS5_CLKDIV_PERIC2 EXYNOS_CLKREG(0x10560)
318#define EXYNOS5_CLKDIV_PERIC3 EXYNOS_CLKREG(0x10564)
319#define EXYNOS5_CLKDIV_PERIC4 EXYNOS_CLKREG(0x10568)
320#define EXYNOS5_CLKDIV_PERIC5 EXYNOS_CLKREG(0x1056C)
321#define EXYNOS5_SCLK_DIV_ISP EXYNOS_CLKREG(0x10580)
307 322
308#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800) 323#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
309#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800) 324#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
@@ -311,6 +326,7 @@
311#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920) 326#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
312#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928) 327#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
313#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C) 328#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
329#define EXYNOS5_CLKGATE_IP_G3D EXYNOS_CLKREG(0x10930)
314#define EXYNOS5_CLKGATE_IP_GEN EXYNOS_CLKREG(0x10934) 330#define EXYNOS5_CLKGATE_IP_GEN EXYNOS_CLKREG(0x10934)
315#define EXYNOS5_CLKGATE_IP_FSYS EXYNOS_CLKREG(0x10944) 331#define EXYNOS5_CLKGATE_IP_FSYS EXYNOS_CLKREG(0x10944)
316#define EXYNOS5_CLKGATE_IP_GPS EXYNOS_CLKREG(0x1094C) 332#define EXYNOS5_CLKGATE_IP_GPS EXYNOS_CLKREG(0x1094C)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index 4dbb8629b200..43a99e6f56ab 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -1,9 +1,8 @@
1/* linux/arch/arm/mach-exynos4/include/mach/regs-pmu.h 1/*
2 * 2 * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
6 * EXYNOS4 - Power management unit definition 5 * EXYNOS - Power management unit definition
7 * 6 *
8 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -229,4 +228,138 @@
229#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034) 228#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034)
230#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038) 229#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038)
231 230
231/* For EXYNOS5 */
232
233#define EXYNOS5_USB_CFG S5P_PMUREG(0x0230)
234
235#define EXYNOS5_ARM_CORE0_SYS_PWR_REG S5P_PMUREG(0x1000)
236#define EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1004)
237#define EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1008)
238#define EXYNOS5_ARM_CORE1_SYS_PWR_REG S5P_PMUREG(0x1010)
239#define EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1014)
240#define EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1018)
241#define EXYNOS5_FSYS_ARM_SYS_PWR_REG S5P_PMUREG(0x1040)
242#define EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1048)
243#define EXYNOS5_ISP_ARM_SYS_PWR_REG S5P_PMUREG(0x1050)
244#define EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG S5P_PMUREG(0x1054)
245#define EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG S5P_PMUREG(0x1058)
246#define EXYNOS5_ARM_COMMON_SYS_PWR_REG S5P_PMUREG(0x1080)
247#define EXYNOS5_ARM_L2_SYS_PWR_REG S5P_PMUREG(0x10C0)
248#define EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1100)
249#define EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG S5P_PMUREG(0x1104)
250#define EXYNOS5_CMU_RESET_SYS_PWR_REG S5P_PMUREG(0x110C)
251#define EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1120)
252#define EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1124)
253#define EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x112C)
254#define EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG S5P_PMUREG(0x1130)
255#define EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG S5P_PMUREG(0x1134)
256#define EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG S5P_PMUREG(0x1138)
257#define EXYNOS5_APLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1140)
258#define EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1144)
259#define EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1148)
260#define EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x114C)
261#define EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1150)
262#define EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1154)
263#define EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1164)
264#define EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG S5P_PMUREG(0x1170)
265#define EXYNOS5_TOP_BUS_SYS_PWR_REG S5P_PMUREG(0x1180)
266#define EXYNOS5_TOP_RETENTION_SYS_PWR_REG S5P_PMUREG(0x1184)
267#define EXYNOS5_TOP_PWR_SYS_PWR_REG S5P_PMUREG(0x1188)
268#define EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1190)
269#define EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1194)
270#define EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1198)
271#define EXYNOS5_LOGIC_RESET_SYS_PWR_REG S5P_PMUREG(0x11A0)
272#define EXYNOS5_OSCCLK_GATE_SYS_PWR_REG S5P_PMUREG(0x11A4)
273#define EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B0)
274#define EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x11B4)
275#define EXYNOS5_USBOTG_MEM_SYS_PWR_REG S5P_PMUREG(0x11C0)
276#define EXYNOS5_G2D_MEM_SYS_PWR_REG S5P_PMUREG(0x11C8)
277#define EXYNOS5_USBDRD_MEM_SYS_PWR_REG S5P_PMUREG(0x11CC)
278#define EXYNOS5_SDMMC_MEM_SYS_PWR_REG S5P_PMUREG(0x11D0)
279#define EXYNOS5_CSSYS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D4)
280#define EXYNOS5_SECSS_MEM_SYS_PWR_REG S5P_PMUREG(0x11D8)
281#define EXYNOS5_ROTATOR_MEM_SYS_PWR_REG S5P_PMUREG(0x11DC)
282#define EXYNOS5_INTRAM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E0)
283#define EXYNOS5_INTROM_MEM_SYS_PWR_REG S5P_PMUREG(0x11E4)
284#define EXYNOS5_JPEG_MEM_SYS_PWR_REG S5P_PMUREG(0x11E8)
285#define EXYNOS5_HSI_MEM_SYS_PWR_REG S5P_PMUREG(0x11EC)
286#define EXYNOS5_MCUIOP_MEM_SYS_PWR_REG S5P_PMUREG(0x11F4)
287#define EXYNOS5_SATA_MEM_SYS_PWR_REG S5P_PMUREG(0x11FC)
288#define EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG S5P_PMUREG(0x1200)
289#define EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG S5P_PMUREG(0x1204)
290#define EXYNOS5_PAD_RETENTION_EFNAND_SYS_PWR_REG S5P_PMUREG(0x1208)
291#define EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG S5P_PMUREG(0x1220)
292#define EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG S5P_PMUREG(0x1224)
293#define EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG S5P_PMUREG(0x1228)
294#define EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG S5P_PMUREG(0x122C)
295#define EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG S5P_PMUREG(0x1230)
296#define EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG S5P_PMUREG(0x1234)
297#define EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG S5P_PMUREG(0x1238)
298#define EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x123C)
299#define EXYNOS5_PAD_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1240)
300#define EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1250)
301#define EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG S5P_PMUREG(0x1260)
302#define EXYNOS5_XUSBXTI_SYS_PWR_REG S5P_PMUREG(0x1280)
303#define EXYNOS5_XXTI_SYS_PWR_REG S5P_PMUREG(0x1284)
304#define EXYNOS5_EXT_REGULATOR_SYS_PWR_REG S5P_PMUREG(0x12C0)
305#define EXYNOS5_GPIO_MODE_SYS_PWR_REG S5P_PMUREG(0x1300)
306#define EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG S5P_PMUREG(0x1320)
307#define EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG S5P_PMUREG(0x1340)
308#define EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG S5P_PMUREG(0x1344)
309#define EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG S5P_PMUREG(0x1348)
310#define EXYNOS5_GSCL_SYS_PWR_REG S5P_PMUREG(0x1400)
311#define EXYNOS5_ISP_SYS_PWR_REG S5P_PMUREG(0x1404)
312#define EXYNOS5_MFC_SYS_PWR_REG S5P_PMUREG(0x1408)
313#define EXYNOS5_G3D_SYS_PWR_REG S5P_PMUREG(0x140C)
314#define EXYNOS5_DISP1_SYS_PWR_REG S5P_PMUREG(0x1414)
315#define EXYNOS5_MAU_SYS_PWR_REG S5P_PMUREG(0x1418)
316#define EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG S5P_PMUREG(0x1480)
317#define EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG S5P_PMUREG(0x1484)
318#define EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG S5P_PMUREG(0x1488)
319#define EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG S5P_PMUREG(0x148C)
320#define EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG S5P_PMUREG(0x1494)
321#define EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG S5P_PMUREG(0x1498)
322#define EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG S5P_PMUREG(0x14C0)
323#define EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG S5P_PMUREG(0x14C4)
324#define EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG S5P_PMUREG(0x14C8)
325#define EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG S5P_PMUREG(0x14CC)
326#define EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG S5P_PMUREG(0x14D4)
327#define EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG S5P_PMUREG(0x14D8)
328#define EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG S5P_PMUREG(0x1580)
329#define EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG S5P_PMUREG(0x1584)
330#define EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG S5P_PMUREG(0x1588)
331#define EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG S5P_PMUREG(0x158C)
332#define EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG S5P_PMUREG(0x1594)
333#define EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG S5P_PMUREG(0x1598)
334
335#define EXYNOS5_ARM_CORE0_OPTION S5P_PMUREG(0x2008)
336#define EXYNOS5_ARM_CORE1_OPTION S5P_PMUREG(0x2088)
337#define EXYNOS5_FSYS_ARM_OPTION S5P_PMUREG(0x2208)
338#define EXYNOS5_ISP_ARM_OPTION S5P_PMUREG(0x2288)
339#define EXYNOS5_ARM_COMMON_OPTION S5P_PMUREG(0x2408)
340#define EXYNOS5_TOP_PWR_OPTION S5P_PMUREG(0x2C48)
341#define EXYNOS5_TOP_PWR_SYSMEM_OPTION S5P_PMUREG(0x2CC8)
342#define EXYNOS5_JPEG_MEM_OPTION S5P_PMUREG(0x2F48)
343#define EXYNOS5_GSCL_STATUS S5P_PMUREG(0x4004)
344#define EXYNOS5_ISP_STATUS S5P_PMUREG(0x4024)
345#define EXYNOS5_GSCL_OPTION S5P_PMUREG(0x4008)
346#define EXYNOS5_ISP_OPTION S5P_PMUREG(0x4028)
347#define EXYNOS5_MFC_OPTION S5P_PMUREG(0x4048)
348#define EXYNOS5_G3D_CONFIGURATION S5P_PMUREG(0x4060)
349#define EXYNOS5_G3D_STATUS S5P_PMUREG(0x4064)
350#define EXYNOS5_G3D_OPTION S5P_PMUREG(0x4068)
351#define EXYNOS5_DISP1_OPTION S5P_PMUREG(0x40A8)
352#define EXYNOS5_MAU_OPTION S5P_PMUREG(0x40C8)
353
354#define EXYNOS5_USE_SC_FEEDBACK (1 << 1)
355#define EXYNOS5_USE_SC_COUNTER (1 << 0)
356
357#define EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL (1 << 2)
358#define EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN (1 << 7)
359
360#define EXYNOS5_OPTION_USE_STANDBYWFE (1 << 24)
361#define EXYNOS5_OPTION_USE_STANDBYWFI (1 << 16)
362
363#define EXYNOS5_OPTION_USE_RETENTION (1 << 4)
364
232#endif /* __ASM_ARCH_REGS_PMU_H */ 365#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 972983e392bc..656f8fc9addd 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -237,25 +237,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
237#else 237#else
238/* Frame Buffer */ 238/* Frame Buffer */
239static struct s3c_fb_pd_win nuri_fb_win0 = { 239static struct s3c_fb_pd_win nuri_fb_win0 = {
240 .win_mode = {
241 .left_margin = 64,
242 .right_margin = 16,
243 .upper_margin = 64,
244 .lower_margin = 1,
245 .hsync_len = 48,
246 .vsync_len = 3,
247 .xres = 1024,
248 .yres = 600,
249 .refresh = 60,
250 },
251 .max_bpp = 24, 240 .max_bpp = 24,
252 .default_bpp = 16, 241 .default_bpp = 16,
242 .xres = 1024,
243 .yres = 600,
253 .virtual_x = 1024, 244 .virtual_x = 1024,
254 .virtual_y = 2 * 600, 245 .virtual_y = 2 * 600,
255}; 246};
256 247
248static struct fb_videomode nuri_lcd_timing = {
249 .left_margin = 64,
250 .right_margin = 16,
251 .upper_margin = 64,
252 .lower_margin = 1,
253 .hsync_len = 48,
254 .vsync_len = 3,
255 .xres = 1024,
256 .yres = 600,
257 .refresh = 60,
258};
259
257static struct s3c_fb_platdata nuri_fb_pdata __initdata = { 260static struct s3c_fb_platdata nuri_fb_pdata __initdata = {
258 .win[0] = &nuri_fb_win0, 261 .win[0] = &nuri_fb_win0,
262 .vtiming = &nuri_lcd_timing,
259 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | 263 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
260 VIDCON0_CLKSEL_LCD, 264 VIDCON0_CLKSEL_LCD,
261 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 265 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index a7f7fd567dde..f5572be9d7bf 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -604,24 +604,28 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
604}; 604};
605#else 605#else
606static struct s3c_fb_pd_win origen_fb_win0 = { 606static struct s3c_fb_pd_win origen_fb_win0 = {
607 .win_mode = { 607 .xres = 1024,
608 .left_margin = 64, 608 .yres = 600,
609 .right_margin = 16,
610 .upper_margin = 64,
611 .lower_margin = 16,
612 .hsync_len = 48,
613 .vsync_len = 3,
614 .xres = 1024,
615 .yres = 600,
616 },
617 .max_bpp = 32, 609 .max_bpp = 32,
618 .default_bpp = 24, 610 .default_bpp = 24,
619 .virtual_x = 1024, 611 .virtual_x = 1024,
620 .virtual_y = 2 * 600, 612 .virtual_y = 2 * 600,
621}; 613};
622 614
615static struct fb_videomode origen_lcd_timing = {
616 .left_margin = 64,
617 .right_margin = 16,
618 .upper_margin = 64,
619 .lower_margin = 16,
620 .hsync_len = 48,
621 .vsync_len = 3,
622 .xres = 1024,
623 .yres = 600,
624};
625
623static struct s3c_fb_platdata origen_lcd_pdata __initdata = { 626static struct s3c_fb_platdata origen_lcd_pdata __initdata = {
624 .win[0] = &origen_fb_win0, 627 .win[0] = &origen_fb_win0,
628 .vtiming = &origen_lcd_timing,
625 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 629 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
626 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | 630 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
627 VIDCON1_INV_VCLK, 631 VIDCON1_INV_VCLK,
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 70df1a0c2118..262e9e446a96 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -178,22 +178,26 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
178}; 178};
179#else 179#else
180static struct s3c_fb_pd_win smdkv310_fb_win0 = { 180static struct s3c_fb_pd_win smdkv310_fb_win0 = {
181 .win_mode = { 181 .max_bpp = 32,
182 .left_margin = 13, 182 .default_bpp = 24,
183 .right_margin = 8, 183 .xres = 800,
184 .upper_margin = 7, 184 .yres = 480,
185 .lower_margin = 5, 185};
186 .hsync_len = 3, 186
187 .vsync_len = 1, 187static struct fb_videomode smdkv310_lcd_timing = {
188 .xres = 800, 188 .left_margin = 13,
189 .yres = 480, 189 .right_margin = 8,
190 }, 190 .upper_margin = 7,
191 .max_bpp = 32, 191 .lower_margin = 5,
192 .default_bpp = 24, 192 .hsync_len = 3,
193 .vsync_len = 1,
194 .xres = 800,
195 .yres = 480,
193}; 196};
194 197
195static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = { 198static struct s3c_fb_platdata smdkv310_lcd0_pdata __initdata = {
196 .win[0] = &smdkv310_fb_win0, 199 .win[0] = &smdkv310_fb_win0,
200 .vtiming = &smdkv310_lcd_timing,
197 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 201 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
198 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 202 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
199 .setup_gpio = exynos4_fimd0_gpio_setup_24bpp, 203 .setup_gpio = exynos4_fimd0_gpio_setup_24bpp,
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 083b44de9c10..cd92fa86ba41 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -843,25 +843,29 @@ static struct exynos_drm_fimd_pdata drm_fimd_pdata = {
843#else 843#else
844/* Frame Buffer */ 844/* Frame Buffer */
845static struct s3c_fb_pd_win universal_fb_win0 = { 845static struct s3c_fb_pd_win universal_fb_win0 = {
846 .win_mode = {
847 .left_margin = 16,
848 .right_margin = 16,
849 .upper_margin = 2,
850 .lower_margin = 28,
851 .hsync_len = 2,
852 .vsync_len = 1,
853 .xres = 480,
854 .yres = 800,
855 .refresh = 55,
856 },
857 .max_bpp = 32, 846 .max_bpp = 32,
858 .default_bpp = 16, 847 .default_bpp = 16,
848 .xres = 480,
849 .yres = 800,
859 .virtual_x = 480, 850 .virtual_x = 480,
860 .virtual_y = 2 * 800, 851 .virtual_y = 2 * 800,
861}; 852};
862 853
854static struct fb_videomode universal_lcd_timing = {
855 .left_margin = 16,
856 .right_margin = 16,
857 .upper_margin = 2,
858 .lower_margin = 28,
859 .hsync_len = 2,
860 .vsync_len = 1,
861 .xres = 480,
862 .yres = 800,
863 .refresh = 55,
864};
865
863static struct s3c_fb_platdata universal_lcd_pdata __initdata = { 866static struct s3c_fb_platdata universal_lcd_pdata __initdata = {
864 .win[0] = &universal_fb_win0, 867 .win[0] = &universal_fb_win0,
868 .vtiming = &universal_lcd_timing,
865 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | 869 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
866 VIDCON0_CLKSEL_LCD, 870 VIDCON0_CLKSEL_LCD,
867 .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN 871 .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 563dea9a6dbb..c06c992943a1 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -1,9 +1,8 @@
1/* linux/arch/arm/mach-exynos4/pm.c 1/*
2 * 2 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
6 * EXYNOS4210 - Power Management support 5 * EXYNOS - Power Management support
7 * 6 *
8 * Based on arch/arm/mach-s3c2410/pm.c 7 * Based on arch/arm/mach-s3c2410/pm.c
9 * Copyright (c) 2006 Simtec Electronics 8 * Copyright (c) 2006 Simtec Electronics
@@ -63,90 +62,7 @@ static struct sleep_save exynos4_vpll_save[] = {
63 SAVE_ITEM(EXYNOS4_VPLL_CON1), 62 SAVE_ITEM(EXYNOS4_VPLL_CON1),
64}; 63};
65 64
66static struct sleep_save exynos4_core_save[] = { 65static struct sleep_save exynos_core_save[] = {
67 /* GIC side */
68 SAVE_ITEM(S5P_VA_GIC_CPU + 0x000),
69 SAVE_ITEM(S5P_VA_GIC_CPU + 0x004),
70 SAVE_ITEM(S5P_VA_GIC_CPU + 0x008),
71 SAVE_ITEM(S5P_VA_GIC_CPU + 0x00C),
72 SAVE_ITEM(S5P_VA_GIC_CPU + 0x014),
73 SAVE_ITEM(S5P_VA_GIC_CPU + 0x018),
74 SAVE_ITEM(S5P_VA_GIC_DIST + 0x000),
75 SAVE_ITEM(S5P_VA_GIC_DIST + 0x004),
76 SAVE_ITEM(S5P_VA_GIC_DIST + 0x100),
77 SAVE_ITEM(S5P_VA_GIC_DIST + 0x104),
78 SAVE_ITEM(S5P_VA_GIC_DIST + 0x108),
79 SAVE_ITEM(S5P_VA_GIC_DIST + 0x300),
80 SAVE_ITEM(S5P_VA_GIC_DIST + 0x304),
81 SAVE_ITEM(S5P_VA_GIC_DIST + 0x308),
82 SAVE_ITEM(S5P_VA_GIC_DIST + 0x400),
83 SAVE_ITEM(S5P_VA_GIC_DIST + 0x404),
84 SAVE_ITEM(S5P_VA_GIC_DIST + 0x408),
85 SAVE_ITEM(S5P_VA_GIC_DIST + 0x40C),
86 SAVE_ITEM(S5P_VA_GIC_DIST + 0x410),
87 SAVE_ITEM(S5P_VA_GIC_DIST + 0x414),
88 SAVE_ITEM(S5P_VA_GIC_DIST + 0x418),
89 SAVE_ITEM(S5P_VA_GIC_DIST + 0x41C),
90 SAVE_ITEM(S5P_VA_GIC_DIST + 0x420),
91 SAVE_ITEM(S5P_VA_GIC_DIST + 0x424),
92 SAVE_ITEM(S5P_VA_GIC_DIST + 0x428),
93 SAVE_ITEM(S5P_VA_GIC_DIST + 0x42C),
94 SAVE_ITEM(S5P_VA_GIC_DIST + 0x430),
95 SAVE_ITEM(S5P_VA_GIC_DIST + 0x434),
96 SAVE_ITEM(S5P_VA_GIC_DIST + 0x438),
97 SAVE_ITEM(S5P_VA_GIC_DIST + 0x43C),
98 SAVE_ITEM(S5P_VA_GIC_DIST + 0x440),
99 SAVE_ITEM(S5P_VA_GIC_DIST + 0x444),
100 SAVE_ITEM(S5P_VA_GIC_DIST + 0x448),
101 SAVE_ITEM(S5P_VA_GIC_DIST + 0x44C),
102 SAVE_ITEM(S5P_VA_GIC_DIST + 0x450),
103 SAVE_ITEM(S5P_VA_GIC_DIST + 0x454),
104 SAVE_ITEM(S5P_VA_GIC_DIST + 0x458),
105 SAVE_ITEM(S5P_VA_GIC_DIST + 0x45C),
106
107 SAVE_ITEM(S5P_VA_GIC_DIST + 0x800),
108 SAVE_ITEM(S5P_VA_GIC_DIST + 0x804),
109 SAVE_ITEM(S5P_VA_GIC_DIST + 0x808),
110 SAVE_ITEM(S5P_VA_GIC_DIST + 0x80C),
111 SAVE_ITEM(S5P_VA_GIC_DIST + 0x810),
112 SAVE_ITEM(S5P_VA_GIC_DIST + 0x814),
113 SAVE_ITEM(S5P_VA_GIC_DIST + 0x818),
114 SAVE_ITEM(S5P_VA_GIC_DIST + 0x81C),
115 SAVE_ITEM(S5P_VA_GIC_DIST + 0x820),
116 SAVE_ITEM(S5P_VA_GIC_DIST + 0x824),
117 SAVE_ITEM(S5P_VA_GIC_DIST + 0x828),
118 SAVE_ITEM(S5P_VA_GIC_DIST + 0x82C),
119 SAVE_ITEM(S5P_VA_GIC_DIST + 0x830),
120 SAVE_ITEM(S5P_VA_GIC_DIST + 0x834),
121 SAVE_ITEM(S5P_VA_GIC_DIST + 0x838),
122 SAVE_ITEM(S5P_VA_GIC_DIST + 0x83C),
123 SAVE_ITEM(S5P_VA_GIC_DIST + 0x840),
124 SAVE_ITEM(S5P_VA_GIC_DIST + 0x844),
125 SAVE_ITEM(S5P_VA_GIC_DIST + 0x848),
126 SAVE_ITEM(S5P_VA_GIC_DIST + 0x84C),
127 SAVE_ITEM(S5P_VA_GIC_DIST + 0x850),
128 SAVE_ITEM(S5P_VA_GIC_DIST + 0x854),
129 SAVE_ITEM(S5P_VA_GIC_DIST + 0x858),
130 SAVE_ITEM(S5P_VA_GIC_DIST + 0x85C),
131
132 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC00),
133 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC04),
134 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC08),
135 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC0C),
136 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC10),
137 SAVE_ITEM(S5P_VA_GIC_DIST + 0xC14),
138
139 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x000),
140 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x010),
141 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x020),
142 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x030),
143 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x040),
144 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x050),
145 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x060),
146 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x070),
147 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x080),
148 SAVE_ITEM(S5P_VA_COMBINER_BASE + 0x090),
149
150 /* SROM side */ 66 /* SROM side */
151 SAVE_ITEM(S5P_SROM_BW), 67 SAVE_ITEM(S5P_SROM_BW),
152 SAVE_ITEM(S5P_SROM_BC0), 68 SAVE_ITEM(S5P_SROM_BC0),
@@ -159,9 +75,11 @@ static struct sleep_save exynos4_core_save[] = {
159/* For Cortex-A9 Diagnostic and Power control register */ 75/* For Cortex-A9 Diagnostic and Power control register */
160static unsigned int save_arm_register[2]; 76static unsigned int save_arm_register[2];
161 77
162static int exynos4_cpu_suspend(unsigned long arg) 78static int exynos_cpu_suspend(unsigned long arg)
163{ 79{
80#ifdef CONFIG_CACHE_L2X0
164 outer_flush_all(); 81 outer_flush_all();
82#endif
165 83
166 /* issue the standby signal into the pm unit. */ 84 /* issue the standby signal into the pm unit. */
167 cpu_do_idle(); 85 cpu_do_idle();
@@ -170,19 +88,25 @@ static int exynos4_cpu_suspend(unsigned long arg)
170 panic("sleep resumed to originator?"); 88 panic("sleep resumed to originator?");
171} 89}
172 90
173static void exynos4_pm_prepare(void) 91static void exynos_pm_prepare(void)
174{ 92{
175 u32 tmp; 93 unsigned int tmp;
176 94
177 s3c_pm_do_save(exynos4_core_save, ARRAY_SIZE(exynos4_core_save)); 95 s3c_pm_do_save(exynos_core_save, ARRAY_SIZE(exynos_core_save));
178 s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
179 s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
180 96
181 tmp = __raw_readl(S5P_INFORM1); 97 if (!soc_is_exynos5250()) {
98 s3c_pm_do_save(exynos4_epll_save, ARRAY_SIZE(exynos4_epll_save));
99 s3c_pm_do_save(exynos4_vpll_save, ARRAY_SIZE(exynos4_vpll_save));
100 } else {
101 /* Disable USE_RETENTION of JPEG_MEM_OPTION */
102 tmp = __raw_readl(EXYNOS5_JPEG_MEM_OPTION);
103 tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
104 __raw_writel(tmp, EXYNOS5_JPEG_MEM_OPTION);
105 }
182 106
183 /* Set value of power down register for sleep mode */ 107 /* Set value of power down register for sleep mode */
184 108
185 exynos4_sys_powerdown_conf(SYS_SLEEP); 109 exynos_sys_powerdown_conf(SYS_SLEEP);
186 __raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1); 110 __raw_writel(S5P_CHECK_SLEEP, S5P_INFORM1);
187 111
188 /* ensure at least INFORM0 has the resume address */ 112 /* ensure at least INFORM0 has the resume address */
@@ -191,17 +115,18 @@ static void exynos4_pm_prepare(void)
191 115
192 /* Before enter central sequence mode, clock src register have to set */ 116 /* Before enter central sequence mode, clock src register have to set */
193 117
194 s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc)); 118 if (!soc_is_exynos5250())
119 s3c_pm_do_restore_core(exynos4_set_clksrc, ARRAY_SIZE(exynos4_set_clksrc));
195 120
196 if (soc_is_exynos4210()) 121 if (soc_is_exynos4210())
197 s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc)); 122 s3c_pm_do_restore_core(exynos4210_set_clksrc, ARRAY_SIZE(exynos4210_set_clksrc));
198 123
199} 124}
200 125
201static int exynos4_pm_add(struct device *dev, struct subsys_interface *sif) 126static int exynos_pm_add(struct device *dev, struct subsys_interface *sif)
202{ 127{
203 pm_cpu_prep = exynos4_pm_prepare; 128 pm_cpu_prep = exynos_pm_prepare;
204 pm_cpu_sleep = exynos4_cpu_suspend; 129 pm_cpu_sleep = exynos_cpu_suspend;
205 130
206 return 0; 131 return 0;
207} 132}
@@ -273,13 +198,13 @@ static void exynos4_restore_pll(void)
273 } while (epll_wait || vpll_wait); 198 } while (epll_wait || vpll_wait);
274} 199}
275 200
276static struct subsys_interface exynos4_pm_interface = { 201static struct subsys_interface exynos_pm_interface = {
277 .name = "exynos4_pm", 202 .name = "exynos_pm",
278 .subsys = &exynos_subsys, 203 .subsys = &exynos_subsys,
279 .add_dev = exynos4_pm_add, 204 .add_dev = exynos_pm_add,
280}; 205};
281 206
282static __init int exynos4_pm_drvinit(void) 207static __init int exynos_pm_drvinit(void)
283{ 208{
284 struct clk *pll_base; 209 struct clk *pll_base;
285 unsigned int tmp; 210 unsigned int tmp;
@@ -292,18 +217,20 @@ static __init int exynos4_pm_drvinit(void)
292 tmp |= ((0xFF << 8) | (0x1F << 1)); 217 tmp |= ((0xFF << 8) | (0x1F << 1));
293 __raw_writel(tmp, S5P_WAKEUP_MASK); 218 __raw_writel(tmp, S5P_WAKEUP_MASK);
294 219
295 pll_base = clk_get(NULL, "xtal"); 220 if (!soc_is_exynos5250()) {
221 pll_base = clk_get(NULL, "xtal");
296 222
297 if (!IS_ERR(pll_base)) { 223 if (!IS_ERR(pll_base)) {
298 pll_base_rate = clk_get_rate(pll_base); 224 pll_base_rate = clk_get_rate(pll_base);
299 clk_put(pll_base); 225 clk_put(pll_base);
226 }
300 } 227 }
301 228
302 return subsys_interface_register(&exynos4_pm_interface); 229 return subsys_interface_register(&exynos_pm_interface);
303} 230}
304arch_initcall(exynos4_pm_drvinit); 231arch_initcall(exynos_pm_drvinit);
305 232
306static int exynos4_pm_suspend(void) 233static int exynos_pm_suspend(void)
307{ 234{
308 unsigned long tmp; 235 unsigned long tmp;
309 236
@@ -313,27 +240,27 @@ static int exynos4_pm_suspend(void)
313 tmp &= ~S5P_CENTRAL_LOWPWR_CFG; 240 tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
314 __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); 241 __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
315 242
316 if (soc_is_exynos4212() || soc_is_exynos4412()) { 243 /* Setting SEQ_OPTION register */
317 tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION); 244
318 tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM | 245 tmp = (S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0);
319 S5P_USE_STANDBYWFE_ISP_ARM); 246 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
320 __raw_writel(tmp, S5P_CENTRAL_SEQ_OPTION);
321 }
322 247
323 /* Save Power control register */ 248 if (!soc_is_exynos5250()) {
324 asm ("mrc p15, 0, %0, c15, c0, 0" 249 /* Save Power control register */
325 : "=r" (tmp) : : "cc"); 250 asm ("mrc p15, 0, %0, c15, c0, 0"
326 save_arm_register[0] = tmp; 251 : "=r" (tmp) : : "cc");
252 save_arm_register[0] = tmp;
327 253
328 /* Save Diagnostic register */ 254 /* Save Diagnostic register */
329 asm ("mrc p15, 0, %0, c15, c0, 1" 255 asm ("mrc p15, 0, %0, c15, c0, 1"
330 : "=r" (tmp) : : "cc"); 256 : "=r" (tmp) : : "cc");
331 save_arm_register[1] = tmp; 257 save_arm_register[1] = tmp;
258 }
332 259
333 return 0; 260 return 0;
334} 261}
335 262
336static void exynos4_pm_resume(void) 263static void exynos_pm_resume(void)
337{ 264{
338 unsigned long tmp; 265 unsigned long tmp;
339 266
@@ -350,17 +277,19 @@ static void exynos4_pm_resume(void)
350 /* No need to perform below restore code */ 277 /* No need to perform below restore code */
351 goto early_wakeup; 278 goto early_wakeup;
352 } 279 }
353 /* Restore Power control register */ 280 if (!soc_is_exynos5250()) {
354 tmp = save_arm_register[0]; 281 /* Restore Power control register */
355 asm volatile ("mcr p15, 0, %0, c15, c0, 0" 282 tmp = save_arm_register[0];
356 : : "r" (tmp) 283 asm volatile ("mcr p15, 0, %0, c15, c0, 0"
357 : "cc"); 284 : : "r" (tmp)
358 285 : "cc");
359 /* Restore Diagnostic register */ 286
360 tmp = save_arm_register[1]; 287 /* Restore Diagnostic register */
361 asm volatile ("mcr p15, 0, %0, c15, c0, 1" 288 tmp = save_arm_register[1];
362 : : "r" (tmp) 289 asm volatile ("mcr p15, 0, %0, c15, c0, 1"
363 : "cc"); 290 : : "r" (tmp)
291 : "cc");
292 }
364 293
365 /* For release retention */ 294 /* For release retention */
366 295
@@ -372,26 +301,28 @@ static void exynos4_pm_resume(void)
372 __raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION); 301 __raw_writel((1 << 28), S5P_PAD_RET_EBIA_OPTION);
373 __raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION); 302 __raw_writel((1 << 28), S5P_PAD_RET_EBIB_OPTION);
374 303
375 s3c_pm_do_restore_core(exynos4_core_save, ARRAY_SIZE(exynos4_core_save)); 304 s3c_pm_do_restore_core(exynos_core_save, ARRAY_SIZE(exynos_core_save));
376 305
377 exynos4_restore_pll(); 306 if (!soc_is_exynos5250()) {
307 exynos4_restore_pll();
378 308
379#ifdef CONFIG_SMP 309#ifdef CONFIG_SMP
380 scu_enable(S5P_VA_SCU); 310 scu_enable(S5P_VA_SCU);
381#endif 311#endif
312 }
382 313
383early_wakeup: 314early_wakeup:
384 return; 315 return;
385} 316}
386 317
387static struct syscore_ops exynos4_pm_syscore_ops = { 318static struct syscore_ops exynos_pm_syscore_ops = {
388 .suspend = exynos4_pm_suspend, 319 .suspend = exynos_pm_suspend,
389 .resume = exynos4_pm_resume, 320 .resume = exynos_pm_resume,
390}; 321};
391 322
392static __init int exynos4_pm_syscore_init(void) 323static __init int exynos_pm_syscore_init(void)
393{ 324{
394 register_syscore_ops(&exynos4_pm_syscore_ops); 325 register_syscore_ops(&exynos_pm_syscore_ops);
395 return 0; 326 return 0;
396} 327}
397arch_initcall(exynos4_pm_syscore_init); 328arch_initcall(exynos_pm_syscore_init);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index 77c6815eebee..4aacb66f7161 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -1,9 +1,8 @@
1/* linux/arch/arm/mach-exynos4/pmu.c 1/*
2 * 2 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 3 * http://www.samsung.com/
5 * 4 *
6 * EXYNOS4210 - CPU PMU(Power Management Unit) support 5 * EXYNOS - CPU PMU(Power Management Unit) support
7 * 6 *
8 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
@@ -12,13 +11,14 @@
12 11
13#include <linux/io.h> 12#include <linux/io.h>
14#include <linux/kernel.h> 13#include <linux/kernel.h>
14#include <linux/bug.h>
15 15
16#include <mach/regs-clock.h> 16#include <mach/regs-clock.h>
17#include <mach/pmu.h> 17#include <mach/pmu.h>
18 18
19static struct exynos4_pmu_conf *exynos4_pmu_config; 19static struct exynos_pmu_conf *exynos_pmu_config;
20 20
21static struct exynos4_pmu_conf exynos4210_pmu_config[] = { 21static struct exynos_pmu_conf exynos4210_pmu_config[] = {
22 /* { .reg = address, .val = { AFTR, LPA, SLEEP } */ 22 /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
23 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } }, 23 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
24 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } }, 24 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
94 { PMU_TABLE_END,}, 94 { PMU_TABLE_END,},
95}; 95};
96 96
97static struct exynos4_pmu_conf exynos4x12_pmu_config[] = { 97static struct exynos_pmu_conf exynos4x12_pmu_config[] = {
98 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } }, 98 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
99 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } }, 99 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
100 { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } }, 100 { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -202,7 +202,7 @@ static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
202 { PMU_TABLE_END,}, 202 { PMU_TABLE_END,},
203}; 203};
204 204
205static struct exynos4_pmu_conf exynos4412_pmu_config[] = { 205static struct exynos_pmu_conf exynos4412_pmu_config[] = {
206 { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } }, 206 { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
207 { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } }, 207 { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
208 { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } }, 208 { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
@@ -212,13 +212,174 @@ static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
212 { PMU_TABLE_END,}, 212 { PMU_TABLE_END,},
213}; 213};
214 214
215void exynos4_sys_powerdown_conf(enum sys_powerdown mode) 215static struct exynos_pmu_conf exynos5250_pmu_config[] = {
216 /* { .reg = address, .val = { AFTR, LPA, SLEEP } */
217 { EXYNOS5_ARM_CORE0_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
218 { EXYNOS5_DIS_IRQ_ARM_CORE0_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
219 { EXYNOS5_DIS_IRQ_ARM_CORE0_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
220 { EXYNOS5_ARM_CORE1_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
221 { EXYNOS5_DIS_IRQ_ARM_CORE1_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
222 { EXYNOS5_DIS_IRQ_ARM_CORE1_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
223 { EXYNOS5_FSYS_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
224 { EXYNOS5_DIS_IRQ_FSYS_ARM_CENTRAL_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
225 { EXYNOS5_ISP_ARM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
226 { EXYNOS5_DIS_IRQ_ISP_ARM_LOCAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
227 { EXYNOS5_DIS_IRQ_ISP_ARM_CENTRAL_SYS_PWR_REG, { 0x0, 0x0, 0x0} },
228 { EXYNOS5_ARM_COMMON_SYS_PWR_REG, { 0x0, 0x0, 0x2} },
229 { EXYNOS5_ARM_L2_SYS_PWR_REG, { 0x3, 0x3, 0x3} },
230 { EXYNOS5_CMU_ACLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
231 { EXYNOS5_CMU_SCLKSTOP_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
232 { EXYNOS5_CMU_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
233 { EXYNOS5_CMU_ACLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
234 { EXYNOS5_CMU_SCLKSTOP_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
235 { EXYNOS5_CMU_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
236 { EXYNOS5_DRAM_FREQ_DOWN_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
237 { EXYNOS5_DDRPHY_DLLOFF_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
238 { EXYNOS5_DDRPHY_DLLLOCK_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
239 { EXYNOS5_APLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
240 { EXYNOS5_MPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
241 { EXYNOS5_VPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
242 { EXYNOS5_EPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
243 { EXYNOS5_BPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
244 { EXYNOS5_CPLL_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
245 { EXYNOS5_MPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
246 { EXYNOS5_BPLLUSER_SYSCLK_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
247 { EXYNOS5_TOP_BUS_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
248 { EXYNOS5_TOP_RETENTION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
249 { EXYNOS5_TOP_PWR_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
250 { EXYNOS5_TOP_BUS_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
251 { EXYNOS5_TOP_RETENTION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
252 { EXYNOS5_TOP_PWR_SYSMEM_SYS_PWR_REG, { 0x3, 0x0, 0x3} },
253 { EXYNOS5_LOGIC_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
254 { EXYNOS5_OSCCLK_GATE_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
255 { EXYNOS5_LOGIC_RESET_SYSMEM_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
256 { EXYNOS5_OSCCLK_GATE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
257 { EXYNOS5_USBOTG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
258 { EXYNOS5_G2D_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
259 { EXYNOS5_USBDRD_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
260 { EXYNOS5_SDMMC_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
261 { EXYNOS5_CSSYS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
262 { EXYNOS5_SECSS_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
263 { EXYNOS5_ROTATOR_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
264 { EXYNOS5_INTRAM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
265 { EXYNOS5_INTROM_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
266 { EXYNOS5_JPEG_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
267 { EXYNOS5_HSI_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
268 { EXYNOS5_MCUIOP_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
269 { EXYNOS5_SATA_MEM_SYS_PWR_REG, { 0x3, 0x0, 0x0} },
270 { EXYNOS5_PAD_RETENTION_DRAM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
271 { EXYNOS5_PAD_RETENTION_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
272 { EXYNOS5_PAD_RETENTION_GPIO_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
273 { EXYNOS5_PAD_RETENTION_UART_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
274 { EXYNOS5_PAD_RETENTION_MMCA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
275 { EXYNOS5_PAD_RETENTION_MMCB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
276 { EXYNOS5_PAD_RETENTION_EBIA_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
277 { EXYNOS5_PAD_RETENTION_EBIB_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
278 { EXYNOS5_PAD_RETENTION_SPI_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
279 { EXYNOS5_PAD_RETENTION_GPIO_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
280 { EXYNOS5_PAD_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
281 { EXYNOS5_PAD_ISOLATION_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
282 { EXYNOS5_PAD_ALV_SEL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
283 { EXYNOS5_XUSBXTI_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
284 { EXYNOS5_XXTI_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
285 { EXYNOS5_EXT_REGULATOR_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
286 { EXYNOS5_GPIO_MODE_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
287 { EXYNOS5_GPIO_MODE_SYSMEM_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
288 { EXYNOS5_GPIO_MODE_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
289 { EXYNOS5_TOP_ASB_RESET_SYS_PWR_REG, { 0x1, 0x1, 0x1} },
290 { EXYNOS5_TOP_ASB_ISOLATION_SYS_PWR_REG, { 0x1, 0x0, 0x1} },
291 { EXYNOS5_GSCL_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
292 { EXYNOS5_ISP_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
293 { EXYNOS5_MFC_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
294 { EXYNOS5_G3D_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
295 { EXYNOS5_DISP1_SYS_PWR_REG, { 0x7, 0x0, 0x0} },
296 { EXYNOS5_MAU_SYS_PWR_REG, { 0x7, 0x7, 0x0} },
297 { EXYNOS5_CMU_CLKSTOP_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
298 { EXYNOS5_CMU_CLKSTOP_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
299 { EXYNOS5_CMU_CLKSTOP_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
300 { EXYNOS5_CMU_CLKSTOP_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
301 { EXYNOS5_CMU_CLKSTOP_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
302 { EXYNOS5_CMU_CLKSTOP_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
303 { EXYNOS5_CMU_SYSCLK_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
304 { EXYNOS5_CMU_SYSCLK_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
305 { EXYNOS5_CMU_SYSCLK_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
306 { EXYNOS5_CMU_SYSCLK_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
307 { EXYNOS5_CMU_SYSCLK_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
308 { EXYNOS5_CMU_SYSCLK_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
309 { EXYNOS5_CMU_RESET_GSCL_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
310 { EXYNOS5_CMU_RESET_ISP_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
311 { EXYNOS5_CMU_RESET_MFC_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
312 { EXYNOS5_CMU_RESET_G3D_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
313 { EXYNOS5_CMU_RESET_DISP1_SYS_PWR_REG, { 0x1, 0x0, 0x0} },
314 { EXYNOS5_CMU_RESET_MAU_SYS_PWR_REG, { 0x1, 0x1, 0x0} },
315 { PMU_TABLE_END,},
316};
317
318void __iomem *exynos5_list_both_cnt_feed[] = {
319 EXYNOS5_ARM_CORE0_OPTION,
320 EXYNOS5_ARM_CORE1_OPTION,
321 EXYNOS5_ARM_COMMON_OPTION,
322 EXYNOS5_GSCL_OPTION,
323 EXYNOS5_ISP_OPTION,
324 EXYNOS5_MFC_OPTION,
325 EXYNOS5_G3D_OPTION,
326 EXYNOS5_DISP1_OPTION,
327 EXYNOS5_MAU_OPTION,
328 EXYNOS5_TOP_PWR_OPTION,
329 EXYNOS5_TOP_PWR_SYSMEM_OPTION,
330};
331
332void __iomem *exynos5_list_diable_wfi_wfe[] = {
333 EXYNOS5_ARM_CORE1_OPTION,
334 EXYNOS5_FSYS_ARM_OPTION,
335 EXYNOS5_ISP_ARM_OPTION,
336};
337
338static void exynos5_init_pmu(void)
216{ 339{
217 unsigned int i; 340 unsigned int i;
341 unsigned int tmp;
342
343 /*
344 * Enable both SC_FEEDBACK and SC_COUNTER
345 */
346 for (i = 0 ; i < ARRAY_SIZE(exynos5_list_both_cnt_feed) ; i++) {
347 tmp = __raw_readl(exynos5_list_both_cnt_feed[i]);
348 tmp |= (EXYNOS5_USE_SC_FEEDBACK |
349 EXYNOS5_USE_SC_COUNTER);
350 __raw_writel(tmp, exynos5_list_both_cnt_feed[i]);
351 }
352
353 /*
354 * SKIP_DEACTIVATE_ACEACP_IN_PWDN_BITFIELD Enable
355 * MANUAL_L2RSTDISABLE_CONTROL_BITFIELD Enable
356 */
357 tmp = __raw_readl(EXYNOS5_ARM_COMMON_OPTION);
358 tmp |= (EXYNOS5_MANUAL_L2RSTDISABLE_CONTROL |
359 EXYNOS5_SKIP_DEACTIVATE_ACEACP_IN_PWDN);
360 __raw_writel(tmp, EXYNOS5_ARM_COMMON_OPTION);
361
362 /*
363 * Disable WFI/WFE on XXX_OPTION
364 */
365 for (i = 0 ; i < ARRAY_SIZE(exynos5_list_diable_wfi_wfe) ; i++) {
366 tmp = __raw_readl(exynos5_list_diable_wfi_wfe[i]);
367 tmp &= ~(EXYNOS5_OPTION_USE_STANDBYWFE |
368 EXYNOS5_OPTION_USE_STANDBYWFI);
369 __raw_writel(tmp, exynos5_list_diable_wfi_wfe[i]);
370 }
371}
372
373void exynos_sys_powerdown_conf(enum sys_powerdown mode)
374{
375 unsigned int i;
376
377 if (soc_is_exynos5250())
378 exynos5_init_pmu();
218 379
219 for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++) 380 for (i = 0; (exynos_pmu_config[i].reg != PMU_TABLE_END) ; i++)
220 __raw_writel(exynos4_pmu_config[i].val[mode], 381 __raw_writel(exynos_pmu_config[i].val[mode],
221 exynos4_pmu_config[i].reg); 382 exynos_pmu_config[i].reg);
222 383
223 if (soc_is_exynos4412()) { 384 if (soc_is_exynos4412()) {
224 for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++) 385 for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
@@ -227,20 +388,23 @@ void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
227 } 388 }
228} 389}
229 390
230static int __init exynos4_pmu_init(void) 391static int __init exynos_pmu_init(void)
231{ 392{
232 exynos4_pmu_config = exynos4210_pmu_config; 393 exynos_pmu_config = exynos4210_pmu_config;
233 394
234 if (soc_is_exynos4210()) { 395 if (soc_is_exynos4210()) {
235 exynos4_pmu_config = exynos4210_pmu_config; 396 exynos_pmu_config = exynos4210_pmu_config;
236 pr_info("EXYNOS4210 PMU Initialize\n"); 397 pr_info("EXYNOS4210 PMU Initialize\n");
237 } else if (soc_is_exynos4212() || soc_is_exynos4412()) { 398 } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
238 exynos4_pmu_config = exynos4x12_pmu_config; 399 exynos_pmu_config = exynos4x12_pmu_config;
239 pr_info("EXYNOS4x12 PMU Initialize\n"); 400 pr_info("EXYNOS4x12 PMU Initialize\n");
401 } else if (soc_is_exynos5250()) {
402 exynos_pmu_config = exynos5250_pmu_config;
403 pr_info("EXYNOS5250 PMU Initialize\n");
240 } else { 404 } else {
241 pr_info("EXYNOS4: PMU not supported\n"); 405 pr_info("EXYNOS: PMU not supported\n");
242 } 406 }
243 407
244 return 0; 408 return 0;
245} 409}
246arch_initcall(exynos4_pmu_init); 410arch_initcall(exynos_pmu_init);
diff --git a/arch/arm/mach-highbank/Makefile b/arch/arm/mach-highbank/Makefile
index f8437dd238c2..ded4652ada80 100644
--- a/arch/arm/mach-highbank/Makefile
+++ b/arch/arm/mach-highbank/Makefile
@@ -1,4 +1,8 @@
1obj-y := clock.o highbank.o system.o 1obj-y := clock.o highbank.o system.o smc.o
2
3plus_sec := $(call as-instr,.arch_extension sec,+sec)
4AFLAGS_smc.o :=-Wa,-march=armv7-a$(plus_sec)
5
2obj-$(CONFIG_DEBUG_HIGHBANK_UART) += lluart.o 6obj-$(CONFIG_DEBUG_HIGHBANK_UART) += lluart.o
3obj-$(CONFIG_SMP) += platsmp.o 7obj-$(CONFIG_SMP) += platsmp.o
4obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 8obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
diff --git a/arch/arm/mach-highbank/core.h b/arch/arm/mach-highbank/core.h
index d8e2d0be64ac..141ed5171826 100644
--- a/arch/arm/mach-highbank/core.h
+++ b/arch/arm/mach-highbank/core.h
@@ -8,3 +8,4 @@ extern void highbank_lluart_map_io(void);
8static inline void highbank_lluart_map_io(void) {} 8static inline void highbank_lluart_map_io(void) {}
9#endif 9#endif
10 10
11extern void highbank_smc1(int fn, int arg);
diff --git a/arch/arm/mach-highbank/highbank.c b/arch/arm/mach-highbank/highbank.c
index 410a112bb52e..8777612b1a42 100644
--- a/arch/arm/mach-highbank/highbank.c
+++ b/arch/arm/mach-highbank/highbank.c
@@ -85,10 +85,24 @@ const static struct of_device_id irq_match[] = {
85 {} 85 {}
86}; 86};
87 87
88#ifdef CONFIG_CACHE_L2X0
89static void highbank_l2x0_disable(void)
90{
91 /* Disable PL310 L2 Cache controller */
92 highbank_smc1(0x102, 0x0);
93}
94#endif
95
88static void __init highbank_init_irq(void) 96static void __init highbank_init_irq(void)
89{ 97{
90 of_irq_init(irq_match); 98 of_irq_init(irq_match);
99
100#ifdef CONFIG_CACHE_L2X0
101 /* Enable PL310 L2 Cache controller */
102 highbank_smc1(0x102, 0x1);
91 l2x0_of_init(0, ~0UL); 103 l2x0_of_init(0, ~0UL);
104 outer_cache.disable = highbank_l2x0_disable;
105#endif
92} 106}
93 107
94static void __init highbank_timer_init(void) 108static void __init highbank_timer_init(void)
diff --git a/arch/arm/mach-highbank/smc.S b/arch/arm/mach-highbank/smc.S
new file mode 100644
index 000000000000..407d17baaaa9
--- /dev/null
+++ b/arch/arm/mach-highbank/smc.S
@@ -0,0 +1,27 @@
1/*
2 * Copied from omap44xx-smc.S Copyright (C) 2010 Texas Instruments, Inc.
3 * Copyright 2012 Calxeda, Inc.
4 *
 5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 */
9
10#include <linux/linkage.h>
11
12/*
13 * This is a common routine to manage the secure monitor API
14 * used to modify the PL310 secure registers.
15 * 'r0' contains the value to be modified and 'r12' contains
16 * the monitor API number.
17 * Function signature: void highbank_smc1(u32 fn, u32 arg)
18 */
19
20ENTRY(highbank_smc1)
21 stmfd sp!, {r4-r11, lr}
22 mov r12, r0
23 mov r0, r1
24 dsb
25 smc #0
26 ldmfd sp!, {r4-r11, pc}
27ENDPROC(highbank_smc1)
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index 0021f726b153..eff4db5de0dd 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -477,6 +477,7 @@ config MACH_MX31_3DS
477 select IMX_HAVE_PLATFORM_IMX2_WDT 477 select IMX_HAVE_PLATFORM_IMX2_WDT
478 select IMX_HAVE_PLATFORM_IMX_I2C 478 select IMX_HAVE_PLATFORM_IMX_I2C
479 select IMX_HAVE_PLATFORM_IMX_KEYPAD 479 select IMX_HAVE_PLATFORM_IMX_KEYPAD
480 select IMX_HAVE_PLATFORM_IMX_SSI
480 select IMX_HAVE_PLATFORM_IMX_UART 481 select IMX_HAVE_PLATFORM_IMX_UART
481 select IMX_HAVE_PLATFORM_IPU_CORE 482 select IMX_HAVE_PLATFORM_IPU_CORE
482 select IMX_HAVE_PLATFORM_MXC_EHCI 483 select IMX_HAVE_PLATFORM_MXC_EHCI
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
index 0f0beb580b73..516ddee1948e 100644
--- a/arch/arm/mach-imx/clk-imx1.c
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -108,8 +108,7 @@ int __init mx1_clocks_init(unsigned long fref)
108 clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0"); 108 clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
109 clk_register_clkdev(clk[clko], "clko", NULL); 109 clk_register_clkdev(clk[clko], "clko", NULL);
110 110
111 mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), 111 mxc_timer_init(MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR), MX1_TIM1_INT);
112 MX1_TIM1_INT);
113 112
114 return 0; 113 return 0;
115} 114}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
index 4e4f384ee8dd..ea13e61bd5f3 100644
--- a/arch/arm/mach-imx/clk-imx21.c
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -180,7 +180,7 @@ int __init mx21_clocks_init(unsigned long lref, unsigned long href)
180 clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL); 180 clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
181 clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL); 181 clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
182 182
183 mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), 183 mxc_timer_init(MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR), MX21_INT_GPT1);
184 MX21_INT_GPT1); 184
185 return 0; 185 return 0;
186} 186}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
index d9833bb5fd61..fdd8cc87c9fe 100644
--- a/arch/arm/mach-imx/clk-imx25.c
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -243,6 +243,6 @@ int __init mx25_clocks_init(void)
243 clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma"); 243 clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
244 clk_register_clkdev(clk[iim_ipg], "iim", NULL); 244 clk_register_clkdev(clk[iim_ipg], "iim", NULL);
245 245
246 mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54); 246 mxc_timer_init(MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
247 return 0; 247 return 0;
248} 248}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
index 50a7ebd8d1b2..295cbd7c08dc 100644
--- a/arch/arm/mach-imx/clk-imx27.c
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -263,8 +263,7 @@ int __init mx27_clocks_init(unsigned long fref)
263 clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0"); 263 clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
264 clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1"); 264 clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
265 265
266 mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), 266 mxc_timer_init(MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR), MX27_INT_GPT1);
267 MX27_INT_GPT1);
268 267
269 clk_prepare_enable(clk[emi_ahb_gate]); 268 clk_prepare_enable(clk[emi_ahb_gate]);
270 269
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
index a854b9cae5ea..c9a06d800f8e 100644
--- a/arch/arm/mach-imx/clk-imx31.c
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -175,8 +175,7 @@ int __init mx31_clocks_init(unsigned long fref)
175 mx31_revision(); 175 mx31_revision();
176 clk_disable_unprepare(clk[iim_gate]); 176 clk_disable_unprepare(clk[iim_gate]);
177 177
178 mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), 178 mxc_timer_init(MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR), MX31_INT_GPT);
179 MX31_INT_GPT);
180 179
181 return 0; 180 return 0;
182} 181}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
index a9e60bf7dd75..920a8cc42726 100644
--- a/arch/arm/mach-imx/clk-imx35.c
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -267,11 +267,9 @@ int __init mx35_clocks_init()
267 imx_print_silicon_rev("i.MX35", mx35_revision()); 267 imx_print_silicon_rev("i.MX35", mx35_revision());
268 268
269#ifdef CONFIG_MXC_USE_EPIT 269#ifdef CONFIG_MXC_USE_EPIT
270 epit_timer_init(&epit1_clk, 270 epit_timer_init(MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
271 MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
272#else 271#else
273 mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), 272 mxc_timer_init(MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
274 MX35_INT_GPT);
275#endif 273#endif
276 274
277 return 0; 275 return 0;
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
index fcd94f3b0f0e..a2200c77bf70 100644
--- a/arch/arm/mach-imx/clk-imx51-imx53.c
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -104,12 +104,12 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
104 periph_apm_sel, ARRAY_SIZE(periph_apm_sel)); 104 periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
105 clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1, 105 clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
106 main_bus_sel, ARRAY_SIZE(main_bus_sel)); 106 main_bus_sel, ARRAY_SIZE(main_bus_sel));
107 clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1, 107 clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCMR, 1, 1,
108 per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel)); 108 per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
109 clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2); 109 clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
110 clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3); 110 clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
111 clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3); 111 clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
112 clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0, 112 clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCMR, 0, 1,
113 per_root_sel, ARRAY_SIZE(per_root_sel)); 113 per_root_sel, ARRAY_SIZE(per_root_sel));
114 clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3); 114 clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
115 clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28); 115 clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
@@ -172,7 +172,7 @@ static void __init mx5_clocks_common_init(unsigned long rate_ckil,
172 clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12); 172 clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
173 clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14); 173 clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
174 clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16); 174 clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
175 clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18); 175 clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per_root", MXC_CCM_CCGR2, 18);
176 clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24); 176 clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
177 clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26); 177 clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
178 clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28); 178 clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
@@ -366,8 +366,7 @@ int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
366 clk_set_rate(clk[esdhc_b_podf], 166250000); 366 clk_set_rate(clk[esdhc_b_podf], 166250000);
367 367
368 /* System timer */ 368 /* System timer */
369 mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), 369 mxc_timer_init(MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR), MX51_INT_GPT);
370 MX51_INT_GPT);
371 370
372 clk_prepare_enable(clk[iim_gate]); 371 clk_prepare_enable(clk[iim_gate]);
373 imx_print_silicon_rev("i.MX51", mx51_revision()); 372 imx_print_silicon_rev("i.MX51", mx51_revision());
@@ -452,8 +451,7 @@ int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
452 clk_set_rate(clk[esdhc_b_podf], 200000000); 451 clk_set_rate(clk[esdhc_b_podf], 200000000);
453 452
454 /* System timer */ 453 /* System timer */
455 mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), 454 mxc_timer_init(MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR), MX53_INT_GPT);
456 MX53_INT_GPT);
457 455
458 clk_prepare_enable(clk[iim_gate]); 456 clk_prepare_enable(clk[iim_gate]);
459 imx_print_silicon_rev("i.MX53", mx53_revision()); 457 imx_print_silicon_rev("i.MX53", mx53_revision());
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
index cab02d0a15d6..17dc66a085a5 100644
--- a/arch/arm/mach-imx/clk-imx6q.c
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -122,10 +122,6 @@ static const char *cko1_sels[] = { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5
122 "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0", 122 "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
123 "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", }; 123 "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };
124 124
125static const char * const clks_init_on[] __initconst = {
126 "mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
127};
128
129enum mx6q_clks { 125enum mx6q_clks {
130 dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m, 126 dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
131 pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m, 127 pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
@@ -161,11 +157,14 @@ enum mx6q_clks {
161 157
162static struct clk *clk[clk_max]; 158static struct clk *clk[clk_max];
163 159
160static enum mx6q_clks const clks_init_on[] __initconst = {
161 mmdc_ch0_axi, mmdc_ch1_axi,
162};
163
164int __init mx6q_clocks_init(void) 164int __init mx6q_clocks_init(void)
165{ 165{
166 struct device_node *np; 166 struct device_node *np;
167 void __iomem *base; 167 void __iomem *base;
168 struct clk *c;
169 int i, irq; 168 int i, irq;
170 169
171 clk[dummy] = imx_clk_fixed("dummy", 0); 170 clk[dummy] = imx_clk_fixed("dummy", 0);
@@ -424,21 +423,14 @@ int __init mx6q_clocks_init(void)
424 clk_register_clkdev(clk[ahb], "ahb", NULL); 423 clk_register_clkdev(clk[ahb], "ahb", NULL);
425 clk_register_clkdev(clk[cko1], "cko1", NULL); 424 clk_register_clkdev(clk[cko1], "cko1", NULL);
426 425
427 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) { 426 for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
428 c = clk_get_sys(clks_init_on[i], NULL); 427 clk_prepare_enable(clk[clks_init_on[i]]);
429 if (IS_ERR(c)) {
430 pr_err("%s: failed to get clk %s", __func__,
431 clks_init_on[i]);
432 return PTR_ERR(c);
433 }
434 clk_prepare_enable(c);
435 }
436 428
437 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt"); 429 np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
438 base = of_iomap(np, 0); 430 base = of_iomap(np, 0);
439 WARN_ON(!base); 431 WARN_ON(!base);
440 irq = irq_of_parse_and_map(np, 0); 432 irq = irq_of_parse_and_map(np, 0);
441 mxc_timer_init(NULL, base, irq); 433 mxc_timer_init(base, irq);
442 434
443 return 0; 435 return 0;
444} 436}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
index 4685919deb63..0440379e3628 100644
--- a/arch/arm/mach-imx/clk-pllv2.c
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -74,30 +74,15 @@ struct clk_pllv2 {
74 void __iomem *base; 74 void __iomem *base;
75}; 75};
76 76
77static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw, 77static unsigned long __clk_pllv2_recalc_rate(unsigned long parent_rate,
78 unsigned long parent_rate) 78 u32 dp_ctl, u32 dp_op, u32 dp_mfd, u32 dp_mfn)
79{ 79{
80 long mfi, mfn, mfd, pdf, ref_clk, mfn_abs; 80 long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
81 unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl; 81 unsigned long dbl;
82 void __iomem *pllbase;
83 s64 temp; 82 s64 temp;
84 struct clk_pllv2 *pll = to_clk_pllv2(hw);
85
86 pllbase = pll->base;
87 83
88 dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
89 pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
90 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN; 84 dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;
91 85
92 if (pll_hfsm == 0) {
93 dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
94 dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
95 dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
96 } else {
97 dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
98 dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
99 dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
100 }
101 pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK; 86 pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
102 mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET; 87 mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
103 mfi = (mfi <= 5) ? 5 : mfi; 88 mfi = (mfi <= 5) ? 5 : mfi;
@@ -123,18 +108,30 @@ static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
123 return temp; 108 return temp;
124} 109}
125 110
126static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate, 111static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
127 unsigned long parent_rate) 112 unsigned long parent_rate)
128{ 113{
114 u32 dp_op, dp_mfd, dp_mfn, dp_ctl;
115 void __iomem *pllbase;
129 struct clk_pllv2 *pll = to_clk_pllv2(hw); 116 struct clk_pllv2 *pll = to_clk_pllv2(hw);
117
118 pllbase = pll->base;
119
120 dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
121 dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
122 dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
123 dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
124
125 return __clk_pllv2_recalc_rate(parent_rate, dp_ctl, dp_op, dp_mfd, dp_mfn);
126}
127
128static int __clk_pllv2_set_rate(unsigned long rate, unsigned long parent_rate,
129 u32 *dp_op, u32 *dp_mfd, u32 *dp_mfn)
130{
130 u32 reg; 131 u32 reg;
131 void __iomem *pllbase;
132 long mfi, pdf, mfn, mfd = 999999; 132 long mfi, pdf, mfn, mfd = 999999;
133 s64 temp64; 133 s64 temp64;
134 unsigned long quad_parent_rate; 134 unsigned long quad_parent_rate;
135 unsigned long pll_hfsm, dp_ctl;
136
137 pllbase = pll->base;
138 135
139 quad_parent_rate = 4 * parent_rate; 136 quad_parent_rate = 4 * parent_rate;
140 pdf = mfi = -1; 137 pdf = mfi = -1;
@@ -144,25 +141,41 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
144 return -EINVAL; 141 return -EINVAL;
145 pdf--; 142 pdf--;
146 143
147 temp64 = rate * (pdf+1) - quad_parent_rate * mfi; 144 temp64 = rate * (pdf + 1) - quad_parent_rate * mfi;
148 do_div(temp64, quad_parent_rate/1000000); 145 do_div(temp64, quad_parent_rate / 1000000);
149 mfn = (long)temp64; 146 mfn = (long)temp64;
150 147
148 reg = mfi << 4 | pdf;
149
150 *dp_op = reg;
151 *dp_mfd = mfd;
152 *dp_mfn = mfn;
153
154 return 0;
155}
156
157static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
158 unsigned long parent_rate)
159{
160 struct clk_pllv2 *pll = to_clk_pllv2(hw);
161 void __iomem *pllbase;
162 u32 dp_ctl, dp_op, dp_mfd, dp_mfn;
163 int ret;
164
165 pllbase = pll->base;
166
167
168 ret = __clk_pllv2_set_rate(rate, parent_rate, &dp_op, &dp_mfd, &dp_mfn);
169 if (ret)
170 return ret;
171
151 dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL); 172 dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
152 /* use dpdck0_2 */ 173 /* use dpdck0_2 */
153 __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL); 174 __raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
154 pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM; 175
155 if (pll_hfsm == 0) { 176 __raw_writel(dp_op, pllbase + MXC_PLL_DP_OP);
156 reg = mfi << 4 | pdf; 177 __raw_writel(dp_mfd, pllbase + MXC_PLL_DP_MFD);
157 __raw_writel(reg, pllbase + MXC_PLL_DP_OP); 178 __raw_writel(dp_mfn, pllbase + MXC_PLL_DP_MFN);
158 __raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
159 __raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
160 } else {
161 reg = mfi << 4 | pdf;
162 __raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
163 __raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
164 __raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
165 }
166 179
167 return 0; 180 return 0;
168} 181}
@@ -170,7 +183,11 @@ static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
170static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate, 183static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
171 unsigned long *prate) 184 unsigned long *prate)
172{ 185{
173 return rate; 186 u32 dp_op, dp_mfd, dp_mfn;
187
188 __clk_pllv2_set_rate(rate, *prate, &dp_op, &dp_mfd, &dp_mfn);
189 return __clk_pllv2_recalc_rate(*prate, MXC_PLL_DP_CTL_DPDCK0_2_EN,
190 dp_op, dp_mfd, dp_mfn);
174} 191}
175 192
176static int clk_pllv2_prepare(struct clk_hw *hw) 193static int clk_pllv2_prepare(struct clk_hw *hw)
diff --git a/arch/arm/mach-imx/crm-regs-imx5.h b/arch/arm/mach-imx/crm-regs-imx5.h
index 5e11ba7daee2..5e3f1f0f4cab 100644
--- a/arch/arm/mach-imx/crm-regs-imx5.h
+++ b/arch/arm/mach-imx/crm-regs-imx5.h
@@ -23,7 +23,7 @@
23#define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR) 23#define MX53_DPLL1_BASE MX53_IO_ADDRESS(MX53_PLL1_BASE_ADDR)
24#define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR) 24#define MX53_DPLL2_BASE MX53_IO_ADDRESS(MX53_PLL2_BASE_ADDR)
25#define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR) 25#define MX53_DPLL3_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR)
26#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL3_BASE_ADDR) 26#define MX53_DPLL4_BASE MX53_IO_ADDRESS(MX53_PLL4_BASE_ADDR)
27 27
28/* PLL Register Offsets */ 28/* PLL Register Offsets */
29#define MXC_PLL_DP_CTL 0x00 29#define MXC_PLL_DP_CTL 0x00
diff --git a/arch/arm/mach-imx/hotplug.c b/arch/arm/mach-imx/hotplug.c
index 89493abd497c..20ed2d56c1af 100644
--- a/arch/arm/mach-imx/hotplug.c
+++ b/arch/arm/mach-imx/hotplug.c
@@ -12,6 +12,7 @@
12 12
13#include <linux/errno.h> 13#include <linux/errno.h>
14#include <asm/cacheflush.h> 14#include <asm/cacheflush.h>
15#include <asm/cp15.h>
15#include <mach/common.h> 16#include <mach/common.h>
16 17
17int platform_cpu_kill(unsigned int cpu) 18int platform_cpu_kill(unsigned int cpu)
@@ -19,6 +20,44 @@ int platform_cpu_kill(unsigned int cpu)
19 return 1; 20 return 1;
20} 21}
21 22
23static inline void cpu_enter_lowpower(void)
24{
25 unsigned int v;
26
27 flush_cache_all();
28 asm volatile(
29 "mcr p15, 0, %1, c7, c5, 0\n"
30 " mcr p15, 0, %1, c7, c10, 4\n"
31 /*
32 * Turn off coherency
33 */
34 " mrc p15, 0, %0, c1, c0, 1\n"
35 " bic %0, %0, %3\n"
36 " mcr p15, 0, %0, c1, c0, 1\n"
37 " mrc p15, 0, %0, c1, c0, 0\n"
38 " bic %0, %0, %2\n"
39 " mcr p15, 0, %0, c1, c0, 0\n"
40 : "=&r" (v)
41 : "r" (0), "Ir" (CR_C), "Ir" (0x40)
42 : "cc");
43}
44
45static inline void cpu_leave_lowpower(void)
46{
47 unsigned int v;
48
49 asm volatile(
50 "mrc p15, 0, %0, c1, c0, 0\n"
51 " orr %0, %0, %1\n"
52 " mcr p15, 0, %0, c1, c0, 0\n"
53 " mrc p15, 0, %0, c1, c0, 1\n"
54 " orr %0, %0, %2\n"
55 " mcr p15, 0, %0, c1, c0, 1\n"
56 : "=&r" (v)
57 : "Ir" (CR_C), "Ir" (0x40)
58 : "cc");
59}
60
22/* 61/*
23 * platform-specific code to shutdown a CPU 62 * platform-specific code to shutdown a CPU
24 * 63 *
@@ -26,9 +65,10 @@ int platform_cpu_kill(unsigned int cpu)
26 */ 65 */
27void platform_cpu_die(unsigned int cpu) 66void platform_cpu_die(unsigned int cpu)
28{ 67{
29 flush_cache_all(); 68 cpu_enter_lowpower();
30 imx_enable_cpu(cpu, false); 69 imx_enable_cpu(cpu, false);
31 cpu_do_idle(); 70 cpu_do_idle();
71 cpu_leave_lowpower();
32 72
33 /* We should never return from idle */ 73 /* We should never return from idle */
34 panic("cpu %d unexpectedly exit from shutdown\n", cpu); 74 panic("cpu %d unexpectedly exit from shutdown\n", cpu);
diff --git a/arch/arm/mach-imx/imx27-dt.c b/arch/arm/mach-imx/imx27-dt.c
index ed38d03c61f2..eee0cc8d92a4 100644
--- a/arch/arm/mach-imx/imx27-dt.c
+++ b/arch/arm/mach-imx/imx27-dt.c
@@ -29,6 +29,7 @@ static const struct of_dev_auxdata imx27_auxdata_lookup[] __initconst = {
29 OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL), 29 OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI2_BASE_ADDR, "imx27-cspi.1", NULL),
30 OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL), 30 OF_DEV_AUXDATA("fsl,imx27-cspi", MX27_CSPI3_BASE_ADDR, "imx27-cspi.2", NULL),
31 OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL), 31 OF_DEV_AUXDATA("fsl,imx27-wdt", MX27_WDOG_BASE_ADDR, "imx2-wdt.0", NULL),
32 OF_DEV_AUXDATA("fsl,imx27-nand", MX27_NFC_BASE_ADDR, "mxc_nand.0", NULL),
32 { /* sentinel */ } 33 { /* sentinel */ }
33}; 34};
34 35
diff --git a/arch/arm/mach-imx/mach-cpuimx35.c b/arch/arm/mach-imx/mach-cpuimx35.c
index c515f8ede1a1..6450303f1a7a 100644
--- a/arch/arm/mach-imx/mach-cpuimx35.c
+++ b/arch/arm/mach-imx/mach-cpuimx35.c
@@ -70,7 +70,6 @@ static struct i2c_board_info eukrea_cpuimx35_i2c_devices[] = {
70 I2C_BOARD_INFO("pcf8563", 0x51), 70 I2C_BOARD_INFO("pcf8563", 0x51),
71 }, { 71 }, {
72 I2C_BOARD_INFO("tsc2007", 0x48), 72 I2C_BOARD_INFO("tsc2007", 0x48),
73 .type = "tsc2007",
74 .platform_data = &tsc2007_info, 73 .platform_data = &tsc2007_info,
75 .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO), 74 .irq = IMX_GPIO_TO_IRQ(TSC2007_IRQGPIO),
76 }, 75 },
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ac50f1671e38..1e09de50cbcd 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -142,7 +142,6 @@ static struct i2c_board_info eukrea_cpuimx51sd_i2c_devices[] = {
142 I2C_BOARD_INFO("pcf8563", 0x51), 142 I2C_BOARD_INFO("pcf8563", 0x51),
143 }, { 143 }, {
144 I2C_BOARD_INFO("tsc2007", 0x49), 144 I2C_BOARD_INFO("tsc2007", 0x49),
145 .type = "tsc2007",
146 .platform_data = &tsc2007_info, 145 .platform_data = &tsc2007_info,
147 }, 146 },
148}; 147};
diff --git a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
index dff82eb57cd9..f76edb96a48a 100644
--- a/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
+++ b/arch/arm/mach-imx/mach-imx27_visstrim_m10.c
@@ -116,6 +116,8 @@ static const int visstrim_m10_pins[] __initconst = {
116 PB23_PF_USB_PWR, 116 PB23_PF_USB_PWR,
117 PB24_PF_USB_OC, 117 PB24_PF_USB_OC,
118 /* CSI */ 118 /* CSI */
119 TVP5150_RSTN | GPIO_GPIO | GPIO_OUT,
120 TVP5150_PWDN | GPIO_GPIO | GPIO_OUT,
119 PB10_PF_CSI_D0, 121 PB10_PF_CSI_D0,
120 PB11_PF_CSI_D1, 122 PB11_PF_CSI_D1,
121 PB12_PF_CSI_D2, 123 PB12_PF_CSI_D2,
@@ -147,6 +149,24 @@ static struct gpio visstrim_m10_version_gpios[] = {
147 { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" }, 149 { MOTHERBOARD_BIT2, GPIOF_IN, "mother-version-2" },
148}; 150};
149 151
152static const struct gpio visstrim_m10_gpios[] __initconst = {
153 {
154 .gpio = TVP5150_RSTN,
155 .flags = GPIOF_DIR_OUT | GPIOF_INIT_HIGH,
156 .label = "tvp5150_rstn",
157 },
158 {
159 .gpio = TVP5150_PWDN,
160 .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
161 .label = "tvp5150_pwdn",
162 },
163 {
164 .gpio = OTG_PHY_CS_GPIO,
165 .flags = GPIOF_DIR_OUT | GPIOF_INIT_LOW,
166 .label = "usbotg_cs",
167 },
168};
169
150/* Camera */ 170/* Camera */
151static int visstrim_camera_power(struct device *dev, int on) 171static int visstrim_camera_power(struct device *dev, int on)
152{ 172{
@@ -190,13 +210,6 @@ static void __init visstrim_camera_init(void)
190 struct platform_device *pdev; 210 struct platform_device *pdev;
191 int dma; 211 int dma;
192 212
193 /* Initialize tvp5150 gpios */
194 mxc_gpio_mode(TVP5150_RSTN | GPIO_GPIO | GPIO_OUT);
195 mxc_gpio_mode(TVP5150_PWDN | GPIO_GPIO | GPIO_OUT);
196 gpio_set_value(TVP5150_RSTN, 1);
197 gpio_set_value(TVP5150_PWDN, 0);
198 ndelay(1);
199
200 gpio_set_value(TVP5150_PWDN, 1); 213 gpio_set_value(TVP5150_PWDN, 1);
201 ndelay(1); 214 ndelay(1);
202 gpio_set_value(TVP5150_RSTN, 0); 215 gpio_set_value(TVP5150_RSTN, 0);
@@ -377,10 +390,6 @@ static struct i2c_board_info visstrim_m10_i2c_devices[] = {
377/* USB OTG */ 390/* USB OTG */
378static int otg_phy_init(struct platform_device *pdev) 391static int otg_phy_init(struct platform_device *pdev)
379{ 392{
380 gpio_set_value(OTG_PHY_CS_GPIO, 0);
381
382 mdelay(10);
383
384 return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED); 393 return mx27_initialize_usb_hw(pdev->id, MXC_EHCI_POWER_PINS_ENABLED);
385} 394}
386 395
@@ -435,6 +444,11 @@ static void __init visstrim_m10_board_init(void)
435 if (ret) 444 if (ret)
436 pr_err("Failed to setup pins (%d)\n", ret); 445 pr_err("Failed to setup pins (%d)\n", ret);
437 446
447 ret = gpio_request_array(visstrim_m10_gpios,
448 ARRAY_SIZE(visstrim_m10_gpios));
449 if (ret)
450 pr_err("Failed to request gpios (%d)\n", ret);
451
438 imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata); 452 imx27_add_imx_ssi(0, &visstrim_m10_ssi_pdata);
439 imx27_add_imx_uart0(&uart_pdata); 453 imx27_add_imx_uart0(&uart_pdata);
440 454
diff --git a/arch/arm/mach-imx/mach-mx21ads.c b/arch/arm/mach-imx/mach-mx21ads.c
index d14bbe949a4f..3e7401fca76c 100644
--- a/arch/arm/mach-imx/mach-mx21ads.c
+++ b/arch/arm/mach-imx/mach-mx21ads.c
@@ -32,7 +32,7 @@
32 * Memory-mapped I/O on MX21ADS base board 32 * Memory-mapped I/O on MX21ADS base board
33 */ 33 */
34#define MX21ADS_MMIO_BASE_ADDR 0xf5000000 34#define MX21ADS_MMIO_BASE_ADDR 0xf5000000
35#define MX21ADS_MMIO_SIZE SZ_16M 35#define MX21ADS_MMIO_SIZE 0xc00000
36 36
37#define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \ 37#define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \
38 (MX21ADS_MMIO_BASE_ADDR + (offset)) 38 (MX21ADS_MMIO_BASE_ADDR + (offset))
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 967ed5b35a45..a8983b9778d1 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -86,6 +86,7 @@ static void __iomem *imx3_ioremap_caller(unsigned long phys_addr, size_t size,
86 86
87void __init imx3_init_l2x0(void) 87void __init imx3_init_l2x0(void)
88{ 88{
89#ifdef CONFIG_CACHE_L2X0
89 void __iomem *l2x0_base; 90 void __iomem *l2x0_base;
90 void __iomem *clkctl_base; 91 void __iomem *clkctl_base;
91 92
@@ -115,6 +116,7 @@ void __init imx3_init_l2x0(void)
115 } 116 }
116 117
117 l2x0_init(l2x0_base, 0x00030024, 0x00000000); 118 l2x0_init(l2x0_base, 0x00030024, 0x00000000);
119#endif
118} 120}
119 121
120#ifdef CONFIG_SOC_IMX31 122#ifdef CONFIG_SOC_IMX31
@@ -179,6 +181,8 @@ void __init imx31_soc_init(void)
179 mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0); 181 mxc_register_gpio("imx31-gpio", 1, MX31_GPIO2_BASE_ADDR, SZ_16K, MX31_INT_GPIO2, 0);
180 mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0); 182 mxc_register_gpio("imx31-gpio", 2, MX31_GPIO3_BASE_ADDR, SZ_16K, MX31_INT_GPIO3, 0);
181 183
184 pinctrl_provide_dummies();
185
182 if (to_version == 1) { 186 if (to_version == 1) {
183 strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin", 187 strncpy(imx31_sdma_pdata.fw_name, "sdma-imx31-to1.bin",
184 strlen(imx31_sdma_pdata.fw_name)); 188 strlen(imx31_sdma_pdata.fw_name));
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index feeee17da96b..1d003053d562 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -202,6 +202,8 @@ void __init imx51_soc_init(void)
202 mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH); 202 mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
203 mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH); 203 mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);
204 204
205 pinctrl_provide_dummies();
206
205 /* i.mx51 has the i.mx35 type sdma */ 207 /* i.mx51 has the i.mx35 type sdma */
206 imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata); 208 imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);
207 209
diff --git a/arch/arm/mach-ixp4xx/common.c b/arch/arm/mach-ixp4xx/common.c
index ebbd7fc90eb4..a9f80943d01f 100644
--- a/arch/arm/mach-ixp4xx/common.c
+++ b/arch/arm/mach-ixp4xx/common.c
@@ -28,6 +28,7 @@
28#include <linux/clockchips.h> 28#include <linux/clockchips.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/export.h> 30#include <linux/export.h>
31#include <linux/gpio.h>
31 32
32#include <mach/udc.h> 33#include <mach/udc.h>
33#include <mach/hardware.h> 34#include <mach/hardware.h>
@@ -107,7 +108,7 @@ static signed char irq2gpio[32] = {
107 7, 8, 9, 10, 11, 12, -1, -1, 108 7, 8, 9, 10, 11, 12, -1, -1,
108}; 109};
109 110
110int gpio_to_irq(int gpio) 111static int ixp4xx_gpio_to_irq(struct gpio_chip *chip, unsigned gpio)
111{ 112{
112 int irq; 113 int irq;
113 114
@@ -117,7 +118,6 @@ int gpio_to_irq(int gpio)
117 } 118 }
118 return -EINVAL; 119 return -EINVAL;
119} 120}
120EXPORT_SYMBOL(gpio_to_irq);
121 121
122int irq_to_gpio(unsigned int irq) 122int irq_to_gpio(unsigned int irq)
123{ 123{
@@ -383,12 +383,56 @@ static struct platform_device *ixp46x_devices[] __initdata = {
383unsigned long ixp4xx_exp_bus_size; 383unsigned long ixp4xx_exp_bus_size;
384EXPORT_SYMBOL(ixp4xx_exp_bus_size); 384EXPORT_SYMBOL(ixp4xx_exp_bus_size);
385 385
386static int ixp4xx_gpio_direction_input(struct gpio_chip *chip, unsigned gpio)
387{
388 gpio_line_config(gpio, IXP4XX_GPIO_IN);
389
390 return 0;
391}
392
393static int ixp4xx_gpio_direction_output(struct gpio_chip *chip, unsigned gpio,
394 int level)
395{
396 gpio_line_set(gpio, level);
397 gpio_line_config(gpio, IXP4XX_GPIO_OUT);
398
399 return 0;
400}
401
402static int ixp4xx_gpio_get_value(struct gpio_chip *chip, unsigned gpio)
403{
404 int value;
405
406 gpio_line_get(gpio, &value);
407
408 return value;
409}
410
411static void ixp4xx_gpio_set_value(struct gpio_chip *chip, unsigned gpio,
412 int value)
413{
414 gpio_line_set(gpio, value);
415}
416
417static struct gpio_chip ixp4xx_gpio_chip = {
418 .label = "IXP4XX_GPIO_CHIP",
419 .direction_input = ixp4xx_gpio_direction_input,
420 .direction_output = ixp4xx_gpio_direction_output,
421 .get = ixp4xx_gpio_get_value,
422 .set = ixp4xx_gpio_set_value,
423 .to_irq = ixp4xx_gpio_to_irq,
424 .base = 0,
425 .ngpio = 16,
426};
427
386void __init ixp4xx_sys_init(void) 428void __init ixp4xx_sys_init(void)
387{ 429{
388 ixp4xx_exp_bus_size = SZ_16M; 430 ixp4xx_exp_bus_size = SZ_16M;
389 431
390 platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices)); 432 platform_add_devices(ixp4xx_devices, ARRAY_SIZE(ixp4xx_devices));
391 433
434 gpiochip_add(&ixp4xx_gpio_chip);
435
392 if (cpu_is_ixp46x()) { 436 if (cpu_is_ixp46x()) {
393 int region; 437 int region;
394 438
diff --git a/arch/arm/mach-ixp4xx/include/mach/gpio.h b/arch/arm/mach-ixp4xx/include/mach/gpio.h
index 83d6b4ed60bb..ef37f2635b0e 100644
--- a/arch/arm/mach-ixp4xx/include/mach/gpio.h
+++ b/arch/arm/mach-ixp4xx/include/mach/gpio.h
@@ -1,79 +1,2 @@
1/* 1/* empty */
2 * arch/arm/mach-ixp4xx/include/mach/gpio.h
3 *
4 * IXP4XX GPIO wrappers for arch-neutral GPIO calls
5 *
6 * Written by Milan Svoboda <msvoboda@ra.rockwell.com>
7 * Based on PXA implementation by Philipp Zabel <philipp.zabel@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 */
24
25#ifndef __ASM_ARCH_IXP4XX_GPIO_H
26#define __ASM_ARCH_IXP4XX_GPIO_H
27
28#include <linux/kernel.h>
29#include <mach/hardware.h>
30
31#define __ARM_GPIOLIB_COMPLEX
32
33static inline int gpio_request(unsigned gpio, const char *label)
34{
35 return 0;
36}
37
38static inline void gpio_free(unsigned gpio)
39{
40 might_sleep();
41
42 return;
43}
44
45static inline int gpio_direction_input(unsigned gpio)
46{
47 gpio_line_config(gpio, IXP4XX_GPIO_IN);
48 return 0;
49}
50
51static inline int gpio_direction_output(unsigned gpio, int level)
52{
53 gpio_line_set(gpio, level);
54 gpio_line_config(gpio, IXP4XX_GPIO_OUT);
55 return 0;
56}
57
58static inline int gpio_get_value(unsigned gpio)
59{
60 int value;
61
62 gpio_line_get(gpio, &value);
63
64 return value;
65}
66
67static inline void gpio_set_value(unsigned gpio, int value)
68{
69 gpio_line_set(gpio, value);
70}
71
72#include <asm-generic/gpio.h> /* cansleep wrappers */
73
74extern int gpio_to_irq(int gpio);
75#define gpio_to_irq gpio_to_irq
76extern int irq_to_gpio(unsigned int irq);
77
78#endif
79 2
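
Editor's note, not part of the patch: with the mach-specific wrappers above removed, board code on ixp4xx is expected to go through the generic gpiolib calls that are now backed by ixp4xx_gpio_chip. A minimal hedged sketch of that consumer side follows; the GPIO number and label are made up for illustration.

/*
 * Hedged sketch (assumption, not from the commit): claim a pin and map it
 * to an IRQ using only generic gpiolib calls, which now route through
 * ixp4xx_gpio_chip (to_irq resolves via the irq2gpio table).
 */
#include <linux/gpio.h>

static int example_claim_ready_pin(void)
{
	int irq, ret;

	/* request GPIO 5 and configure it as an input in one call */
	ret = gpio_request_one(5, GPIOF_IN, "example-ready");
	if (ret)
		return ret;

	/* resolved through the chip's .to_irq callback */
	irq = gpio_to_irq(5);
	if (irq < 0) {
		gpio_free(5);
		return irq;
	}

	return irq;
}
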
diff --git a/arch/arm/mach-ixp4xx/ixdp425-setup.c b/arch/arm/mach-ixp4xx/ixdp425-setup.c
index 3d742aee1773..108a9d3f382d 100644
--- a/arch/arm/mach-ixp4xx/ixdp425-setup.c
+++ b/arch/arm/mach-ixp4xx/ixdp425-setup.c
@@ -60,8 +60,6 @@ static struct platform_device ixdp425_flash = {
60#if defined(CONFIG_MTD_NAND_PLATFORM) || \ 60#if defined(CONFIG_MTD_NAND_PLATFORM) || \
61 defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 61 defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
62 62
63const char *part_probes[] = { "cmdlinepart", NULL };
64
65static struct mtd_partition ixdp425_partitions[] = { 63static struct mtd_partition ixdp425_partitions[] = {
66 { 64 {
67 .name = "ixp400 NAND FS 0", 65 .name = "ixp400 NAND FS 0",
@@ -100,8 +98,6 @@ static struct platform_nand_data ixdp425_flash_nand_data = {
100 .chip = { 98 .chip = {
101 .nr_chips = 1, 99 .nr_chips = 1,
102 .chip_delay = 30, 100 .chip_delay = 30,
103 .options = NAND_NO_AUTOINCR,
104 .part_probe_types = part_probes,
105 .partitions = ixdp425_partitions, 101 .partitions = ixdp425_partitions,
106 .nr_partitions = ARRAY_SIZE(ixdp425_partitions), 102 .nr_partitions = ARRAY_SIZE(ixdp425_partitions),
107 }, 103 },
diff --git a/arch/arm/mach-kirkwood/board-iconnect.c b/arch/arm/mach-kirkwood/board-iconnect.c
index 2222c5739519..b0d3cc49269d 100644
--- a/arch/arm/mach-kirkwood/board-iconnect.c
+++ b/arch/arm/mach-kirkwood/board-iconnect.c
@@ -20,9 +20,6 @@
20#include <linux/mv643xx_eth.h> 20#include <linux/mv643xx_eth.h>
21#include <linux/gpio.h> 21#include <linux/gpio.h>
22#include <linux/leds.h> 22#include <linux/leds.h>
23#include <linux/spi/flash.h>
24#include <linux/spi/spi.h>
25#include <linux/spi/orion_spi.h>
26#include <linux/i2c.h> 23#include <linux/i2c.h>
27#include <linux/input.h> 24#include <linux/input.h>
28#include <linux/gpio_keys.h> 25#include <linux/gpio_keys.h>
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 25fb3fd418ef..f261cd242643 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -159,6 +159,7 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
159 gate_fn->gate.flags = clk_gate_flags; 159 gate_fn->gate.flags = clk_gate_flags;
160 gate_fn->gate.lock = lock; 160 gate_fn->gate.lock = lock;
161 gate_fn->gate.hw.init = &init; 161 gate_fn->gate.hw.init = &init;
162 gate_fn->fn = fn;
162 163
163 /* ops is the gate ops, but with our disable function */ 164 /* ops is the gate ops, but with our disable function */
164 if (clk_gate_fn_ops.disable != clk_gate_fn_disable) { 165 if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
@@ -193,9 +194,11 @@ static struct clk __init *kirkwood_register_gate_fn(const char *name,
193 bit_idx, 0, &gating_lock, fn); 194 bit_idx, 0, &gating_lock, fn);
194} 195}
195 196
197static struct clk *ge0, *ge1;
198
196void __init kirkwood_clk_init(void) 199void __init kirkwood_clk_init(void)
197{ 200{
198 struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio; 201 struct clk *runit, *sata0, *sata1, *usb0, *sdio;
199 struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio; 202 struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
200 203
201 tclk = clk_register_fixed_rate(NULL, "tclk", NULL, 204 tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
@@ -257,6 +260,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
257 orion_ge00_init(eth_data, 260 orion_ge00_init(eth_data,
258 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, 261 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
259 IRQ_KIRKWOOD_GE00_ERR); 262 IRQ_KIRKWOOD_GE00_ERR);
263 /* The interface forgets the MAC address assigned by u-boot if
264 the clock is turned off, so claim the clk now. */
265 clk_prepare_enable(ge0);
260} 266}
261 267
262 268
@@ -268,6 +274,7 @@ void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
268 orion_ge01_init(eth_data, 274 orion_ge01_init(eth_data,
269 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, 275 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
270 IRQ_KIRKWOOD_GE01_ERR); 276 IRQ_KIRKWOOD_GE01_ERR);
277 clk_prepare_enable(ge1);
271} 278}
272 279
273 280
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 3eee37a3b501..a115142f8690 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -38,6 +38,7 @@
38#define IRQ_MASK_HIGH_OFF 0x0014 38#define IRQ_MASK_HIGH_OFF 0x0014
39 39
40#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300) 40#define TIMER_VIRT_BASE (BRIDGE_VIRT_BASE | 0x0300)
41#define TIMER_PHYS_BASE (BRIDGE_PHYS_BASE | 0x0300)
41 42
42#define L2_CONFIG_REG (BRIDGE_VIRT_BASE | 0x0128) 43#define L2_CONFIG_REG (BRIDGE_VIRT_BASE | 0x0128)
43#define L2_WRITETHROUGH 0x00000010 44#define L2_WRITETHROUGH 0x00000010
diff --git a/arch/arm/mach-kirkwood/include/mach/kirkwood.h b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
index fede3d503efa..c5b68510776b 100644
--- a/arch/arm/mach-kirkwood/include/mach/kirkwood.h
+++ b/arch/arm/mach-kirkwood/include/mach/kirkwood.h
@@ -80,6 +80,7 @@
80#define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100) 80#define UART1_VIRT_BASE (DEV_BUS_VIRT_BASE | 0x2100)
81 81
82#define BRIDGE_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x20000) 82#define BRIDGE_VIRT_BASE (KIRKWOOD_REGS_VIRT_BASE | 0x20000)
83#define BRIDGE_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x20000)
83 84
84#define CRYPTO_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x30000) 85#define CRYPTO_PHYS_BASE (KIRKWOOD_REGS_PHYS_BASE | 0x30000)
85 86
diff --git a/arch/arm/mach-mmp/irq.c b/arch/arm/mach-mmp/irq.c
index fcfe0e3bd701..e60c7d98922b 100644
--- a/arch/arm/mach-mmp/irq.c
+++ b/arch/arm/mach-mmp/irq.c
@@ -241,6 +241,7 @@ void __init mmp2_init_icu(void)
241 icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE; 241 icu_data[1].clr_mfp_irq_base = IRQ_MMP2_PMIC_BASE;
242 icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE; 242 icu_data[1].clr_mfp_hwirq = IRQ_MMP2_PMIC - IRQ_MMP2_PMIC_BASE;
243 icu_data[1].nr_irqs = 2; 243 icu_data[1].nr_irqs = 2;
244 icu_data[1].cascade_irq = 4;
244 icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE; 245 icu_data[1].virq_base = IRQ_MMP2_PMIC_BASE;
245 icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs, 246 icu_data[1].domain = irq_domain_add_legacy(NULL, icu_data[1].nr_irqs,
246 icu_data[1].virq_base, 0, 247 icu_data[1].virq_base, 0,
@@ -249,6 +250,7 @@ void __init mmp2_init_icu(void)
249 icu_data[2].reg_status = mmp_icu_base + 0x154; 250 icu_data[2].reg_status = mmp_icu_base + 0x154;
250 icu_data[2].reg_mask = mmp_icu_base + 0x16c; 251 icu_data[2].reg_mask = mmp_icu_base + 0x16c;
251 icu_data[2].nr_irqs = 2; 252 icu_data[2].nr_irqs = 2;
253 icu_data[2].cascade_irq = 5;
252 icu_data[2].virq_base = IRQ_MMP2_RTC_BASE; 254 icu_data[2].virq_base = IRQ_MMP2_RTC_BASE;
253 icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs, 255 icu_data[2].domain = irq_domain_add_legacy(NULL, icu_data[2].nr_irqs,
254 icu_data[2].virq_base, 0, 256 icu_data[2].virq_base, 0,
@@ -257,6 +259,7 @@ void __init mmp2_init_icu(void)
257 icu_data[3].reg_status = mmp_icu_base + 0x180; 259 icu_data[3].reg_status = mmp_icu_base + 0x180;
258 icu_data[3].reg_mask = mmp_icu_base + 0x17c; 260 icu_data[3].reg_mask = mmp_icu_base + 0x17c;
259 icu_data[3].nr_irqs = 3; 261 icu_data[3].nr_irqs = 3;
262 icu_data[3].cascade_irq = 9;
260 icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE; 263 icu_data[3].virq_base = IRQ_MMP2_KEYPAD_BASE;
261 icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs, 264 icu_data[3].domain = irq_domain_add_legacy(NULL, icu_data[3].nr_irqs,
262 icu_data[3].virq_base, 0, 265 icu_data[3].virq_base, 0,
@@ -265,6 +268,7 @@ void __init mmp2_init_icu(void)
265 icu_data[4].reg_status = mmp_icu_base + 0x158; 268 icu_data[4].reg_status = mmp_icu_base + 0x158;
266 icu_data[4].reg_mask = mmp_icu_base + 0x170; 269 icu_data[4].reg_mask = mmp_icu_base + 0x170;
267 icu_data[4].nr_irqs = 5; 270 icu_data[4].nr_irqs = 5;
271 icu_data[4].cascade_irq = 17;
268 icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE; 272 icu_data[4].virq_base = IRQ_MMP2_TWSI_BASE;
269 icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs, 273 icu_data[4].domain = irq_domain_add_legacy(NULL, icu_data[4].nr_irqs,
270 icu_data[4].virq_base, 0, 274 icu_data[4].virq_base, 0,
@@ -273,6 +277,7 @@ void __init mmp2_init_icu(void)
273 icu_data[5].reg_status = mmp_icu_base + 0x15c; 277 icu_data[5].reg_status = mmp_icu_base + 0x15c;
274 icu_data[5].reg_mask = mmp_icu_base + 0x174; 278 icu_data[5].reg_mask = mmp_icu_base + 0x174;
275 icu_data[5].nr_irqs = 15; 279 icu_data[5].nr_irqs = 15;
280 icu_data[5].cascade_irq = 35;
276 icu_data[5].virq_base = IRQ_MMP2_MISC_BASE; 281 icu_data[5].virq_base = IRQ_MMP2_MISC_BASE;
277 icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs, 282 icu_data[5].domain = irq_domain_add_legacy(NULL, icu_data[5].nr_irqs,
278 icu_data[5].virq_base, 0, 283 icu_data[5].virq_base, 0,
@@ -281,6 +286,7 @@ void __init mmp2_init_icu(void)
281 icu_data[6].reg_status = mmp_icu_base + 0x160; 286 icu_data[6].reg_status = mmp_icu_base + 0x160;
282 icu_data[6].reg_mask = mmp_icu_base + 0x178; 287 icu_data[6].reg_mask = mmp_icu_base + 0x178;
283 icu_data[6].nr_irqs = 2; 288 icu_data[6].nr_irqs = 2;
289 icu_data[6].cascade_irq = 51;
284 icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE; 290 icu_data[6].virq_base = IRQ_MMP2_MIPI_HSI1_BASE;
285 icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs, 291 icu_data[6].domain = irq_domain_add_legacy(NULL, icu_data[6].nr_irqs,
286 icu_data[6].virq_base, 0, 292 icu_data[6].virq_base, 0,
@@ -289,6 +295,7 @@ void __init mmp2_init_icu(void)
289 icu_data[7].reg_status = mmp_icu_base + 0x188; 295 icu_data[7].reg_status = mmp_icu_base + 0x188;
290 icu_data[7].reg_mask = mmp_icu_base + 0x184; 296 icu_data[7].reg_mask = mmp_icu_base + 0x184;
291 icu_data[7].nr_irqs = 2; 297 icu_data[7].nr_irqs = 2;
298 icu_data[7].cascade_irq = 55;
292 icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE; 299 icu_data[7].virq_base = IRQ_MMP2_MIPI_HSI0_BASE;
293 icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs, 300 icu_data[7].domain = irq_domain_add_legacy(NULL, icu_data[7].nr_irqs,
294 icu_data[7].virq_base, 0, 301 icu_data[7].virq_base, 0,
diff --git a/arch/arm/mach-nomadik/board-nhk8815.c b/arch/arm/mach-nomadik/board-nhk8815.c
index 58cacafcf662..2e8d3e176bc7 100644
--- a/arch/arm/mach-nomadik/board-nhk8815.c
+++ b/arch/arm/mach-nomadik/board-nhk8815.c
@@ -111,7 +111,7 @@ static struct nomadik_nand_platform_data nhk8815_nand_data = {
111 .parts = nhk8815_partitions, 111 .parts = nhk8815_partitions,
112 .nparts = ARRAY_SIZE(nhk8815_partitions), 112 .nparts = ARRAY_SIZE(nhk8815_partitions),
113 .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \ 113 .options = NAND_COPYBACK | NAND_CACHEPRG | NAND_NO_PADDING \
114 | NAND_NO_READRDY | NAND_NO_AUTOINCR, 114 | NAND_NO_READRDY,
115 .init = nhk8815_nand_init, 115 .init = nhk8815_nand_init,
116}; 116};
117 117
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index c7364fdbda05..6872f3fd400f 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -192,14 +192,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
192 return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN); 192 return gpio_get_value(FSAMPLE_NAND_RB_GPIO_PIN);
193} 193}
194 194
195static const char *part_probes[] = { "cmdlinepart", NULL };
196
197static struct platform_nand_data nand_data = { 195static struct platform_nand_data nand_data = {
198 .chip = { 196 .chip = {
199 .nr_chips = 1, 197 .nr_chips = 1,
200 .chip_offset = 0, 198 .chip_offset = 0,
201 .options = NAND_SAMSUNG_LP_OPTIONS, 199 .options = NAND_SAMSUNG_LP_OPTIONS,
202 .part_probe_types = part_probes,
203 }, 200 },
204 .ctrl = { 201 .ctrl = {
205 .cmd_ctrl = omap1_nand_cmd_ctl, 202 .cmd_ctrl = omap1_nand_cmd_ctl,
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 7e503686f7af..a28e989a63f4 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -186,8 +186,6 @@ static int h2_nand_dev_ready(struct mtd_info *mtd)
186 return gpio_get_value(H2_NAND_RB_GPIO_PIN); 186 return gpio_get_value(H2_NAND_RB_GPIO_PIN);
187} 187}
188 188
189static const char *h2_part_probes[] = { "cmdlinepart", NULL };
190
191static struct platform_nand_data h2_nand_platdata = { 189static struct platform_nand_data h2_nand_platdata = {
192 .chip = { 190 .chip = {
193 .nr_chips = 1, 191 .nr_chips = 1,
@@ -195,7 +193,6 @@ static struct platform_nand_data h2_nand_platdata = {
195 .nr_partitions = ARRAY_SIZE(h2_nand_partitions), 193 .nr_partitions = ARRAY_SIZE(h2_nand_partitions),
196 .partitions = h2_nand_partitions, 194 .partitions = h2_nand_partitions,
197 .options = NAND_SAMSUNG_LP_OPTIONS, 195 .options = NAND_SAMSUNG_LP_OPTIONS,
198 .part_probe_types = h2_part_probes,
199 }, 196 },
200 .ctrl = { 197 .ctrl = {
201 .cmd_ctrl = omap1_nand_cmd_ctl, 198 .cmd_ctrl = omap1_nand_cmd_ctl,
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index 9fb03f189d93..108a8640fc6f 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -188,8 +188,6 @@ static int nand_dev_ready(struct mtd_info *mtd)
188 return gpio_get_value(H3_NAND_RB_GPIO_PIN); 188 return gpio_get_value(H3_NAND_RB_GPIO_PIN);
189} 189}
190 190
191static const char *part_probes[] = { "cmdlinepart", NULL };
192
193static struct platform_nand_data nand_platdata = { 191static struct platform_nand_data nand_platdata = {
194 .chip = { 192 .chip = {
195 .nr_chips = 1, 193 .nr_chips = 1,
@@ -197,7 +195,6 @@ static struct platform_nand_data nand_platdata = {
197 .nr_partitions = ARRAY_SIZE(nand_partitions), 195 .nr_partitions = ARRAY_SIZE(nand_partitions),
198 .partitions = nand_partitions, 196 .partitions = nand_partitions,
199 .options = NAND_SAMSUNG_LP_OPTIONS, 197 .options = NAND_SAMSUNG_LP_OPTIONS,
200 .part_probe_types = part_probes,
201 }, 198 },
202 .ctrl = { 199 .ctrl = {
203 .cmd_ctrl = omap1_nand_cmd_ctl, 200 .cmd_ctrl = omap1_nand_cmd_ctl,
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index f2cb24387c22..703d55ecffe2 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -150,14 +150,11 @@ static int nand_dev_ready(struct mtd_info *mtd)
150 return gpio_get_value(P2_NAND_RB_GPIO_PIN); 150 return gpio_get_value(P2_NAND_RB_GPIO_PIN);
151} 151}
152 152
153static const char *part_probes[] = { "cmdlinepart", NULL };
154
155static struct platform_nand_data nand_data = { 153static struct platform_nand_data nand_data = {
156 .chip = { 154 .chip = {
157 .nr_chips = 1, 155 .nr_chips = 1,
158 .chip_offset = 0, 156 .chip_offset = 0,
159 .options = NAND_SAMSUNG_LP_OPTIONS, 157 .options = NAND_SAMSUNG_LP_OPTIONS,
160 .part_probe_types = part_probes,
161 }, 158 },
162 .ctrl = { 159 .ctrl = {
163 .cmd_ctrl = omap1_nand_cmd_ctl, 160 .cmd_ctrl = omap1_nand_cmd_ctl,
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 8ca14e88a31a..2c5d0ed75285 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -83,11 +83,9 @@ static struct musb_hdrc_config musb_config = {
83}; 83};
84 84
85static struct musb_hdrc_platform_data tusb_data = { 85static struct musb_hdrc_platform_data tusb_data = {
86#if defined(CONFIG_USB_MUSB_OTG) 86#ifdef CONFIG_USB_GADGET_MUSB_HDRC
87 .mode = MUSB_OTG, 87 .mode = MUSB_OTG,
88#elif defined(CONFIG_USB_MUSB_PERIPHERAL) 88#else
89 .mode = MUSB_PERIPHERAL,
90#else /* defined(CONFIG_USB_MUSB_HOST) */
91 .mode = MUSB_HOST, 89 .mode = MUSB_HOST,
92#endif 90#endif
93 .set_power = tusb_set_power, 91 .set_power = tusb_set_power,
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 79c6909eeb78..580fd17208da 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -81,13 +81,13 @@ static u8 omap3_beagle_version;
81static struct { 81static struct {
82 int mmc1_gpio_wp; 82 int mmc1_gpio_wp;
83 int usb_pwr_level; 83 int usb_pwr_level;
84 int reset_gpio; 84 int dvi_pd_gpio;
85 int usr_button_gpio; 85 int usr_button_gpio;
86 int mmc_caps; 86 int mmc_caps;
87} beagle_config = { 87} beagle_config = {
88 .mmc1_gpio_wp = -EINVAL, 88 .mmc1_gpio_wp = -EINVAL,
89 .usb_pwr_level = GPIOF_OUT_INIT_LOW, 89 .usb_pwr_level = GPIOF_OUT_INIT_LOW,
90 .reset_gpio = 129, 90 .dvi_pd_gpio = -EINVAL,
91 .usr_button_gpio = 4, 91 .usr_button_gpio = 4,
92 .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA, 92 .mmc_caps = MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA,
93}; 93};
@@ -126,21 +126,21 @@ static void __init omap3_beagle_init_rev(void)
126 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n"); 126 printk(KERN_INFO "OMAP3 Beagle Rev: Ax/Bx\n");
127 omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX; 127 omap3_beagle_version = OMAP3BEAGLE_BOARD_AXBX;
128 beagle_config.mmc1_gpio_wp = 29; 128 beagle_config.mmc1_gpio_wp = 29;
129 beagle_config.reset_gpio = 170; 129 beagle_config.dvi_pd_gpio = 170;
130 beagle_config.usr_button_gpio = 7; 130 beagle_config.usr_button_gpio = 7;
131 break; 131 break;
132 case 6: 132 case 6:
133 printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n"); 133 printk(KERN_INFO "OMAP3 Beagle Rev: C1/C2/C3\n");
134 omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3; 134 omap3_beagle_version = OMAP3BEAGLE_BOARD_C1_3;
135 beagle_config.mmc1_gpio_wp = 23; 135 beagle_config.mmc1_gpio_wp = 23;
136 beagle_config.reset_gpio = 170; 136 beagle_config.dvi_pd_gpio = 170;
137 beagle_config.usr_button_gpio = 7; 137 beagle_config.usr_button_gpio = 7;
138 break; 138 break;
139 case 5: 139 case 5:
140 printk(KERN_INFO "OMAP3 Beagle Rev: C4\n"); 140 printk(KERN_INFO "OMAP3 Beagle Rev: C4\n");
141 omap3_beagle_version = OMAP3BEAGLE_BOARD_C4; 141 omap3_beagle_version = OMAP3BEAGLE_BOARD_C4;
142 beagle_config.mmc1_gpio_wp = 23; 142 beagle_config.mmc1_gpio_wp = 23;
143 beagle_config.reset_gpio = 170; 143 beagle_config.dvi_pd_gpio = 170;
144 beagle_config.usr_button_gpio = 7; 144 beagle_config.usr_button_gpio = 7;
145 break; 145 break;
146 case 0: 146 case 0:
@@ -274,11 +274,9 @@ static int beagle_twl_gpio_setup(struct device *dev,
274 if (r) 274 if (r)
275 pr_err("%s: unable to configure nDVI_PWR_EN\n", 275 pr_err("%s: unable to configure nDVI_PWR_EN\n",
276 __func__); 276 __func__);
277 r = gpio_request_one(gpio + 2, GPIOF_OUT_INIT_HIGH, 277
278 "DVI_LDO_EN"); 278 beagle_config.dvi_pd_gpio = gpio + 2;
279 if (r) 279
280 pr_err("%s: unable to configure DVI_LDO_EN\n",
281 __func__);
282 } else { 280 } else {
283 /* 281 /*
284 * REVISIT: need ehci-omap hooks for external VBUS 282 * REVISIT: need ehci-omap hooks for external VBUS
@@ -287,7 +285,7 @@ static int beagle_twl_gpio_setup(struct device *dev,
287 if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC")) 285 if (gpio_request_one(gpio + 1, GPIOF_IN, "EHCI_nOC"))
288 pr_err("%s: unable to configure EHCI_nOC\n", __func__); 286 pr_err("%s: unable to configure EHCI_nOC\n", __func__);
289 } 287 }
290 dvi_panel.power_down_gpio = beagle_config.reset_gpio; 288 dvi_panel.power_down_gpio = beagle_config.dvi_pd_gpio;
291 289
292 gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level, 290 gpio_request_one(gpio + TWL4030_GPIO_MAX, beagle_config.usb_pwr_level,
293 "nEN_USB_PWR"); 291 "nEN_USB_PWR");
@@ -499,7 +497,7 @@ static void __init omap3_beagle_init(void)
499 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB); 497 omap3_mux_init(board_mux, OMAP_PACKAGE_CBB);
500 omap3_beagle_init_rev(); 498 omap3_beagle_init_rev();
501 499
502 if (beagle_config.mmc1_gpio_wp != -EINVAL) 500 if (gpio_is_valid(beagle_config.mmc1_gpio_wp))
503 omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT); 501 omap_mux_init_gpio(beagle_config.mmc1_gpio_wp, OMAP_PIN_INPUT);
504 mmc[0].caps = beagle_config.mmc_caps; 502 mmc[0].caps = beagle_config.mmc_caps;
505 omap_hsmmc_init(mmc); 503 omap_hsmmc_init(mmc);
@@ -510,15 +508,13 @@ static void __init omap3_beagle_init(void)
510 508
511 platform_add_devices(omap3_beagle_devices, 509 platform_add_devices(omap3_beagle_devices,
512 ARRAY_SIZE(omap3_beagle_devices)); 510 ARRAY_SIZE(omap3_beagle_devices));
511 if (gpio_is_valid(beagle_config.dvi_pd_gpio))
512 omap_mux_init_gpio(beagle_config.dvi_pd_gpio, OMAP_PIN_OUTPUT);
513 omap_display_init(&beagle_dss_data); 513 omap_display_init(&beagle_dss_data);
514 omap_serial_init(); 514 omap_serial_init();
515 omap_sdrc_init(mt46h32m32lf6_sdrc_params, 515 omap_sdrc_init(mt46h32m32lf6_sdrc_params,
516 mt46h32m32lf6_sdrc_params); 516 mt46h32m32lf6_sdrc_params);
517 517
518 omap_mux_init_gpio(170, OMAP_PIN_INPUT);
519 /* REVISIT leave DVI powered down until it's needed ... */
520 gpio_request_one(170, GPIOF_OUT_INIT_HIGH, "DVI_nPD");
521
522 usb_musb_init(NULL); 518 usb_musb_init(NULL);
523 usbhs_init(&usbhs_bdata); 519 usbhs_init(&usbhs_bdata);
524 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions, 520 omap_nand_flash_init(NAND_BUSWIDTH_16, omap3beagle_nand_partitions,
diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c
index ff53deccecab..df2534de3361 100644
--- a/arch/arm/mach-omap2/board-rx51-peripherals.c
+++ b/arch/arm/mach-omap2/board-rx51-peripherals.c
@@ -144,7 +144,6 @@ static struct lis3lv02d_platform_data rx51_lis3lv02d_data = {
144 .release_resources = lis302_release, 144 .release_resources = lis302_release,
145 .st_min_limits = {-32, 3, 3}, 145 .st_min_limits = {-32, 3, 3},
146 .st_max_limits = {-3, 32, 32}, 146 .st_max_limits = {-3, 32, 32},
147 .irq2 = OMAP_GPIO_IRQ(LIS302_IRQ2_GPIO),
148}; 147};
149#endif 148#endif
150 149
@@ -1030,7 +1029,6 @@ static struct i2c_board_info __initdata rx51_peripherals_i2c_board_info_3[] = {
1030 { 1029 {
1031 I2C_BOARD_INFO("lis3lv02d", 0x1d), 1030 I2C_BOARD_INFO("lis3lv02d", 0x1d),
1032 .platform_data = &rx51_lis3lv02d_data, 1031 .platform_data = &rx51_lis3lv02d_data,
1033 .irq = OMAP_GPIO_IRQ(LIS302_IRQ1_GPIO),
1034 }, 1032 },
1035#endif 1033#endif
1036}; 1034};
@@ -1056,6 +1054,10 @@ static int __init rx51_i2c_init(void)
1056 omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata); 1054 omap_pmic_init(1, 2200, "twl5030", INT_34XX_SYS_NIRQ, &rx51_twldata);
1057 omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2, 1055 omap_register_i2c_bus(2, 100, rx51_peripherals_i2c_board_info_2,
1058 ARRAY_SIZE(rx51_peripherals_i2c_board_info_2)); 1056 ARRAY_SIZE(rx51_peripherals_i2c_board_info_2));
1057#if defined(CONFIG_SENSORS_LIS3_I2C) || defined(CONFIG_SENSORS_LIS3_I2C_MODULE)
1058 rx51_lis3lv02d_data.irq2 = gpio_to_irq(LIS302_IRQ2_GPIO);
1059 rx51_peripherals_i2c_board_info_3[0].irq = gpio_to_irq(LIS302_IRQ1_GPIO);
1060#endif
1059 omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3, 1061 omap_register_i2c_bus(3, 400, rx51_peripherals_i2c_board_info_3,
1060 ARRAY_SIZE(rx51_peripherals_i2c_board_info_3)); 1062 ARRAY_SIZE(rx51_peripherals_i2c_board_info_3));
1061 return 0; 1063 return 0;
diff --git a/arch/arm/mach-omap2/clock3xxx_data.c b/arch/arm/mach-omap2/clock3xxx_data.c
index 4e1a3b0e8cc8..1efdec236ae8 100644
--- a/arch/arm/mach-omap2/clock3xxx_data.c
+++ b/arch/arm/mach-omap2/clock3xxx_data.c
@@ -3514,7 +3514,7 @@ int __init omap3xxx_clk_init(void)
3514 struct omap_clk *c; 3514 struct omap_clk *c;
3515 u32 cpu_clkflg = 0; 3515 u32 cpu_clkflg = 0;
3516 3516
3517 if (cpu_is_omap3517()) { 3517 if (soc_is_am35xx()) {
3518 cpu_mask = RATE_IN_34XX; 3518 cpu_mask = RATE_IN_34XX;
3519 cpu_clkflg = CK_AM35XX; 3519 cpu_clkflg = CK_AM35XX;
3520 } else if (cpu_is_omap3630()) { 3520 } else if (cpu_is_omap3630()) {
diff --git a/arch/arm/mach-omap2/clock44xx_data.c b/arch/arm/mach-omap2/clock44xx_data.c
index 2172f6603848..e2b701e164f6 100644
--- a/arch/arm/mach-omap2/clock44xx_data.c
+++ b/arch/arm/mach-omap2/clock44xx_data.c
@@ -84,6 +84,7 @@ static struct clk slimbus_clk = {
84 84
85static struct clk sys_32k_ck = { 85static struct clk sys_32k_ck = {
86 .name = "sys_32k_ck", 86 .name = "sys_32k_ck",
87 .clkdm_name = "prm_clkdm",
87 .rate = 32768, 88 .rate = 32768,
88 .ops = &clkops_null, 89 .ops = &clkops_null,
89}; 90};
@@ -512,6 +513,7 @@ static struct clk ddrphy_ck = {
512 .name = "ddrphy_ck", 513 .name = "ddrphy_ck",
513 .parent = &dpll_core_m2_ck, 514 .parent = &dpll_core_m2_ck,
514 .ops = &clkops_null, 515 .ops = &clkops_null,
516 .clkdm_name = "l3_emif_clkdm",
515 .fixed_div = 2, 517 .fixed_div = 2,
516 .recalc = &omap_fixed_divisor_recalc, 518 .recalc = &omap_fixed_divisor_recalc,
517}; 519};
@@ -769,6 +771,7 @@ static const struct clksel dpll_mpu_m2_div[] = {
769static struct clk dpll_mpu_m2_ck = { 771static struct clk dpll_mpu_m2_ck = {
770 .name = "dpll_mpu_m2_ck", 772 .name = "dpll_mpu_m2_ck",
771 .parent = &dpll_mpu_ck, 773 .parent = &dpll_mpu_ck,
774 .clkdm_name = "cm_clkdm",
772 .clksel = dpll_mpu_m2_div, 775 .clksel = dpll_mpu_m2_div,
773 .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU, 776 .clksel_reg = OMAP4430_CM_DIV_M2_DPLL_MPU,
774 .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK, 777 .clksel_mask = OMAP4430_DPLL_CLKOUT_DIV_MASK,
@@ -1149,6 +1152,7 @@ static const struct clksel l3_div_div[] = {
1149static struct clk l3_div_ck = { 1152static struct clk l3_div_ck = {
1150 .name = "l3_div_ck", 1153 .name = "l3_div_ck",
1151 .parent = &div_core_ck, 1154 .parent = &div_core_ck,
1155 .clkdm_name = "cm_clkdm",
1152 .clksel = l3_div_div, 1156 .clksel = l3_div_div,
1153 .clksel_reg = OMAP4430_CM_CLKSEL_CORE, 1157 .clksel_reg = OMAP4430_CM_CLKSEL_CORE,
1154 .clksel_mask = OMAP4430_CLKSEL_L3_MASK, 1158 .clksel_mask = OMAP4430_CLKSEL_L3_MASK,
@@ -2824,6 +2828,7 @@ static const struct clksel trace_clk_div_div[] = {
2824static struct clk trace_clk_div_ck = { 2828static struct clk trace_clk_div_ck = {
2825 .name = "trace_clk_div_ck", 2829 .name = "trace_clk_div_ck",
2826 .parent = &pmd_trace_clk_mux_ck, 2830 .parent = &pmd_trace_clk_mux_ck,
2831 .clkdm_name = "emu_sys_clkdm",
2827 .clksel = trace_clk_div_div, 2832 .clksel = trace_clk_div_div,
2828 .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL, 2833 .clksel_reg = OMAP4430_CM_EMU_DEBUGSS_CLKCTRL,
2829 .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK, 2834 .clksel_mask = OMAP4430_CLKSEL_PMD_TRACE_CLK_MASK,
diff --git a/arch/arm/mach-omap2/cm.h b/arch/arm/mach-omap2/cm.h
index a7bc096bd407..f24e3f7a2bbc 100644
--- a/arch/arm/mach-omap2/cm.h
+++ b/arch/arm/mach-omap2/cm.h
@@ -22,4 +22,15 @@
22 */ 22 */
23#define MAX_MODULE_READY_TIME 2000 23#define MAX_MODULE_READY_TIME 2000
24 24
25/*
26 * MAX_MODULE_DISABLE_TIME: max duration in microseconds to wait for
27 * the PRCM to request that a module enter the inactive state in the
28 * case of OMAP2 & 3. In the case of OMAP4 this is the max duration
29 * in microseconds for the module to reach the inactive state from
30 * a functional state.
31 * XXX FSUSB on OMAP4430 takes ~4ms to idle after reset during
32 * kernel init.
33 */
34#define MAX_MODULE_DISABLE_TIME 5000
35
25#endif 36#endif
diff --git a/arch/arm/mach-omap2/cminst44xx.c b/arch/arm/mach-omap2/cminst44xx.c
index 8c86d294b1a3..1a39945d9ff8 100644
--- a/arch/arm/mach-omap2/cminst44xx.c
+++ b/arch/arm/mach-omap2/cminst44xx.c
@@ -313,9 +313,9 @@ int omap4_cminst_wait_module_idle(u8 part, u16 inst, s16 cdoffs, u16 clkctrl_off
313 313
314 omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) == 314 omap_test_timeout((_clkctrl_idlest(part, inst, cdoffs, clkctrl_offs) ==
315 CLKCTRL_IDLEST_DISABLED), 315 CLKCTRL_IDLEST_DISABLED),
316 MAX_MODULE_READY_TIME, i); 316 MAX_MODULE_DISABLE_TIME, i);
317 317
318 return (i < MAX_MODULE_READY_TIME) ? 0 : -EBUSY; 318 return (i < MAX_MODULE_DISABLE_TIME) ? 0 : -EBUSY;
319} 319}
320 320
321/** 321/**
diff --git a/arch/arm/mach-omap2/display.c b/arch/arm/mach-omap2/display.c
index db5a88a36c63..5fb47a14f4ba 100644
--- a/arch/arm/mach-omap2/display.c
+++ b/arch/arm/mach-omap2/display.c
@@ -180,16 +180,133 @@ static void omap_dsi_disable_pads(int dsi_id, unsigned lane_mask)
180 omap4_dsi_mux_pads(dsi_id, 0); 180 omap4_dsi_mux_pads(dsi_id, 0);
181} 181}
182 182
183static int omap_dss_set_min_bus_tput(struct device *dev, unsigned long tput)
184{
185 return omap_pm_set_min_bus_tput(dev, OCP_INITIATOR_AGENT, tput);
186}
187
188static struct platform_device *create_dss_pdev(const char *pdev_name,
189 int pdev_id, const char *oh_name, void *pdata, int pdata_len,
190 struct platform_device *parent)
191{
192 struct platform_device *pdev;
193 struct omap_device *od;
194 struct omap_hwmod *ohs[1];
195 struct omap_hwmod *oh;
196 int r;
197
198 oh = omap_hwmod_lookup(oh_name);
199 if (!oh) {
200 pr_err("Could not look up %s\n", oh_name);
201 r = -ENODEV;
202 goto err;
203 }
204
205 pdev = platform_device_alloc(pdev_name, pdev_id);
206 if (!pdev) {
207 pr_err("Could not create pdev for %s\n", pdev_name);
208 r = -ENOMEM;
209 goto err;
210 }
211
212 if (parent != NULL)
213 pdev->dev.parent = &parent->dev;
214
215 if (pdev->id != -1)
216 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
217 else
218 dev_set_name(&pdev->dev, "%s", pdev->name);
219
220 ohs[0] = oh;
221 od = omap_device_alloc(pdev, ohs, 1, NULL, 0);
222 if (!od) {
223 pr_err("Could not alloc omap_device for %s\n", pdev_name);
224 r = -ENOMEM;
225 goto err;
226 }
227
228 r = platform_device_add_data(pdev, pdata, pdata_len);
229 if (r) {
230 pr_err("Could not set pdata for %s\n", pdev_name);
231 goto err;
232 }
233
234 r = omap_device_register(pdev);
235 if (r) {
236 pr_err("Could not register omap_device for %s\n", pdev_name);
237 goto err;
238 }
239
240 return pdev;
241
242err:
243 return ERR_PTR(r);
244}
245
246static struct platform_device *create_simple_dss_pdev(const char *pdev_name,
247 int pdev_id, void *pdata, int pdata_len,
248 struct platform_device *parent)
249{
250 struct platform_device *pdev;
251 int r;
252
253 pdev = platform_device_alloc(pdev_name, pdev_id);
254 if (!pdev) {
255 pr_err("Could not create pdev for %s\n", pdev_name);
256 r = -ENOMEM;
257 goto err;
258 }
259
260 if (parent != NULL)
261 pdev->dev.parent = &parent->dev;
262
263 if (pdev->id != -1)
264 dev_set_name(&pdev->dev, "%s.%d", pdev->name, pdev->id);
265 else
266 dev_set_name(&pdev->dev, "%s", pdev->name);
267
268 r = platform_device_add_data(pdev, pdata, pdata_len);
269 if (r) {
270 pr_err("Could not set pdata for %s\n", pdev_name);
271 goto err;
272 }
273
274 r = platform_device_add(pdev);
275 if (r) {
276 pr_err("Could not register platform_device for %s\n", pdev_name);
277 goto err;
278 }
279
280 return pdev;
281
282err:
283 return ERR_PTR(r);
284}
285
183int __init omap_display_init(struct omap_dss_board_info *board_data) 286int __init omap_display_init(struct omap_dss_board_info *board_data)
184{ 287{
185 int r = 0; 288 int r = 0;
186 struct omap_hwmod *oh;
187 struct platform_device *pdev; 289 struct platform_device *pdev;
188 int i, oh_count; 290 int i, oh_count;
189 struct omap_display_platform_data pdata;
190 const struct omap_dss_hwmod_data *curr_dss_hwmod; 291 const struct omap_dss_hwmod_data *curr_dss_hwmod;
292 struct platform_device *dss_pdev;
293
294 /* create omapdss device */
295
296 board_data->dsi_enable_pads = omap_dsi_enable_pads;
297 board_data->dsi_disable_pads = omap_dsi_disable_pads;
298 board_data->get_context_loss_count = omap_pm_get_dev_context_loss_count;
299 board_data->set_min_bus_tput = omap_dss_set_min_bus_tput;
300
301 omap_display_device.dev.platform_data = board_data;
302
303 r = platform_device_register(&omap_display_device);
304 if (r < 0) {
305 pr_err("Unable to register omapdss device\n");
306 return r;
307 }
191 308
192 memset(&pdata, 0, sizeof(pdata)); 309 /* create devices for dss hwmods */
193 310
194 if (cpu_is_omap24xx()) { 311 if (cpu_is_omap24xx()) {
195 curr_dss_hwmod = omap2_dss_hwmod_data; 312 curr_dss_hwmod = omap2_dss_hwmod_data;
@@ -202,39 +319,58 @@ int __init omap_display_init(struct omap_dss_board_info *board_data)
202 oh_count = ARRAY_SIZE(omap4_dss_hwmod_data); 319 oh_count = ARRAY_SIZE(omap4_dss_hwmod_data);
203 } 320 }
204 321
205 if (board_data->dsi_enable_pads == NULL) 322 /*
206 board_data->dsi_enable_pads = omap_dsi_enable_pads; 323 * First create the pdev for dss_core, which is used as a parent device
207 if (board_data->dsi_disable_pads == NULL) 324 * by the other dss pdevs. Note: dss_core has to be the first item in
208 board_data->dsi_disable_pads = omap_dsi_disable_pads; 325 * the hwmod list.
209 326 */
210 pdata.board_data = board_data; 327 dss_pdev = create_dss_pdev(curr_dss_hwmod[0].dev_name,
211 pdata.board_data->get_context_loss_count = 328 curr_dss_hwmod[0].id,
212 omap_pm_get_dev_context_loss_count; 329 curr_dss_hwmod[0].oh_name,
213 330 board_data, sizeof(*board_data),
214 for (i = 0; i < oh_count; i++) { 331 NULL);
215 oh = omap_hwmod_lookup(curr_dss_hwmod[i].oh_name); 332
216 if (!oh) { 333 if (IS_ERR(dss_pdev)) {
217 pr_err("Could not look up %s\n", 334 pr_err("Could not build omap_device for %s\n",
218 curr_dss_hwmod[i].oh_name); 335 curr_dss_hwmod[0].oh_name);
219 return -ENODEV; 336
337 return PTR_ERR(dss_pdev);
338 }
339
340 for (i = 1; i < oh_count; i++) {
341 pdev = create_dss_pdev(curr_dss_hwmod[i].dev_name,
342 curr_dss_hwmod[i].id,
343 curr_dss_hwmod[i].oh_name,
344 board_data, sizeof(*board_data),
345 dss_pdev);
346
347 if (IS_ERR(pdev)) {
348 pr_err("Could not build omap_device for %s\n",
349 curr_dss_hwmod[i].oh_name);
350
351 return PTR_ERR(pdev);
220 } 352 }
353 }
221 354
222 pdev = omap_device_build(curr_dss_hwmod[i].dev_name, 355 /* Create devices for DPI and SDI */
223 curr_dss_hwmod[i].id, oh, &pdata,
224 sizeof(struct omap_display_platform_data),
225 NULL, 0, 0);
226 356
227 if (WARN((IS_ERR(pdev)), "Could not build omap_device for %s\n", 357 pdev = create_simple_dss_pdev("omapdss_dpi", -1,
228 curr_dss_hwmod[i].oh_name)) 358 board_data, sizeof(*board_data), dss_pdev);
229 return -ENODEV; 359 if (IS_ERR(pdev)) {
360 pr_err("Could not build platform_device for omapdss_dpi\n");
361 return PTR_ERR(pdev);
230 } 362 }
231 omap_display_device.dev.platform_data = board_data;
232 363
233 r = platform_device_register(&omap_display_device); 364 if (cpu_is_omap34xx()) {
234 if (r < 0) 365 pdev = create_simple_dss_pdev("omapdss_sdi", -1,
235 printk(KERN_ERR "Unable to register OMAP-Display device\n"); 366 board_data, sizeof(*board_data), dss_pdev);
367 if (IS_ERR(pdev)) {
368 pr_err("Could not build platform_device for omapdss_sdi\n");
369 return PTR_ERR(pdev);
370 }
371 }
236 372
237 return r; 373 return 0;
238} 374}
239 375
240static void dispc_disable_outputs(void) 376static void dispc_disable_outputs(void)
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 845309f146fe..88ffa1e645cd 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -20,6 +20,9 @@
20 20
21#include <linux/module.h> 21#include <linux/module.h>
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23
24#include <asm/memblock.h>
25
23#include "cm2xxx_3xxx.h" 26#include "cm2xxx_3xxx.h"
24#include "prm2xxx_3xxx.h" 27#include "prm2xxx_3xxx.h"
25#ifdef CONFIG_BRIDGE_DVFS 28#ifdef CONFIG_BRIDGE_DVFS
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 46b09dae770e..2286410671e7 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -49,6 +49,7 @@
49#define GPMC_ECC_CONTROL 0x1f8 49#define GPMC_ECC_CONTROL 0x1f8
50#define GPMC_ECC_SIZE_CONFIG 0x1fc 50#define GPMC_ECC_SIZE_CONFIG 0x1fc
51#define GPMC_ECC1_RESULT 0x200 51#define GPMC_ECC1_RESULT 0x200
52#define GPMC_ECC_BCH_RESULT_0 0x240 /* not available on OMAP2 */
52 53
53/* GPMC ECC control settings */ 54/* GPMC ECC control settings */
54#define GPMC_ECC_CTRL_ECCCLEAR 0x100 55#define GPMC_ECC_CTRL_ECCCLEAR 0x100
@@ -935,3 +936,186 @@ int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code)
935 return 0; 936 return 0;
936} 937}
937EXPORT_SYMBOL_GPL(gpmc_calculate_ecc); 938EXPORT_SYMBOL_GPL(gpmc_calculate_ecc);
939
940#ifdef CONFIG_ARCH_OMAP3
941
942/**
943 * gpmc_init_hwecc_bch - initialize hardware BCH ecc functionality
944 * @cs: chip select number
945 * @nsectors: how many 512-byte sectors to process
946 * @nerrors: how many errors to correct per sector (4 or 8)
947 *
948 * This function must be executed before any call to gpmc_enable_hwecc_bch.
949 */
950int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors)
951{
952 /* check if ecc module is in use */
953 if (gpmc_ecc_used != -EINVAL)
954 return -EINVAL;
955
956 /* support only OMAP3 class */
957 if (!cpu_is_omap34xx()) {
958 printk(KERN_ERR "BCH ecc is not supported on this CPU\n");
959 return -EINVAL;
960 }
961
962 /*
963 * For now, assume 4-bit mode is only supported on OMAP3630 ES1.x, x>=1.
964 * Other chips may be added if confirmed to work.
965 */
966 if ((nerrors == 4) &&
967 (!cpu_is_omap3630() || (GET_OMAP_REVISION() == 0))) {
968 printk(KERN_ERR "BCH 4-bit mode is not supported on this CPU\n");
969 return -EINVAL;
970 }
971
972 /* sanity check */
973 if (nsectors > 8) {
974 printk(KERN_ERR "BCH cannot process %d sectors (max is 8)\n",
975 nsectors);
976 return -EINVAL;
977 }
978
979 return 0;
980}
981EXPORT_SYMBOL_GPL(gpmc_init_hwecc_bch);
982
983/**
984 * gpmc_enable_hwecc_bch - enable hardware BCH ecc functionality
985 * @cs: chip select number
986 * @mode: read/write mode
987 * @dev_width: device bus width(1 for x16, 0 for x8)
988 * @nsectors: how many 512-byte sectors to process
989 * @nerrors: how many errors to correct per sector (4 or 8)
990 */
991int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
992 int nerrors)
993{
994 unsigned int val;
995
996 /* check if ecc module is in use */
997 if (gpmc_ecc_used != -EINVAL)
998 return -EINVAL;
999
1000 gpmc_ecc_used = cs;
1001
1002 /* clear ecc and enable bits */
1003 gpmc_write_reg(GPMC_ECC_CONTROL, 0x1);
1004
1005 /*
1006 * When using BCH, sector size is hardcoded to 512 bytes.
1007 * Here we are using wrapping mode 6 both for reading and writing, with:
1008 * size0 = 0 (no additional protected byte in spare area)
1009 * size1 = 32 (skip 32 nibbles = 16 bytes per sector in spare area)
1010 */
1011 gpmc_write_reg(GPMC_ECC_SIZE_CONFIG, (32 << 22) | (0 << 12));
1012
1013 /* BCH configuration */
1014 val = ((1 << 16) | /* enable BCH */
1015 (((nerrors == 8) ? 1 : 0) << 12) | /* 8 or 4 bits */
1016 (0x06 << 8) | /* wrap mode = 6 */
1017 (dev_width << 7) | /* bus width */
1018 (((nsectors-1) & 0x7) << 4) | /* number of sectors */
1019 (cs << 1) | /* ECC CS */
1020 (0x1)); /* enable ECC */
1021
1022 gpmc_write_reg(GPMC_ECC_CONFIG, val);
1023 gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
1024 return 0;
1025}
1026EXPORT_SYMBOL_GPL(gpmc_enable_hwecc_bch);
1027
1028/**
1029 * gpmc_calculate_ecc_bch4 - Generate 7 ecc bytes per sector of 512 data bytes
1030 * @cs: chip select number
1031 * @dat: The pointer to data on which ecc is computed
1032 * @ecc: The ecc output buffer
1033 */
1034int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc)
1035{
1036 int i;
1037 unsigned long nsectors, reg, val1, val2;
1038
1039 if (gpmc_ecc_used != cs)
1040 return -EINVAL;
1041
1042 nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
1043
1044 for (i = 0; i < nsectors; i++) {
1045
1046 reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
1047
1048 /* Read hw-computed remainder */
1049 val1 = gpmc_read_reg(reg + 0);
1050 val2 = gpmc_read_reg(reg + 4);
1051
1052 /*
1053 * Add constant polynomial to remainder, in order to get an ecc
1054 * sequence of 0xFFs for a buffer filled with 0xFFs; and
1055 * left-justify the resulting polynomial.
1056 */
1057 *ecc++ = 0x28 ^ ((val2 >> 12) & 0xFF);
1058 *ecc++ = 0x13 ^ ((val2 >> 4) & 0xFF);
1059 *ecc++ = 0xcc ^ (((val2 & 0xF) << 4)|((val1 >> 28) & 0xF));
1060 *ecc++ = 0x39 ^ ((val1 >> 20) & 0xFF);
1061 *ecc++ = 0x96 ^ ((val1 >> 12) & 0xFF);
1062 *ecc++ = 0xac ^ ((val1 >> 4) & 0xFF);
1063 *ecc++ = 0x7f ^ ((val1 & 0xF) << 4);
1064 }
1065
1066 gpmc_ecc_used = -EINVAL;
1067 return 0;
1068}
1069EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch4);
1070
1071/**
1072 * gpmc_calculate_ecc_bch8 - Generate 13 ecc bytes per block of 512 data bytes
1073 * @cs: chip select number
1074 * @dat: The pointer to data on which ecc is computed
1075 * @ecc: The ecc output buffer
1076 */
1077int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc)
1078{
1079 int i;
1080 unsigned long nsectors, reg, val1, val2, val3, val4;
1081
1082 if (gpmc_ecc_used != cs)
1083 return -EINVAL;
1084
1085 nsectors = ((gpmc_read_reg(GPMC_ECC_CONFIG) >> 4) & 0x7) + 1;
1086
1087 for (i = 0; i < nsectors; i++) {
1088
1089 reg = GPMC_ECC_BCH_RESULT_0 + 16*i;
1090
1091 /* Read hw-computed remainder */
1092 val1 = gpmc_read_reg(reg + 0);
1093 val2 = gpmc_read_reg(reg + 4);
1094 val3 = gpmc_read_reg(reg + 8);
1095 val4 = gpmc_read_reg(reg + 12);
1096
1097 /*
1098 * Add constant polynomial to remainder, in order to get an ecc
1099 * sequence of 0xFFs for a buffer filled with 0xFFs.
1100 */
1101 *ecc++ = 0xef ^ (val4 & 0xFF);
1102 *ecc++ = 0x51 ^ ((val3 >> 24) & 0xFF);
1103 *ecc++ = 0x2e ^ ((val3 >> 16) & 0xFF);
1104 *ecc++ = 0x09 ^ ((val3 >> 8) & 0xFF);
1105 *ecc++ = 0xed ^ (val3 & 0xFF);
1106 *ecc++ = 0x93 ^ ((val2 >> 24) & 0xFF);
1107 *ecc++ = 0x9a ^ ((val2 >> 16) & 0xFF);
1108 *ecc++ = 0xc2 ^ ((val2 >> 8) & 0xFF);
1109 *ecc++ = 0x97 ^ (val2 & 0xFF);
1110 *ecc++ = 0x79 ^ ((val1 >> 24) & 0xFF);
1111 *ecc++ = 0xe5 ^ ((val1 >> 16) & 0xFF);
1112 *ecc++ = 0x24 ^ ((val1 >> 8) & 0xFF);
1113 *ecc++ = 0xb5 ^ (val1 & 0xFF);
1114 }
1115
1116 gpmc_ecc_used = -EINVAL;
1117 return 0;
1118}
1119EXPORT_SYMBOL_GPL(gpmc_calculate_ecc_bch8);
1120
1121#endif /* CONFIG_ARCH_OMAP3 */
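
Editor's note, not part of the patch: the kernel-doc above says gpmc_init_hwecc_bch() must run before gpmc_enable_hwecc_bch(), and the calculate helpers only succeed for the chip select that was enabled. A minimal hedged sketch of that call order follows; the wrapper name and its parameters are hypothetical, and the mode value is assumed to be whatever the NAND core hands down for the current access.

/*
 * Hedged sketch (assumption, not from the commit): expected call order for
 * the new GPMC BCH8 helpers around a single transfer.
 */
static int example_bch8_ecc(int cs, int mode, int dev_width, int nsectors,
			    const u_char *dat, u_char *ecc_buf)
{
	int ret;

	/* one-time sanity check: CPU support, <= 8 sectors, engine free */
	ret = gpmc_init_hwecc_bch(cs, nsectors, 8);
	if (ret)
		return ret;

	/* arm the engine (wrapping mode 6) for this chip select */
	ret = gpmc_enable_hwecc_bch(cs, mode, dev_width, nsectors, 8);
	if (ret)
		return ret;

	/* ... the NAND driver moves the page data through the GPMC here ... */

	/* pull 13 ECC bytes per 512-byte sector out of the result registers */
	return gpmc_calculate_ecc_bch8(cs, dat, ecc_buf);
}
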
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index 0389b3264abe..00486a8564fd 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -247,6 +247,17 @@ void __init omap3xxx_check_features(void)
247 omap_features |= OMAP3_HAS_SDRC; 247 omap_features |= OMAP3_HAS_SDRC;
248 248
249 /* 249 /*
250 * am35x fixups:
251 * - The am35x Chip ID register has bits 12, 7:5, and 3:2 marked as
252 * reserved and therefore return 0 when read. Unfortunately,
253 * OMAP3_CHECK_FEATURE() will interpret some of those zeroes to
254 * mean that a feature is present even though it isn't so clear
255 * the incorrectly set feature bits.
256 */
257 if (soc_is_am35xx())
258 omap_features &= ~(OMAP3_HAS_IVA | OMAP3_HAS_ISP);
259
260 /*
250 * TODO: Get additional info (where applicable) 261 * TODO: Get additional info (where applicable)
251 * e.g. Size of L2 cache. 262 * e.g. Size of L2 cache.
252 */ 263 */
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index fdc4303be563..6038a8c84b74 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -149,6 +149,7 @@ omap_alloc_gc(void __iomem *base, unsigned int irq_start, unsigned int num)
149 ct->chip.irq_ack = omap_mask_ack_irq; 149 ct->chip.irq_ack = omap_mask_ack_irq;
150 ct->chip.irq_mask = irq_gc_mask_disable_reg; 150 ct->chip.irq_mask = irq_gc_mask_disable_reg;
151 ct->chip.irq_unmask = irq_gc_unmask_enable_reg; 151 ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
152 ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
152 153
153 ct->regs.enable = INTC_MIR_CLEAR0; 154 ct->regs.enable = INTC_MIR_CLEAR0;
154 ct->regs.disable = INTC_MIR_SET0; 155 ct->regs.disable = INTC_MIR_SET0;
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 80e55c5c9998..9fe6829f4c16 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -41,6 +41,7 @@
41#include "control.h" 41#include "control.h"
42#include "mux.h" 42#include "mux.h"
43#include "prm.h" 43#include "prm.h"
44#include "common.h"
44 45
45#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */ 46#define OMAP_MUX_BASE_OFFSET 0x30 /* Offset from CTRL_BASE */
46#define OMAP_MUX_BASE_SZ 0x5ca 47#define OMAP_MUX_BASE_SZ 0x5ca
@@ -217,8 +218,7 @@ static int __init _omap_mux_get_by_name(struct omap_mux_partition *partition,
217 return -ENODEV; 218 return -ENODEV;
218} 219}
219 220
220static int __init 221int __init omap_mux_get_by_name(const char *muxname,
221omap_mux_get_by_name(const char *muxname,
222 struct omap_mux_partition **found_partition, 222 struct omap_mux_partition **found_partition,
223 struct omap_mux **found_mux) 223 struct omap_mux **found_mux)
224{ 224{
diff --git a/arch/arm/mach-omap2/mux.h b/arch/arm/mach-omap2/mux.h
index 69fe060a0b75..471e62a74a16 100644
--- a/arch/arm/mach-omap2/mux.h
+++ b/arch/arm/mach-omap2/mux.h
@@ -59,6 +59,7 @@
59#define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN 59#define OMAP_PIN_OFF_WAKEUPENABLE OMAP_WAKEUP_EN
60 60
61#define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4) 61#define OMAP_MODE_GPIO(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE4)
62#define OMAP_MODE_UART(x) (((x) & OMAP_MUX_MODE7) == OMAP_MUX_MODE0)
62 63
63/* Flags for omapX_mux_init */ 64/* Flags for omapX_mux_init */
64#define OMAP_PACKAGE_MASK 0xffff 65#define OMAP_PACKAGE_MASK 0xffff
@@ -225,8 +226,18 @@ omap_hwmod_mux_init(struct omap_device_pad *bpads, int nr_pads);
225 */ 226 */
226void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state); 227void omap_hwmod_mux(struct omap_hwmod_mux_info *hmux, u8 state);
227 228
229int omap_mux_get_by_name(const char *muxname,
230 struct omap_mux_partition **found_partition,
231 struct omap_mux **found_mux);
228#else 232#else
229 233
234static inline int omap_mux_get_by_name(const char *muxname,
235 struct omap_mux_partition **found_partition,
236 struct omap_mux **found_mux)
237{
238 return 0;
239}
240
230static inline int omap_mux_init_gpio(int gpio, int val) 241static inline int omap_mux_init_gpio(int gpio, int val)
231{ 242{
232 return 0; 243 return 0;
diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
index bf86f7e8f91f..773193670ea2 100644
--- a/arch/arm/mach-omap2/omap_hwmod.c
+++ b/arch/arm/mach-omap2/omap_hwmod.c
@@ -530,7 +530,7 @@ static int _disable_wakeup(struct omap_hwmod *oh, u32 *v)
530 if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP) 530 if (oh->class->sysc->idlemodes & SIDLE_SMART_WKUP)
531 _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v); 531 _set_slave_idlemode(oh, HWMOD_IDLEMODE_SMART, v);
532 if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP) 532 if (oh->class->sysc->idlemodes & MSTANDBY_SMART_WKUP)
533 _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART_WKUP, v); 533 _set_master_standbymode(oh, HWMOD_IDLEMODE_SMART, v);
534 534
535 /* XXX test pwrdm_get_wken for this hwmod's subsystem */ 535 /* XXX test pwrdm_get_wken for this hwmod's subsystem */
536 536
diff --git a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
index 950454a3fa31..f30e861ce6d9 100644
--- a/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_44xx_data.c
@@ -393,8 +393,7 @@ static struct omap_hwmod_class_sysconfig omap44xx_counter_sysc = {
393 .rev_offs = 0x0000, 393 .rev_offs = 0x0000,
394 .sysc_offs = 0x0004, 394 .sysc_offs = 0x0004,
395 .sysc_flags = SYSC_HAS_SIDLEMODE, 395 .sysc_flags = SYSC_HAS_SIDLEMODE,
396 .idlemodes = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART | 396 .idlemodes = (SIDLE_FORCE | SIDLE_NO),
397 SIDLE_SMART_WKUP),
398 .sysc_fields = &omap_hwmod_sysc_type1, 397 .sysc_fields = &omap_hwmod_sysc_type1,
399}; 398};
400 399
@@ -854,6 +853,11 @@ static struct omap_hwmod omap44xx_dss_hdmi_hwmod = {
854 .name = "dss_hdmi", 853 .name = "dss_hdmi",
855 .class = &omap44xx_hdmi_hwmod_class, 854 .class = &omap44xx_hdmi_hwmod_class,
856 .clkdm_name = "l3_dss_clkdm", 855 .clkdm_name = "l3_dss_clkdm",
856 /*
857 * HDMI audio requires to use no-idle mode. Hence,
858 * set idle mode by software.
859 */
860 .flags = HWMOD_SWSUP_SIDLE,
857 .mpu_irqs = omap44xx_dss_hdmi_irqs, 861 .mpu_irqs = omap44xx_dss_hdmi_irqs,
858 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs, 862 .sdma_reqs = omap44xx_dss_hdmi_sdma_reqs,
859 .main_clk = "dss_48mhz_clk", 863 .main_clk = "dss_48mhz_clk",
diff --git a/arch/arm/mach-omap2/omap_l3_smx.c b/arch/arm/mach-omap2/omap_l3_smx.c
index a05a62f9ee5b..acc216491b8a 100644
--- a/arch/arm/mach-omap2/omap_l3_smx.c
+++ b/arch/arm/mach-omap2/omap_l3_smx.c
@@ -155,10 +155,11 @@ static irqreturn_t omap3_l3_block_irq(struct omap3_l3 *l3,
155 u8 multi = error & L3_ERROR_LOG_MULTI; 155 u8 multi = error & L3_ERROR_LOG_MULTI;
156 u32 address = omap3_l3_decode_addr(error_addr); 156 u32 address = omap3_l3_decode_addr(error_addr);
157 157
158 WARN(true, "%s seen by %s %s at address %x\n", 158 pr_err("%s seen by %s %s at address %x\n",
159 omap3_l3_code_string(code), 159 omap3_l3_code_string(code),
160 omap3_l3_initiator_string(initid), 160 omap3_l3_initiator_string(initid),
161 multi ? "Multiple Errors" : "", address); 161 multi ? "Multiple Errors" : "", address);
162 WARN_ON(1);
162 163
163 return IRQ_HANDLED; 164 return IRQ_HANDLED;
164} 165}
diff --git a/arch/arm/mach-omap2/omap_phy_internal.c b/arch/arm/mach-omap2/omap_phy_internal.c
index 4c90477e6f82..d52651a05daa 100644
--- a/arch/arm/mach-omap2/omap_phy_internal.c
+++ b/arch/arm/mach-omap2/omap_phy_internal.c
@@ -239,21 +239,15 @@ void am35x_set_mode(u8 musb_mode)
239 239
240 devconf2 &= ~CONF2_OTGMODE; 240 devconf2 &= ~CONF2_OTGMODE;
241 switch (musb_mode) { 241 switch (musb_mode) {
242#ifdef CONFIG_USB_MUSB_HDRC_HCD
243 case MUSB_HOST: /* Force VBUS valid, ID = 0 */ 242 case MUSB_HOST: /* Force VBUS valid, ID = 0 */
244 devconf2 |= CONF2_FORCE_HOST; 243 devconf2 |= CONF2_FORCE_HOST;
245 break; 244 break;
246#endif
247#ifdef CONFIG_USB_GADGET_MUSB_HDRC
248 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */ 245 case MUSB_PERIPHERAL: /* Force VBUS valid, ID = 1 */
249 devconf2 |= CONF2_FORCE_DEVICE; 246 devconf2 |= CONF2_FORCE_DEVICE;
250 break; 247 break;
251#endif
252#ifdef CONFIG_USB_MUSB_OTG
253 case MUSB_OTG: /* Don't override the VBUS/ID comparators */ 248 case MUSB_OTG: /* Don't override the VBUS/ID comparators */
254 devconf2 |= CONF2_NO_OVERRIDE; 249 devconf2 |= CONF2_NO_OVERRIDE;
255 break; 250 break;
256#endif
257 default: 251 default:
258 pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode); 252 pr_info(KERN_INFO "Unsupported mode %u\n", musb_mode);
259 } 253 }
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index a34023d0ca7c..3a595e899724 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -724,6 +724,7 @@ int __init omap3_pm_init(void)
724 ret = request_irq(omap_prcm_event_to_irq("io"), 724 ret = request_irq(omap_prcm_event_to_irq("io"),
725 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io", 725 _prcm_int_handle_io, IRQF_SHARED | IRQF_NO_SUSPEND, "pm_io",
726 omap3_pm_init); 726 omap3_pm_init);
727 enable_irq(omap_prcm_event_to_irq("io"));
727 728
728 if (ret) { 729 if (ret) {
729 pr_err("pm: Failed to request pm_io irq\n"); 730 pr_err("pm: Failed to request pm_io irq\n");
diff --git a/arch/arm/mach-omap2/prm2xxx_3xxx.c b/arch/arm/mach-omap2/prm2xxx_3xxx.c
index 9ce765407ad5..21cb74003a56 100644
--- a/arch/arm/mach-omap2/prm2xxx_3xxx.c
+++ b/arch/arm/mach-omap2/prm2xxx_3xxx.c
@@ -15,6 +15,7 @@
15#include <linux/errno.h> 15#include <linux/errno.h>
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/irq.h>
18 19
19#include "common.h" 20#include "common.h"
20#include <plat/cpu.h> 21#include <plat/cpu.h>
@@ -303,8 +304,15 @@ void omap3xxx_prm_restore_irqen(u32 *saved_mask)
303 304
304static int __init omap3xxx_prcm_init(void) 305static int __init omap3xxx_prcm_init(void)
305{ 306{
306 if (cpu_is_omap34xx()) 307 int ret = 0;
307 return omap_prcm_register_chain_handler(&omap3_prcm_irq_setup); 308
308 return 0; 309 if (cpu_is_omap34xx()) {
310 ret = omap_prcm_register_chain_handler(&omap3_prcm_irq_setup);
311 if (!ret)
312 irq_set_status_flags(omap_prcm_event_to_irq("io"),
313 IRQ_NOAUTOEN);
314 }
315
316 return ret;
309} 317}
310subsys_initcall(omap3xxx_prcm_init); 318subsys_initcall(omap3xxx_prcm_init);
diff --git a/arch/arm/mach-omap2/serial.c b/arch/arm/mach-omap2/serial.c
index 292d4aaca068..c1b93c752d70 100644
--- a/arch/arm/mach-omap2/serial.c
+++ b/arch/arm/mach-omap2/serial.c
@@ -57,6 +57,7 @@ struct omap_uart_state {
57 57
58 struct list_head node; 58 struct list_head node;
59 struct omap_hwmod *oh; 59 struct omap_hwmod *oh;
60 struct omap_device_pad default_omap_uart_pads[2];
60}; 61};
61 62
62static LIST_HEAD(uart_list); 63static LIST_HEAD(uart_list);
@@ -126,11 +127,70 @@ static void omap_uart_set_smartidle(struct platform_device *pdev) {}
126#endif /* CONFIG_PM */ 127#endif /* CONFIG_PM */
127 128
128#ifdef CONFIG_OMAP_MUX 129#ifdef CONFIG_OMAP_MUX
129static void omap_serial_fill_default_pads(struct omap_board_data *bdata) 130
131#define OMAP_UART_DEFAULT_PAD_NAME_LEN 28
132static char rx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN],
133 tx_pad_name[OMAP_UART_DEFAULT_PAD_NAME_LEN] __initdata;
134
135static void __init
136omap_serial_fill_uart_tx_rx_pads(struct omap_board_data *bdata,
137 struct omap_uart_state *uart)
138{
139 uart->default_omap_uart_pads[0].name = rx_pad_name;
140 uart->default_omap_uart_pads[0].flags = OMAP_DEVICE_PAD_REMUX |
141 OMAP_DEVICE_PAD_WAKEUP;
142 uart->default_omap_uart_pads[0].enable = OMAP_PIN_INPUT |
143 OMAP_MUX_MODE0;
144 uart->default_omap_uart_pads[0].idle = OMAP_PIN_INPUT | OMAP_MUX_MODE0;
145 uart->default_omap_uart_pads[1].name = tx_pad_name;
146 uart->default_omap_uart_pads[1].enable = OMAP_PIN_OUTPUT |
147 OMAP_MUX_MODE0;
148 bdata->pads = uart->default_omap_uart_pads;
149 bdata->pads_cnt = ARRAY_SIZE(uart->default_omap_uart_pads);
150}
151
152static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
153 struct omap_uart_state *uart)
130{ 154{
155 struct omap_mux_partition *tx_partition = NULL, *rx_partition = NULL;
156 struct omap_mux *rx_mux = NULL, *tx_mux = NULL;
157 char *rx_fmt, *tx_fmt;
158 int uart_nr = bdata->id + 1;
159
160 if (bdata->id != 2) {
161 rx_fmt = "uart%d_rx.uart%d_rx";
162 tx_fmt = "uart%d_tx.uart%d_tx";
163 } else {
164 rx_fmt = "uart%d_rx_irrx.uart%d_rx_irrx";
165 tx_fmt = "uart%d_tx_irtx.uart%d_tx_irtx";
166 }
167
168 snprintf(rx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, rx_fmt,
169 uart_nr, uart_nr);
170 snprintf(tx_pad_name, OMAP_UART_DEFAULT_PAD_NAME_LEN, tx_fmt,
171 uart_nr, uart_nr);
172
173 if (omap_mux_get_by_name(rx_pad_name, &rx_partition, &rx_mux) >= 0 &&
174 omap_mux_get_by_name
175 (tx_pad_name, &tx_partition, &tx_mux) >= 0) {
176 u16 tx_mode, rx_mode;
177
178 tx_mode = omap_mux_read(tx_partition, tx_mux->reg_offset);
179 rx_mode = omap_mux_read(rx_partition, rx_mux->reg_offset);
180
181 /*
182 * Check if uart is used in default tx/rx mode i.e. in mux mode0
183 * if yes then configure rx pin for wake up capability
184 */
185 if (OMAP_MODE_UART(rx_mode) && OMAP_MODE_UART(tx_mode))
186 omap_serial_fill_uart_tx_rx_pads(bdata, uart);
187 }
131} 188}
132#else 189#else
133static void omap_serial_fill_default_pads(struct omap_board_data *bdata) {} 190static void __init omap_serial_check_wakeup(struct omap_board_data *bdata,
191 struct omap_uart_state *uart)
192{
193}
134#endif 194#endif
135 195
136static char *cmdline_find_option(char *str) 196static char *cmdline_find_option(char *str)
@@ -287,8 +347,7 @@ void __init omap_serial_board_init(struct omap_uart_port_info *info)
287 bdata.pads = NULL; 347 bdata.pads = NULL;
288 bdata.pads_cnt = 0; 348 bdata.pads_cnt = 0;
289 349
290 if (cpu_is_omap44xx() || cpu_is_omap34xx()) 350 omap_serial_check_wakeup(&bdata, uart);
291 omap_serial_fill_default_pads(&bdata);
292 351
293 if (!info) 352 if (!info)
294 omap_serial_init_port(&bdata, NULL); 353 omap_serial_init_port(&bdata, NULL);
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index b19d1b43c12e..c4a576856661 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -41,12 +41,10 @@ static struct musb_hdrc_config musb_config = {
41}; 41};
42 42
43static struct musb_hdrc_platform_data musb_plat = { 43static struct musb_hdrc_platform_data musb_plat = {
44#ifdef CONFIG_USB_MUSB_OTG 44#ifdef CONFIG_USB_GADGET_MUSB_HDRC
45 .mode = MUSB_OTG, 45 .mode = MUSB_OTG,
46#elif defined(CONFIG_USB_MUSB_HDRC_HCD) 46#else
47 .mode = MUSB_HOST, 47 .mode = MUSB_HOST,
48#elif defined(CONFIG_USB_GADGET_MUSB_HDRC)
49 .mode = MUSB_PERIPHERAL,
50#endif 48#endif
51 /* .clock is set dynamically */ 49 /* .clock is set dynamically */
52 .config = &musb_config, 50 .config = &musb_config,
diff --git a/arch/arm/mach-omap2/usb-tusb6010.c b/arch/arm/mach-omap2/usb-tusb6010.c
index db84a46ce7fd..805bea6edf17 100644
--- a/arch/arm/mach-omap2/usb-tusb6010.c
+++ b/arch/arm/mach-omap2/usb-tusb6010.c
@@ -300,7 +300,7 @@ tusb6010_setup_interface(struct musb_hdrc_platform_data *data,
300 printk(error, 3, status); 300 printk(error, 3, status);
301 return status; 301 return status;
302 } 302 }
303 tusb_resources[2].start = irq + IH_GPIO_BASE; 303 tusb_resources[2].start = gpio_to_irq(irq);
304 304
305 /* set up memory timings ... can speed them up later */ 305 /* set up memory timings ... can speed them up later */
306 if (!ps_refclk) { 306 if (!ps_refclk) {
diff --git a/arch/arm/mach-orion5x/include/mach/bridge-regs.h b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
index 96484bcd34ca..11a3c1e9801f 100644
--- a/arch/arm/mach-orion5x/include/mach/bridge-regs.h
+++ b/arch/arm/mach-orion5x/include/mach/bridge-regs.h
@@ -35,5 +35,5 @@
35#define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE | 0x204) 35#define MAIN_IRQ_MASK (ORION5X_BRIDGE_VIRT_BASE | 0x204)
36 36
37#define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE | 0x300) 37#define TIMER_VIRT_BASE (ORION5X_BRIDGE_VIRT_BASE | 0x300)
38 38#define TIMER_PHYS_BASE (ORION5X_BRIDGE_PHYS_BASE | 0x300)
39#endif 39#endif
diff --git a/arch/arm/mach-orion5x/include/mach/io.h b/arch/arm/mach-orion5x/include/mach/io.h
new file mode 100644
index 000000000000..1aa5d0a50a0b
--- /dev/null
+++ b/arch/arm/mach-orion5x/include/mach/io.h
@@ -0,0 +1,22 @@
1/*
2 * arch/arm/mach-orion5x/include/mach/io.h
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9#ifndef __ASM_ARCH_IO_H
10#define __ASM_ARCH_IO_H
11
12#include <mach/orion5x.h>
13#include <asm/sizes.h>
14
15#define IO_SPACE_LIMIT SZ_2M
16static inline void __iomem *__io(unsigned long addr)
17{
18 return (void __iomem *)(addr + ORION5X_PCIE_IO_VIRT_BASE);
19}
20
21#define __io(a) __io(a)
22#endif
diff --git a/arch/arm/mach-orion5x/include/mach/orion5x.h b/arch/arm/mach-orion5x/include/mach/orion5x.h
index 2745f5d95b3f..683e085ce162 100644
--- a/arch/arm/mach-orion5x/include/mach/orion5x.h
+++ b/arch/arm/mach-orion5x/include/mach/orion5x.h
@@ -82,6 +82,7 @@
82#define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE | 0x2100) 82#define UART1_VIRT_BASE (ORION5X_DEV_BUS_VIRT_BASE | 0x2100)
83 83
84#define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x20000) 84#define ORION5X_BRIDGE_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x20000)
85#define ORION5X_BRIDGE_PHYS_BASE (ORION5X_REGS_PHYS_BASE | 0x20000)
85 86
86#define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x30000) 87#define ORION5X_PCI_VIRT_BASE (ORION5X_REGS_VIRT_BASE | 0x30000)
87 88
diff --git a/arch/arm/mach-orion5x/ts78xx-setup.c b/arch/arm/mach-orion5x/ts78xx-setup.c
index a74f3cf54cc5..b4203277f3cd 100644
--- a/arch/arm/mach-orion5x/ts78xx-setup.c
+++ b/arch/arm/mach-orion5x/ts78xx-setup.c
@@ -251,8 +251,6 @@ static void ts78xx_ts_nand_read_buf(struct mtd_info *mtd,
251 readsb(io_base, buf, len); 251 readsb(io_base, buf, len);
252} 252}
253 253
254const char *ts_nand_part_probes[] = { "cmdlinepart", NULL };
255
256static struct mtd_partition ts78xx_ts_nand_parts[] = { 254static struct mtd_partition ts78xx_ts_nand_parts[] = {
257 { 255 {
258 .name = "mbr", 256 .name = "mbr",
@@ -277,7 +275,6 @@ static struct mtd_partition ts78xx_ts_nand_parts[] = {
277static struct platform_nand_data ts78xx_ts_nand_data = { 275static struct platform_nand_data ts78xx_ts_nand_data = {
278 .chip = { 276 .chip = {
279 .nr_chips = 1, 277 .nr_chips = 1,
280 .part_probe_types = ts_nand_part_probes,
281 .partitions = ts78xx_ts_nand_parts, 278 .partitions = ts78xx_ts_nand_parts,
282 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts), 279 .nr_partitions = ARRAY_SIZE(ts78xx_ts_nand_parts),
283 .chip_delay = 15, 280 .chip_delay = 15,
diff --git a/arch/arm/mach-pxa/balloon3.c b/arch/arm/mach-pxa/balloon3.c
index 56e8cebeb7d5..9244493dbcb7 100644
--- a/arch/arm/mach-pxa/balloon3.c
+++ b/arch/arm/mach-pxa/balloon3.c
@@ -679,8 +679,6 @@ static struct mtd_partition balloon3_partition_info[] = {
679 }, 679 },
680}; 680};
681 681
682static const char *balloon3_part_probes[] = { "cmdlinepart", NULL };
683
684struct platform_nand_data balloon3_nand_pdata = { 682struct platform_nand_data balloon3_nand_pdata = {
685 .chip = { 683 .chip = {
686 .nr_chips = 4, 684 .nr_chips = 4,
@@ -688,7 +686,6 @@ struct platform_nand_data balloon3_nand_pdata = {
688 .nr_partitions = ARRAY_SIZE(balloon3_partition_info), 686 .nr_partitions = ARRAY_SIZE(balloon3_partition_info),
689 .partitions = balloon3_partition_info, 687 .partitions = balloon3_partition_info,
690 .chip_delay = 50, 688 .chip_delay = 50,
691 .part_probe_types = balloon3_part_probes,
692 }, 689 },
693 .ctrl = { 690 .ctrl = {
694 .hwcontrol = 0, 691 .hwcontrol = 0,
diff --git a/arch/arm/mach-pxa/em-x270.c b/arch/arm/mach-pxa/em-x270.c
index a3a4a38d4972..97f82ad341bf 100644
--- a/arch/arm/mach-pxa/em-x270.c
+++ b/arch/arm/mach-pxa/em-x270.c
@@ -338,8 +338,6 @@ static struct mtd_partition em_x270_partition_info[] = {
338 }, 338 },
339}; 339};
340 340
341static const char *em_x270_part_probes[] = { "cmdlinepart", NULL };
342
343struct platform_nand_data em_x270_nand_platdata = { 341struct platform_nand_data em_x270_nand_platdata = {
344 .chip = { 342 .chip = {
345 .nr_chips = 1, 343 .nr_chips = 1,
@@ -347,7 +345,6 @@ struct platform_nand_data em_x270_nand_platdata = {
347 .nr_partitions = ARRAY_SIZE(em_x270_partition_info), 345 .nr_partitions = ARRAY_SIZE(em_x270_partition_info),
348 .partitions = em_x270_partition_info, 346 .partitions = em_x270_partition_info,
349 .chip_delay = 20, 347 .chip_delay = 20,
350 .part_probe_types = em_x270_part_probes,
351 }, 348 },
352 .ctrl = { 349 .ctrl = {
353 .hwcontrol = 0, 350 .hwcontrol = 0,
diff --git a/arch/arm/mach-pxa/palmtx.c b/arch/arm/mach-pxa/palmtx.c
index 9507605ed547..0da35dccfd89 100644
--- a/arch/arm/mach-pxa/palmtx.c
+++ b/arch/arm/mach-pxa/palmtx.c
@@ -268,8 +268,6 @@ static struct mtd_partition palmtx_partition_info[] = {
268 }, 268 },
269}; 269};
270 270
271static const char *palmtx_part_probes[] = { "cmdlinepart", NULL };
272
273struct platform_nand_data palmtx_nand_platdata = { 271struct platform_nand_data palmtx_nand_platdata = {
274 .chip = { 272 .chip = {
275 .nr_chips = 1, 273 .nr_chips = 1,
@@ -277,7 +275,6 @@ struct platform_nand_data palmtx_nand_platdata = {
277 .nr_partitions = ARRAY_SIZE(palmtx_partition_info), 275 .nr_partitions = ARRAY_SIZE(palmtx_partition_info),
278 .partitions = palmtx_partition_info, 276 .partitions = palmtx_partition_info,
279 .chip_delay = 20, 277 .chip_delay = 20,
280 .part_probe_types = palmtx_part_probes,
281 }, 278 },
282 .ctrl = { 279 .ctrl = {
283 .cmd_ctrl = palmtx_nand_cmd_ctl, 280 .cmd_ctrl = palmtx_nand_cmd_ctl,
diff --git a/arch/arm/mach-s3c24xx/include/mach/irqs.h b/arch/arm/mach-s3c24xx/include/mach/irqs.h
index e53b2177319e..b7a9f4d469e8 100644
--- a/arch/arm/mach-s3c24xx/include/mach/irqs.h
+++ b/arch/arm/mach-s3c24xx/include/mach/irqs.h
@@ -134,6 +134,17 @@
134#define IRQ_S32416_WDT S3C2410_IRQSUB(27) 134#define IRQ_S32416_WDT S3C2410_IRQSUB(27)
135#define IRQ_S32416_AC97 S3C2410_IRQSUB(28) 135#define IRQ_S32416_AC97 S3C2410_IRQSUB(28)
136 136
137/* second interrupt-register of s3c2416/s3c2450 */
138
139#define S3C2416_IRQ(x) S3C2410_IRQ((x) + 54 + 29)
140#define IRQ_S3C2416_2D S3C2416_IRQ(0)
141#define IRQ_S3C2416_IIC1 S3C2416_IRQ(1)
142#define IRQ_S3C2416_RESERVED2 S3C2416_IRQ(2)
143#define IRQ_S3C2416_RESERVED3 S3C2416_IRQ(3)
144#define IRQ_S3C2416_PCM0 S3C2416_IRQ(4)
145#define IRQ_S3C2416_PCM1 S3C2416_IRQ(5)
146#define IRQ_S3C2416_I2S0 S3C2416_IRQ(6)
147#define IRQ_S3C2416_I2S1 S3C2416_IRQ(7)
137 148
138/* extra irqs for s3c2440 */ 149/* extra irqs for s3c2440 */
139 150
@@ -175,7 +186,9 @@
175#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27) 186#define IRQ_S3C2443_WDT S3C2410_IRQSUB(27)
176#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28) 187#define IRQ_S3C2443_AC97 S3C2410_IRQSUB(28)
177 188
178#if defined(CONFIG_CPU_S3C2443) || defined(CONFIG_CPU_S3C2416)
189#if defined(CONFIG_CPU_S3C2416)
190#define NR_IRQS (IRQ_S3C2416_I2S1 + 1)
191#elif defined(CONFIG_CPU_S3C2443)
179#define NR_IRQS (IRQ_S3C2443_AC97+1) 192#define NR_IRQS (IRQ_S3C2443_AC97+1)
180#else 193#else
181#define NR_IRQS (IRQ_S3C2440_AC97+1) 194#define NR_IRQS (IRQ_S3C2440_AC97+1)
diff --git a/arch/arm/mach-s3c24xx/irq-s3c2416.c b/arch/arm/mach-s3c24xx/irq-s3c2416.c
index fd49f35e448e..23ec97370f32 100644
--- a/arch/arm/mach-s3c24xx/irq-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/irq-s3c2416.c
@@ -27,6 +27,7 @@
27#include <linux/ioport.h> 27#include <linux/ioport.h>
28#include <linux/device.h> 28#include <linux/device.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/syscore_ops.h>
30 31
31#include <mach/hardware.h> 32#include <mach/hardware.h>
32#include <asm/irq.h> 33#include <asm/irq.h>
@@ -192,6 +193,43 @@ static struct irq_chip s3c2416_irq_uart3 = {
192 .irq_ack = s3c2416_irq_uart3_ack, 193 .irq_ack = s3c2416_irq_uart3_ack,
193}; 194};
194 195
196/* second interrupt register */
197
198static inline void s3c2416_irq_ack_second(struct irq_data *data)
199{
200 unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
201
202 __raw_writel(bitval, S3C2416_SRCPND2);
203 __raw_writel(bitval, S3C2416_INTPND2);
204}
205
206static void s3c2416_irq_mask_second(struct irq_data *data)
207{
208 unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
209 unsigned long mask;
210
211 mask = __raw_readl(S3C2416_INTMSK2);
212 mask |= bitval;
213 __raw_writel(mask, S3C2416_INTMSK2);
214}
215
216static void s3c2416_irq_unmask_second(struct irq_data *data)
217{
218 unsigned long bitval = 1UL << (data->irq - IRQ_S3C2416_2D);
219 unsigned long mask;
220
221 mask = __raw_readl(S3C2416_INTMSK2);
222 mask &= ~bitval;
223 __raw_writel(mask, S3C2416_INTMSK2);
224}
225
226struct irq_chip s3c2416_irq_second = {
227 .irq_ack = s3c2416_irq_ack_second,
228 .irq_mask = s3c2416_irq_mask_second,
229 .irq_unmask = s3c2416_irq_unmask_second,
230};
231
232
195/* IRQ initialisation code */ 233/* IRQ initialisation code */
196 234
197static int __init s3c2416_add_sub(unsigned int base, 235static int __init s3c2416_add_sub(unsigned int base,
@@ -213,6 +251,42 @@ static int __init s3c2416_add_sub(unsigned int base,
213 return 0; 251 return 0;
214} 252}
215 253
254static void __init s3c2416_irq_add_second(void)
255{
256 unsigned long pend;
257 unsigned long last;
258 int irqno;
259 int i;
260
261 /* first, clear all interrupts pending... */
262 last = 0;
263 for (i = 0; i < 4; i++) {
264 pend = __raw_readl(S3C2416_INTPND2);
265
266 if (pend == 0 || pend == last)
267 break;
268
269 __raw_writel(pend, S3C2416_SRCPND2);
270 __raw_writel(pend, S3C2416_INTPND2);
271 printk(KERN_INFO "irq: clearing pending status %08x\n",
272 (int)pend);
273 last = pend;
274 }
275
276 for (irqno = IRQ_S3C2416_2D; irqno <= IRQ_S3C2416_I2S1; irqno++) {
277 switch (irqno) {
278 case IRQ_S3C2416_RESERVED2:
279 case IRQ_S3C2416_RESERVED3:
280 /* no IRQ here */
281 break;
282 default:
283 irq_set_chip_and_handler(irqno, &s3c2416_irq_second,
284 handle_edge_irq);
285 set_irq_flags(irqno, IRQF_VALID);
286 }
287 }
288}
289
216static int __init s3c2416_irq_add(struct device *dev, 290static int __init s3c2416_irq_add(struct device *dev,
217 struct subsys_interface *sif) 291 struct subsys_interface *sif)
218{ 292{
@@ -232,6 +306,8 @@ static int __init s3c2416_irq_add(struct device *dev,
232 &s3c2416_irq_wdtac97, 306 &s3c2416_irq_wdtac97,
233 IRQ_S3C2443_WDT, IRQ_S3C2443_AC97); 307 IRQ_S3C2443_WDT, IRQ_S3C2443_AC97);
234 308
309 s3c2416_irq_add_second();
310
235 return 0; 311 return 0;
236} 312}
237 313
@@ -248,3 +324,25 @@ static int __init s3c2416_irq_init(void)
248 324
249arch_initcall(s3c2416_irq_init); 325arch_initcall(s3c2416_irq_init);
250 326
327#ifdef CONFIG_PM
328static struct sleep_save irq_save[] = {
329 SAVE_ITEM(S3C2416_INTMSK2),
330};
331
332int s3c2416_irq_suspend(void)
333{
334 s3c_pm_do_save(irq_save, ARRAY_SIZE(irq_save));
335
336 return 0;
337}
338
339void s3c2416_irq_resume(void)
340{
341 s3c_pm_do_restore(irq_save, ARRAY_SIZE(irq_save));
342}
343
344struct syscore_ops s3c2416_irq_syscore_ops = {
345 .suspend = s3c2416_irq_suspend,
346 .resume = s3c2416_irq_resume,
347};
348#endif
diff --git a/arch/arm/mach-s3c24xx/mach-smdk2416.c b/arch/arm/mach-s3c24xx/mach-smdk2416.c
index 30a44f806e01..c3100a044fbe 100644
--- a/arch/arm/mach-s3c24xx/mach-smdk2416.c
+++ b/arch/arm/mach-s3c24xx/mach-smdk2416.c
@@ -148,23 +148,25 @@ static struct s3c24xx_hsudc_platdata smdk2416_hsudc_platdata = {
148 148
149static struct s3c_fb_pd_win smdk2416_fb_win[] = { 149static struct s3c_fb_pd_win smdk2416_fb_win[] = {
150 [0] = { 150 [0] = {
151 /* think this is the same as the smdk6410 */
152 .win_mode = {
153 .pixclock = 41094,
154 .left_margin = 8,
155 .right_margin = 13,
156 .upper_margin = 7,
157 .lower_margin = 5,
158 .hsync_len = 3,
159 .vsync_len = 1,
160 .xres = 800,
161 .yres = 480,
162 },
163 .default_bpp = 16, 151 .default_bpp = 16,
164 .max_bpp = 32, 152 .max_bpp = 32,
153 .xres = 800,
154 .yres = 480,
165 }, 155 },
166}; 156};
167 157
158static struct fb_videomode smdk2416_lcd_timing = {
159 .pixclock = 41094,
160 .left_margin = 8,
161 .right_margin = 13,
162 .upper_margin = 7,
163 .lower_margin = 5,
164 .hsync_len = 3,
165 .vsync_len = 1,
166 .xres = 800,
167 .yres = 480,
168};
169
168static void s3c2416_fb_gpio_setup_24bpp(void) 170static void s3c2416_fb_gpio_setup_24bpp(void)
169{ 171{
170 unsigned int gpio; 172 unsigned int gpio;
@@ -187,6 +189,7 @@ static void s3c2416_fb_gpio_setup_24bpp(void)
187 189
188static struct s3c_fb_platdata smdk2416_fb_platdata = { 190static struct s3c_fb_platdata smdk2416_fb_platdata = {
189 .win[0] = &smdk2416_fb_win[0], 191 .win[0] = &smdk2416_fb_win[0],
192 .vtiming = &smdk2416_lcd_timing,
190 .setup_gpio = s3c2416_fb_gpio_setup_24bpp, 193 .setup_gpio = s3c2416_fb_gpio_setup_24bpp,
191 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 194 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
192 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 195 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-s3c24xx/s3c2416.c b/arch/arm/mach-s3c24xx/s3c2416.c
index 7743fade50df..ed5a95ece9eb 100644
--- a/arch/arm/mach-s3c24xx/s3c2416.c
+++ b/arch/arm/mach-s3c24xx/s3c2416.c
@@ -106,6 +106,7 @@ int __init s3c2416_init(void)
106 register_syscore_ops(&s3c2416_pm_syscore_ops); 106 register_syscore_ops(&s3c2416_pm_syscore_ops);
107#endif 107#endif
108 register_syscore_ops(&s3c24xx_irq_syscore_ops); 108 register_syscore_ops(&s3c24xx_irq_syscore_ops);
109 register_syscore_ops(&s3c2416_irq_syscore_ops);
109 110
110 return device_register(&s3c2416_dev); 111 return device_register(&s3c2416_dev);
111} 112}
diff --git a/arch/arm/mach-s3c64xx/cpuidle.c b/arch/arm/mach-s3c64xx/cpuidle.c
index 179460f38db7..acb197ccf3f7 100644
--- a/arch/arm/mach-s3c64xx/cpuidle.c
+++ b/arch/arm/mach-s3c64xx/cpuidle.c
@@ -27,12 +27,7 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
27 struct cpuidle_driver *drv, 27 struct cpuidle_driver *drv,
28 int index) 28 int index)
29{ 29{
30 struct timeval before, after;
31 unsigned long tmp; 30 unsigned long tmp;
32 int idle_time;
33
34 local_irq_disable();
35 do_gettimeofday(&before);
36 31
37 /* Setup PWRCFG to enter idle mode */ 32 /* Setup PWRCFG to enter idle mode */
38 tmp = __raw_readl(S3C64XX_PWR_CFG); 33 tmp = __raw_readl(S3C64XX_PWR_CFG);
@@ -42,42 +37,32 @@ static int s3c64xx_enter_idle(struct cpuidle_device *dev,
42 37
43 cpu_do_idle(); 38 cpu_do_idle();
44 39
45 do_gettimeofday(&after);
46 local_irq_enable();
47 idle_time = (after.tv_sec - before.tv_sec) * USEC_PER_SEC +
48 (after.tv_usec - before.tv_usec);
49
50 dev->last_residency = idle_time;
51 return index; 40 return index;
52} 41}
53 42
54static struct cpuidle_state s3c64xx_cpuidle_set[] = { 43static DEFINE_PER_CPU(struct cpuidle_device, s3c64xx_cpuidle_device);
55 [0] = {
56 .enter = s3c64xx_enter_idle,
57 .exit_latency = 1,
58 .target_residency = 1,
59 .flags = CPUIDLE_FLAG_TIME_VALID,
60 .name = "IDLE",
61 .desc = "System active, ARM gated",
62 },
63};
64 44
65static struct cpuidle_driver s3c64xx_cpuidle_driver = { 45static struct cpuidle_driver s3c64xx_cpuidle_driver = {
66 .name = "s3c64xx_cpuidle", 46 .name = "s3c64xx_cpuidle",
67 .owner = THIS_MODULE, 47 .owner = THIS_MODULE,
68 .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set), 48 .en_core_tk_irqen = 1,
69}; 49 .states = {
70 50 {
71static struct cpuidle_device s3c64xx_cpuidle_device = { 51 .enter = s3c64xx_enter_idle,
72 .state_count = ARRAY_SIZE(s3c64xx_cpuidle_set), 52 .exit_latency = 1,
53 .target_residency = 1,
54 .flags = CPUIDLE_FLAG_TIME_VALID,
55 .name = "IDLE",
56 .desc = "System active, ARM gated",
57 },
58 },
59 .state_count = 1,
73}; 60};
74 61
75static int __init s3c64xx_init_cpuidle(void) 62static int __init s3c64xx_init_cpuidle(void)
76{ 63{
77 int ret; 64 int ret;
78 65
79 memcpy(s3c64xx_cpuidle_driver.states, s3c64xx_cpuidle_set,
80 sizeof(s3c64xx_cpuidle_set));
81 cpuidle_register_driver(&s3c64xx_cpuidle_driver); 66 cpuidle_register_driver(&s3c64xx_cpuidle_driver);
82 67
83 ret = cpuidle_register_device(&s3c64xx_cpuidle_device); 68 ret = cpuidle_register_device(&s3c64xx_cpuidle_device);
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index 314df0518afd..ffa29ddfdfce 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -134,24 +134,27 @@ static struct platform_device anw6410_lcd_powerdev = {
134}; 134};
135 135
136static struct s3c_fb_pd_win anw6410_fb_win0 = { 136static struct s3c_fb_pd_win anw6410_fb_win0 = {
137 /* this is to ensure we use win0 */
138 .win_mode = {
139 .left_margin = 8,
140 .right_margin = 13,
141 .upper_margin = 7,
142 .lower_margin = 5,
143 .hsync_len = 3,
144 .vsync_len = 1,
145 .xres = 800,
146 .yres = 480,
147 },
148 .max_bpp = 32, 137 .max_bpp = 32,
149 .default_bpp = 16, 138 .default_bpp = 16,
139 .xres = 800,
140 .yres = 480,
141};
142
143static struct fb_videomode anw6410_lcd_timing = {
144 .left_margin = 8,
145 .right_margin = 13,
146 .upper_margin = 7,
147 .lower_margin = 5,
148 .hsync_len = 3,
149 .vsync_len = 1,
150 .xres = 800,
151 .yres = 480,
150}; 152};
151 153
152/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ 154/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
153static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = { 155static struct s3c_fb_platdata anw6410_lcd_pdata __initdata = {
154 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 156 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
157 .vtiming = &anw6410_lcd_timing,
155 .win[0] = &anw6410_fb_win0, 158 .win[0] = &anw6410_fb_win0,
156 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 159 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
157 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 160 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410-module.c b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
index 0ace108c3e3d..7a27f5603c74 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410-module.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410-module.c
@@ -182,6 +182,11 @@ static const struct i2c_board_info wm1277_devs[] = {
182 }, 182 },
183}; 183};
184 184
185static const struct i2c_board_info wm6230_i2c_devs[] = {
186 { I2C_BOARD_INFO("wm9081", 0x6c),
187 .platform_data = &wm9081_pdata, },
188};
189
185static __devinitdata const struct { 190static __devinitdata const struct {
186 u8 id; 191 u8 id;
187 const char *name; 192 const char *name;
@@ -195,7 +200,9 @@ static __devinitdata const struct {
195 { .id = 0x03, .name = "1252-EV1 Glenlivet" }, 200 { .id = 0x03, .name = "1252-EV1 Glenlivet" },
196 { .id = 0x11, .name = "6249-EV2 Glenfarclas", }, 201 { .id = 0x11, .name = "6249-EV2 Glenfarclas", },
197 { .id = 0x14, .name = "6271-EV1 Lochnagar" }, 202 { .id = 0x14, .name = "6271-EV1 Lochnagar" },
198 { .id = 0x15, .name = "XXXX-EV1 Bells" },
203 { .id = 0x15, .name = "6320-EV1 Bells",
204 .i2c_devs = wm6230_i2c_devs,
205 .num_i2c_devs = ARRAY_SIZE(wm6230_i2c_devs) },
199 { .id = 0x21, .name = "1275-EV1 Mortlach" }, 206 { .id = 0x21, .name = "1275-EV1 Mortlach" },
200 { .id = 0x25, .name = "1274-EV1 Glencadam" }, 207 { .id = 0x25, .name = "1274-EV1 Glencadam" },
201 { .id = 0x31, .name = "1253-EV1 Tomatin", 208 { .id = 0x31, .name = "1253-EV1 Tomatin",
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index eda5e027b109..d0c352d861f8 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -151,26 +151,29 @@ static struct platform_device crag6410_lcd_powerdev = {
151 151
152/* 640x480 URT */ 152/* 640x480 URT */
153static struct s3c_fb_pd_win crag6410_fb_win0 = { 153static struct s3c_fb_pd_win crag6410_fb_win0 = {
154 /* this is to ensure we use win0 */
155 .win_mode = {
156 .left_margin = 150,
157 .right_margin = 80,
158 .upper_margin = 40,
159 .lower_margin = 5,
160 .hsync_len = 40,
161 .vsync_len = 5,
162 .xres = 640,
163 .yres = 480,
164 },
165 .max_bpp = 32, 154 .max_bpp = 32,
166 .default_bpp = 16, 155 .default_bpp = 16,
156 .xres = 640,
157 .yres = 480,
167 .virtual_y = 480 * 2, 158 .virtual_y = 480 * 2,
168 .virtual_x = 640, 159 .virtual_x = 640,
169}; 160};
170 161
162static struct fb_videomode crag6410_lcd_timing = {
163 .left_margin = 150,
164 .right_margin = 80,
165 .upper_margin = 40,
166 .lower_margin = 5,
167 .hsync_len = 40,
168 .vsync_len = 5,
169 .xres = 640,
170 .yres = 480,
171};
172
171/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ 173/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
172static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = { 174static struct s3c_fb_platdata crag6410_lcd_pdata __initdata = {
173 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 175 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
176 .vtiming = &crag6410_lcd_timing,
174 .win[0] = &crag6410_fb_win0, 177 .win[0] = &crag6410_fb_win0,
175 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 178 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
176 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 179 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
@@ -671,6 +674,7 @@ static struct i2c_board_info i2c_devs1[] __initdata = {
671 .irq = S3C_EINT(0), 674 .irq = S3C_EINT(0),
672 .platform_data = &glenfarclas_pmic_pdata }, 675 .platform_data = &glenfarclas_pmic_pdata },
673 676
677 { I2C_BOARD_INFO("wlf-gf-module", 0x22) },
674 { I2C_BOARD_INFO("wlf-gf-module", 0x24) }, 678 { I2C_BOARD_INFO("wlf-gf-module", 0x24) },
675 { I2C_BOARD_INFO("wlf-gf-module", 0x25) }, 679 { I2C_BOARD_INFO("wlf-gf-module", 0x25) },
676 { I2C_BOARD_INFO("wlf-gf-module", 0x26) }, 680 { I2C_BOARD_INFO("wlf-gf-module", 0x26) },
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 1bf6b9da20fc..689088162f77 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -129,23 +129,27 @@ static struct platform_device hmt_backlight_device = {
129}; 129};
130 130
131static struct s3c_fb_pd_win hmt_fb_win0 = { 131static struct s3c_fb_pd_win hmt_fb_win0 = {
132 .win_mode = {
133 .left_margin = 8,
134 .right_margin = 13,
135 .upper_margin = 7,
136 .lower_margin = 5,
137 .hsync_len = 3,
138 .vsync_len = 1,
139 .xres = 800,
140 .yres = 480,
141 },
142 .max_bpp = 32, 132 .max_bpp = 32,
143 .default_bpp = 16, 133 .default_bpp = 16,
134 .xres = 800,
135 .yres = 480,
136};
137
138static struct fb_videomode hmt_lcd_timing = {
139 .left_margin = 8,
140 .right_margin = 13,
141 .upper_margin = 7,
142 .lower_margin = 5,
143 .hsync_len = 3,
144 .vsync_len = 1,
145 .xres = 800,
146 .yres = 480,
144}; 147};
145 148
146/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ 149/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
147static struct s3c_fb_platdata hmt_lcd_pdata __initdata = { 150static struct s3c_fb_platdata hmt_lcd_pdata __initdata = {
148 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 151 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
152 .vtiming = &hmt_lcd_timing,
149 .win[0] = &hmt_fb_win0, 153 .win[0] = &hmt_fb_win0,
150 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 154 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
151 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 155 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index f8ea61ea3b33..5539a255a704 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -140,41 +140,59 @@ static struct s3c2410_platform_nand mini6410_nand_info = {
140 .sets = mini6410_nand_sets, 140 .sets = mini6410_nand_sets,
141}; 141};
142 142
143static struct s3c_fb_pd_win mini6410_fb_win[] = { 143static struct s3c_fb_pd_win mini6410_lcd_type0_fb_win = {
144 .max_bpp = 32,
145 .default_bpp = 16,
146 .xres = 480,
147 .yres = 272,
148};
149
150static struct fb_videomode mini6410_lcd_type0_timing = {
151 /* 4.3" 480x272 */
152 .left_margin = 3,
153 .right_margin = 2,
154 .upper_margin = 1,
155 .lower_margin = 1,
156 .hsync_len = 40,
157 .vsync_len = 1,
158 .xres = 480,
159 .yres = 272,
160};
161
162static struct s3c_fb_pd_win mini6410_lcd_type1_fb_win = {
163 .max_bpp = 32,
164 .default_bpp = 16,
165 .xres = 800,
166 .yres = 480,
167};
168
169static struct fb_videomode mini6410_lcd_type1_timing = {
170 /* 7.0" 800x480 */
171 .left_margin = 8,
172 .right_margin = 13,
173 .upper_margin = 7,
174 .lower_margin = 5,
175 .hsync_len = 3,
176 .vsync_len = 1,
177 .xres = 800,
178 .yres = 480,
179};
180
181static struct s3c_fb_platdata mini6410_lcd_pdata[] __initdata = {
144 { 182 {
145 .win_mode = { /* 4.3" 480x272 */ 183 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
146 .left_margin = 3, 184 .vtiming = &mini6410_lcd_type0_timing,
147 .right_margin = 2, 185 .win[0] = &mini6410_lcd_type0_fb_win,
148 .upper_margin = 1, 186 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
149 .lower_margin = 1, 187 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
150 .hsync_len = 40,
151 .vsync_len = 1,
152 .xres = 480,
153 .yres = 272,
154 },
155 .max_bpp = 32,
156 .default_bpp = 16,
157 }, { 188 }, {
158 .win_mode = { /* 7.0" 800x480 */ 189 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
159 .left_margin = 8, 190 .vtiming = &mini6410_lcd_type1_timing,
160 .right_margin = 13, 191 .win[0] = &mini6410_lcd_type1_fb_win,
161 .upper_margin = 7, 192 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
162 .lower_margin = 5, 193 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
163 .hsync_len = 3,
164 .vsync_len = 1,
165 .xres = 800,
166 .yres = 480,
167 },
168 .max_bpp = 32,
169 .default_bpp = 16,
170 }, 194 },
171}; 195 { },
172
173static struct s3c_fb_platdata mini6410_lcd_pdata __initdata = {
174 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
175 .win[0] = &mini6410_fb_win[0],
176 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
177 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
178}; 196};
179 197
180static void mini6410_lcd_power_set(struct plat_lcd_data *pd, 198static void mini6410_lcd_power_set(struct plat_lcd_data *pd,
@@ -272,7 +290,7 @@ static void mini6410_parse_features(
272 "screen type already set\n", f); 290 "screen type already set\n", f);
273 } else { 291 } else {
274 int li = f - '0'; 292 int li = f - '0';
275 if (li >= ARRAY_SIZE(mini6410_fb_win))
293 if (li >= ARRAY_SIZE(mini6410_lcd_pdata))
276 printk(KERN_INFO "MINI6410: '%c' out " 294 printk(KERN_INFO "MINI6410: '%c' out "
277 "of range LCD mode\n", f); 295 "of range LCD mode\n", f);
278 else { 296 else {
@@ -296,14 +314,12 @@ static void __init mini6410_machine_init(void)
296 /* Parse the feature string */ 314 /* Parse the feature string */
297 mini6410_parse_features(&features, mini6410_features_str); 315 mini6410_parse_features(&features, mini6410_features_str);
298 316
299 mini6410_lcd_pdata.win[0] = &mini6410_fb_win[features.lcd_index];
300
301 printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n", 317 printk(KERN_INFO "MINI6410: selected LCD display is %dx%d\n",
302 mini6410_lcd_pdata.win[0]->win_mode.xres,
303 mini6410_lcd_pdata.win[0]->win_mode.yres);
318 mini6410_lcd_pdata[features.lcd_index].win[0]->xres,
319 mini6410_lcd_pdata[features.lcd_index].win[0]->yres);
304 320
305 s3c_nand_set_platdata(&mini6410_nand_info); 321 s3c_nand_set_platdata(&mini6410_nand_info);
306 s3c_fb_set_platdata(&mini6410_lcd_pdata);
322 s3c_fb_set_platdata(&mini6410_lcd_pdata[features.lcd_index]);
307 s3c24xx_ts_set_platdata(NULL); 323 s3c24xx_ts_set_platdata(NULL);
308 324
309 /* configure nCS1 width to 16 bits */ 325 /* configure nCS1 width to 16 bits */
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index b92d8e17d502..326b21604bc3 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -106,41 +106,57 @@ static struct platform_device real6410_device_eth = {
106 }, 106 },
107}; 107};
108 108
109static struct s3c_fb_pd_win real6410_fb_win[] = { 109static struct s3c_fb_pd_win real6410_lcd_type0_fb_win = {
110 .max_bpp = 32,
111 .default_bpp = 16,
112 .xres = 480,
113 .yres = 272,
114};
115
116static struct fb_videomode real6410_lcd_type0_timing = {
117 /* 4.3" 480x272 */
118 .left_margin = 3,
119 .right_margin = 2,
120 .upper_margin = 1,
121 .lower_margin = 1,
122 .hsync_len = 40,
123 .vsync_len = 1,
124};
125
126static struct s3c_fb_pd_win real6410_lcd_type1_fb_win = {
127 .max_bpp = 32,
128 .default_bpp = 16,
129 .xres = 800,
130 .yres = 480,
131};
132
133static struct fb_videomode real6410_lcd_type1_timing = {
134 /* 7.0" 800x480 */
135 .left_margin = 8,
136 .right_margin = 13,
137 .upper_margin = 7,
138 .lower_margin = 5,
139 .hsync_len = 3,
140 .vsync_len = 1,
141 .xres = 800,
142 .yres = 480,
143};
144
145static struct s3c_fb_platdata real6410_lcd_pdata[] __initdata = {
110 { 146 {
111 .win_mode = { /* 4.3" 480x272 */ 147 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
112 .left_margin = 3, 148 .vtiming = &real6410_lcd_type0_timing,
113 .right_margin = 2, 149 .win[0] = &real6410_lcd_type0_fb_win,
114 .upper_margin = 1, 150 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
115 .lower_margin = 1, 151 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
116 .hsync_len = 40,
117 .vsync_len = 1,
118 .xres = 480,
119 .yres = 272,
120 },
121 .max_bpp = 32,
122 .default_bpp = 16,
123 }, { 152 }, {
124 .win_mode = { /* 7.0" 800x480 */ 153 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
125 .left_margin = 8, 154 .vtiming = &real6410_lcd_type1_timing,
126 .right_margin = 13, 155 .win[0] = &real6410_lcd_type1_fb_win,
127 .upper_margin = 7, 156 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
128 .lower_margin = 5, 157 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
129 .hsync_len = 3,
130 .vsync_len = 1,
131 .xres = 800,
132 .yres = 480,
133 },
134 .max_bpp = 32,
135 .default_bpp = 16,
136 }, 158 },
137}; 159 { },
138
139static struct s3c_fb_platdata real6410_lcd_pdata __initdata = {
140 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
141 .win[0] = &real6410_fb_win[0],
142 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
143 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
144}; 160};
145 161
146static struct mtd_partition real6410_nand_part[] = { 162static struct mtd_partition real6410_nand_part[] = {
@@ -253,7 +269,7 @@ static void real6410_parse_features(
253 "screen type already set\n", f); 269 "screen type already set\n", f);
254 } else { 270 } else {
255 int li = f - '0'; 271 int li = f - '0';
256 if (li >= ARRAY_SIZE(real6410_fb_win))
272 if (li >= ARRAY_SIZE(real6410_lcd_pdata))
257 printk(KERN_INFO "REAL6410: '%c' out " 273 printk(KERN_INFO "REAL6410: '%c' out "
258 "of range LCD mode\n", f); 274 "of range LCD mode\n", f);
259 else { 275 else {
@@ -277,13 +293,11 @@ static void __init real6410_machine_init(void)
277 /* Parse the feature string */ 293 /* Parse the feature string */
278 real6410_parse_features(&features, real6410_features_str); 294 real6410_parse_features(&features, real6410_features_str);
279 295
280 real6410_lcd_pdata.win[0] = &real6410_fb_win[features.lcd_index];
281
282 printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n", 296 printk(KERN_INFO "REAL6410: selected LCD display is %dx%d\n",
283 real6410_lcd_pdata.win[0]->win_mode.xres,
284 real6410_lcd_pdata.win[0]->win_mode.yres);
297 real6410_lcd_pdata[features.lcd_index].win[0]->xres,
298 real6410_lcd_pdata[features.lcd_index].win[0]->yres);
285 299
286 s3c_fb_set_platdata(&real6410_lcd_pdata);
300 s3c_fb_set_platdata(&real6410_lcd_pdata[features.lcd_index]);
287 s3c_nand_set_platdata(&real6410_nand_info); 301 s3c_nand_set_platdata(&real6410_nand_info);
288 s3c24xx_ts_set_platdata(NULL); 302 s3c24xx_ts_set_platdata(NULL);
289 303
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index c5021d0335c6..d6266d8b43c9 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -108,23 +108,27 @@ static struct platform_device smartq5_buttons_device = {
108}; 108};
109 109
110static struct s3c_fb_pd_win smartq5_fb_win0 = { 110static struct s3c_fb_pd_win smartq5_fb_win0 = {
111 .win_mode = {
112 .left_margin = 216,
113 .right_margin = 40,
114 .upper_margin = 35,
115 .lower_margin = 10,
116 .hsync_len = 1,
117 .vsync_len = 1,
118 .xres = 800,
119 .yres = 480,
120 .refresh = 80,
121 },
122 .max_bpp = 32, 111 .max_bpp = 32,
123 .default_bpp = 16, 112 .default_bpp = 16,
113 .xres = 800,
114 .yres = 480,
115};
116
117static struct fb_videomode smartq5_lcd_timing = {
118 .left_margin = 216,
119 .right_margin = 40,
120 .upper_margin = 35,
121 .lower_margin = 10,
122 .hsync_len = 1,
123 .vsync_len = 1,
124 .xres = 800,
125 .yres = 480,
126 .refresh = 80,
124}; 127};
125 128
126static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = { 129static struct s3c_fb_platdata smartq5_lcd_pdata __initdata = {
127 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 130 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
131 .vtiming = &smartq5_lcd_timing,
128 .win[0] = &smartq5_fb_win0, 132 .win[0] = &smartq5_fb_win0,
129 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 133 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
130 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | 134 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index aa9072a4cbef..0957d2a980e1 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -124,23 +124,27 @@ static struct platform_device smartq7_buttons_device = {
124}; 124};
125 125
126static struct s3c_fb_pd_win smartq7_fb_win0 = { 126static struct s3c_fb_pd_win smartq7_fb_win0 = {
127 .win_mode = {
128 .left_margin = 3,
129 .right_margin = 5,
130 .upper_margin = 1,
131 .lower_margin = 20,
132 .hsync_len = 10,
133 .vsync_len = 3,
134 .xres = 800,
135 .yres = 480,
136 .refresh = 80,
137 },
138 .max_bpp = 32, 127 .max_bpp = 32,
139 .default_bpp = 16, 128 .default_bpp = 16,
129 .xres = 800,
130 .yres = 480,
131};
132
133static struct fb_videomode smartq7_lcd_timing = {
134 .left_margin = 3,
135 .right_margin = 5,
136 .upper_margin = 1,
137 .lower_margin = 20,
138 .hsync_len = 10,
139 .vsync_len = 3,
140 .xres = 800,
141 .yres = 480,
142 .refresh = 80,
140}; 143};
141 144
142static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = { 145static struct s3c_fb_platdata smartq7_lcd_pdata __initdata = {
143 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 146 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
147 .vtiming = &smartq7_lcd_timing,
144 .win[0] = &smartq7_fb_win0, 148 .win[0] = &smartq7_fb_win0,
145 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 149 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
146 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | 150 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index d44319b09412..df3103d450e2 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -146,26 +146,29 @@ static struct platform_device smdk6410_lcd_powerdev = {
146}; 146};
147 147
148static struct s3c_fb_pd_win smdk6410_fb_win0 = { 148static struct s3c_fb_pd_win smdk6410_fb_win0 = {
149 /* this is to ensure we use win0 */
150 .win_mode = {
151 .left_margin = 8,
152 .right_margin = 13,
153 .upper_margin = 7,
154 .lower_margin = 5,
155 .hsync_len = 3,
156 .vsync_len = 1,
157 .xres = 800,
158 .yres = 480,
159 },
160 .max_bpp = 32, 149 .max_bpp = 32,
161 .default_bpp = 16, 150 .default_bpp = 16,
151 .xres = 800,
152 .yres = 480,
162 .virtual_y = 480 * 2, 153 .virtual_y = 480 * 2,
163 .virtual_x = 800, 154 .virtual_x = 800,
164}; 155};
165 156
157static struct fb_videomode smdk6410_lcd_timing = {
158 .left_margin = 8,
159 .right_margin = 13,
160 .upper_margin = 7,
161 .lower_margin = 5,
162 .hsync_len = 3,
163 .vsync_len = 1,
164 .xres = 800,
165 .yres = 480,
166};
167
166/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */ 168/* 405566 clocks per frame => 60Hz refresh requires 24333960Hz clock */
167static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = { 169static struct s3c_fb_platdata smdk6410_lcd_pdata __initdata = {
168 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp, 170 .setup_gpio = s3c64xx_fb_gpio_setup_24bpp,
171 .vtiming = &smdk6410_lcd_timing,
169 .win[0] = &smdk6410_fb_win0, 172 .win[0] = &smdk6410_fb_win0,
170 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 173 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
171 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 174 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6440.c b/arch/arm/mach-s5p64x0/mach-smdk6440.c
index a40e325d62c8..92fefad505cc 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6440.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6440.c
@@ -103,22 +103,26 @@ static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = {
103 103
104/* Frame Buffer */ 104/* Frame Buffer */
105static struct s3c_fb_pd_win smdk6440_fb_win0 = { 105static struct s3c_fb_pd_win smdk6440_fb_win0 = {
106 .win_mode = {
107 .left_margin = 8,
108 .right_margin = 13,
109 .upper_margin = 7,
110 .lower_margin = 5,
111 .hsync_len = 3,
112 .vsync_len = 1,
113 .xres = 800,
114 .yres = 480,
115 },
116 .max_bpp = 32, 106 .max_bpp = 32,
117 .default_bpp = 24, 107 .default_bpp = 24,
108 .xres = 800,
109 .yres = 480,
110};
111
112static struct fb_videomode smdk6440_lcd_timing = {
113 .left_margin = 8,
114 .right_margin = 13,
115 .upper_margin = 7,
116 .lower_margin = 5,
117 .hsync_len = 3,
118 .vsync_len = 1,
119 .xres = 800,
120 .yres = 480,
118}; 121};
119 122
120static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = { 123static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = {
121 .win[0] = &smdk6440_fb_win0, 124 .win[0] = &smdk6440_fb_win0,
125 .vtiming = &smdk6440_lcd_timing,
122 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 126 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
123 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 127 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
124 .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, 128 .setup_gpio = s5p64x0_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5p64x0/mach-smdk6450.c b/arch/arm/mach-s5p64x0/mach-smdk6450.c
index efb69e2f2afe..e2335ecf6eae 100644
--- a/arch/arm/mach-s5p64x0/mach-smdk6450.c
+++ b/arch/arm/mach-s5p64x0/mach-smdk6450.c
@@ -121,22 +121,26 @@ static struct s3c2410_uartcfg smdk6450_uartcfgs[] __initdata = {
121 121
122/* Frame Buffer */ 122/* Frame Buffer */
123static struct s3c_fb_pd_win smdk6450_fb_win0 = { 123static struct s3c_fb_pd_win smdk6450_fb_win0 = {
124 .win_mode = {
125 .left_margin = 8,
126 .right_margin = 13,
127 .upper_margin = 7,
128 .lower_margin = 5,
129 .hsync_len = 3,
130 .vsync_len = 1,
131 .xres = 800,
132 .yres = 480,
133 },
134 .max_bpp = 32, 124 .max_bpp = 32,
135 .default_bpp = 24, 125 .default_bpp = 24,
126 .xres = 800,
127 .yres = 480,
128};
129
130static struct fb_videomode smdk6450_lcd_timing = {
131 .left_margin = 8,
132 .right_margin = 13,
133 .upper_margin = 7,
134 .lower_margin = 5,
135 .hsync_len = 3,
136 .vsync_len = 1,
137 .xres = 800,
138 .yres = 480,
136}; 139};
137 140
138static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = { 141static struct s3c_fb_platdata smdk6450_lcd_pdata __initdata = {
139 .win[0] = &smdk6450_fb_win0, 142 .win[0] = &smdk6450_fb_win0,
143 .vtiming = &smdk6450_lcd_timing,
140 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 144 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
141 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 145 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
142 .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, 146 .setup_gpio = s5p64x0_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5pc100/mach-smdkc100.c b/arch/arm/mach-s5pc100/mach-smdkc100.c
index 674d22992f3c..0c3ae38d27ca 100644
--- a/arch/arm/mach-s5pc100/mach-smdkc100.c
+++ b/arch/arm/mach-s5pc100/mach-smdkc100.c
@@ -136,24 +136,27 @@ static struct platform_device smdkc100_lcd_powerdev = {
136 136
137/* Frame Buffer */ 137/* Frame Buffer */
138static struct s3c_fb_pd_win smdkc100_fb_win0 = { 138static struct s3c_fb_pd_win smdkc100_fb_win0 = {
139 /* this is to ensure we use win0 */
140 .win_mode = {
141 .left_margin = 8,
142 .right_margin = 13,
143 .upper_margin = 7,
144 .lower_margin = 5,
145 .hsync_len = 3,
146 .vsync_len = 1,
147 .xres = 800,
148 .yres = 480,
149 .refresh = 80,
150 },
151 .max_bpp = 32, 139 .max_bpp = 32,
152 .default_bpp = 16, 140 .default_bpp = 16,
141 .xres = 800,
142 .yres = 480,
143};
144
145static struct fb_videomode smdkc100_lcd_timing = {
146 .left_margin = 8,
147 .right_margin = 13,
148 .upper_margin = 7,
149 .lower_margin = 5,
150 .hsync_len = 3,
151 .vsync_len = 1,
152 .xres = 800,
153 .yres = 480,
154 .refresh = 80,
153}; 155};
154 156
155static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = { 157static struct s3c_fb_platdata smdkc100_lcd_pdata __initdata = {
156 .win[0] = &smdkc100_fb_win0, 158 .win[0] = &smdkc100_fb_win0,
159 .vtiming = &smdkc100_lcd_timing,
157 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 160 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
158 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 161 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
159 .setup_gpio = s5pc100_fb_gpio_setup_24bpp, 162 .setup_gpio = s5pc100_fb_gpio_setup_24bpp,
diff --git a/arch/arm/mach-s5pv210/mach-aquila.c b/arch/arm/mach-s5pv210/mach-aquila.c
index 48d018f2332b..af528f9e97f9 100644
--- a/arch/arm/mach-s5pv210/mach-aquila.c
+++ b/arch/arm/mach-s5pv210/mach-aquila.c
@@ -96,38 +96,34 @@ static struct s3c2410_uartcfg aquila_uartcfgs[] __initdata = {
96 96
97/* Frame Buffer */ 97/* Frame Buffer */
98static struct s3c_fb_pd_win aquila_fb_win0 = { 98static struct s3c_fb_pd_win aquila_fb_win0 = {
99 .win_mode = {
100 .left_margin = 16,
101 .right_margin = 16,
102 .upper_margin = 3,
103 .lower_margin = 28,
104 .hsync_len = 2,
105 .vsync_len = 2,
106 .xres = 480,
107 .yres = 800,
108 },
109 .max_bpp = 32, 99 .max_bpp = 32,
110 .default_bpp = 16, 100 .default_bpp = 16,
101 .xres = 480,
102 .yres = 800,
111}; 103};
112 104
113static struct s3c_fb_pd_win aquila_fb_win1 = { 105static struct s3c_fb_pd_win aquila_fb_win1 = {
114 .win_mode = {
115 .left_margin = 16,
116 .right_margin = 16,
117 .upper_margin = 3,
118 .lower_margin = 28,
119 .hsync_len = 2,
120 .vsync_len = 2,
121 .xres = 480,
122 .yres = 800,
123 },
124 .max_bpp = 32, 106 .max_bpp = 32,
125 .default_bpp = 16, 107 .default_bpp = 16,
108 .xres = 480,
109 .yres = 800,
110};
111
112static struct fb_videomode aquila_lcd_timing = {
113 .left_margin = 16,
114 .right_margin = 16,
115 .upper_margin = 3,
116 .lower_margin = 28,
117 .hsync_len = 2,
118 .vsync_len = 2,
119 .xres = 480,
120 .yres = 800,
126}; 121};
127 122
128static struct s3c_fb_platdata aquila_lcd_pdata __initdata = { 123static struct s3c_fb_platdata aquila_lcd_pdata __initdata = {
129 .win[0] = &aquila_fb_win0, 124 .win[0] = &aquila_fb_win0,
130 .win[1] = &aquila_fb_win1, 125 .win[1] = &aquila_fb_win1,
126 .vtiming = &aquila_lcd_timing,
131 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 127 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
132 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC | 128 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC |
133 VIDCON1_INV_VCLK | VIDCON1_INV_VDEN, 129 VIDCON1_INV_VCLK | VIDCON1_INV_VDEN,
diff --git a/arch/arm/mach-s5pv210/mach-goni.c b/arch/arm/mach-s5pv210/mach-goni.c
index f20a97c8e411..bf5087c2b7fe 100644
--- a/arch/arm/mach-s5pv210/mach-goni.c
+++ b/arch/arm/mach-s5pv210/mach-goni.c
@@ -107,25 +107,29 @@ static struct s3c2410_uartcfg goni_uartcfgs[] __initdata = {
107 107
108/* Frame Buffer */ 108/* Frame Buffer */
109static struct s3c_fb_pd_win goni_fb_win0 = { 109static struct s3c_fb_pd_win goni_fb_win0 = {
110 .win_mode = {
111 .left_margin = 16,
112 .right_margin = 16,
113 .upper_margin = 2,
114 .lower_margin = 28,
115 .hsync_len = 2,
116 .vsync_len = 1,
117 .xres = 480,
118 .yres = 800,
119 .refresh = 55,
120 },
121 .max_bpp = 32, 110 .max_bpp = 32,
122 .default_bpp = 16, 111 .default_bpp = 16,
112 .xres = 480,
113 .yres = 800,
123 .virtual_x = 480, 114 .virtual_x = 480,
124 .virtual_y = 2 * 800, 115 .virtual_y = 2 * 800,
125}; 116};
126 117
118static struct fb_videomode goni_lcd_timing = {
119 .left_margin = 16,
120 .right_margin = 16,
121 .upper_margin = 2,
122 .lower_margin = 28,
123 .hsync_len = 2,
124 .vsync_len = 1,
125 .xres = 480,
126 .yres = 800,
127 .refresh = 55,
128};
129
127static struct s3c_fb_platdata goni_lcd_pdata __initdata = { 130static struct s3c_fb_platdata goni_lcd_pdata __initdata = {
128 .win[0] = &goni_fb_win0, 131 .win[0] = &goni_fb_win0,
132 .vtiming = &goni_lcd_timing,
129 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB | 133 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB |
130 VIDCON0_CLKSEL_LCD, 134 VIDCON0_CLKSEL_LCD,
131 .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN 135 .vidcon1 = VIDCON1_INV_VCLK | VIDCON1_INV_VDEN
diff --git a/arch/arm/mach-s5pv210/mach-smdkv210.c b/arch/arm/mach-s5pv210/mach-smdkv210.c
index fa1b61209fd9..0d7ddec88eb7 100644
--- a/arch/arm/mach-s5pv210/mach-smdkv210.c
+++ b/arch/arm/mach-s5pv210/mach-smdkv210.c
@@ -178,22 +178,26 @@ static struct platform_device smdkv210_lcd_lte480wv = {
178}; 178};
179 179
180static struct s3c_fb_pd_win smdkv210_fb_win0 = { 180static struct s3c_fb_pd_win smdkv210_fb_win0 = {
181 .win_mode = {
182 .left_margin = 13,
183 .right_margin = 8,
184 .upper_margin = 7,
185 .lower_margin = 5,
186 .hsync_len = 3,
187 .vsync_len = 1,
188 .xres = 800,
189 .yres = 480,
190 },
191 .max_bpp = 32, 181 .max_bpp = 32,
192 .default_bpp = 24, 182 .default_bpp = 24,
183 .xres = 800,
184 .yres = 480,
185};
186
187static struct fb_videomode smdkv210_lcd_timing = {
188 .left_margin = 13,
189 .right_margin = 8,
190 .upper_margin = 7,
191 .lower_margin = 5,
192 .hsync_len = 3,
193 .vsync_len = 1,
194 .xres = 800,
195 .yres = 480,
193}; 196};
194 197
195static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = { 198static struct s3c_fb_platdata smdkv210_lcd0_pdata __initdata = {
196 .win[0] = &smdkv210_fb_win0, 199 .win[0] = &smdkv210_fb_win0,
200 .vtiming = &smdkv210_lcd_timing,
197 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, 201 .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
198 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, 202 .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
199 .setup_gpio = s5pv210_fb_gpio_setup_24bpp, 203 .setup_gpio = s5pv210_fb_gpio_setup_24bpp,
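Note on the three hunks above (aquila, goni, smdkv210): they all apply the same s3c-fb refactor. The per-window timing fields move out of struct s3c_fb_pd_win into a single struct fb_videomode that the platform data references through .vtiming, so every window shares one display mode. A minimal board-file sketch of the new layout; the myboard_* names are illustrative, not from this patch, and the usual plat/fb.h and regs-fb definitions are assumed:

    static struct s3c_fb_pd_win myboard_fb_win0 = {
            .max_bpp     = 32,
            .default_bpp = 16,
            .xres        = 480,   /* window size only; timing no longer lives here */
            .yres        = 800,
    };

    static struct fb_videomode myboard_lcd_timing = {
            .left_margin  = 16, .right_margin = 16,
            .upper_margin = 3,  .lower_margin = 28,
            .hsync_len    = 2,  .vsync_len    = 2,
            .xres         = 480, .yres        = 800,
    };

    static struct s3c_fb_platdata myboard_lcd_pdata __initdata = {
            .win[0]  = &myboard_fb_win0,
            .vtiming = &myboard_lcd_timing,  /* one timing shared by all windows */
            .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
            .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
    };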
diff --git a/arch/arm/mach-shmobile/Kconfig b/arch/arm/mach-shmobile/Kconfig
index f31383c32f9c..df33909205e2 100644
--- a/arch/arm/mach-shmobile/Kconfig
+++ b/arch/arm/mach-shmobile/Kconfig
@@ -186,6 +186,12 @@ config SH_TIMER_TMU
186 help 186 help
187 This enables build of the TMU timer driver. 187 This enables build of the TMU timer driver.
188 188
189config EM_TIMER_STI
190 bool "STI timer driver"
191 default y
192 help
193 This enables build of the STI timer driver.
194
189endmenu 195endmenu
190 196
191config SH_CLK_CPG 197config SH_CLK_CPG
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
index ea1564609bd4..9e3ae6bfe50d 100644
--- a/arch/arm/mach-spear13xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -4,7 +4,7 @@
4 * Debugging macro include header spear13xx machine family 4 * Debugging macro include header spear13xx machine family
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
index 383ab04dc6c9..d50bdb605925 100644
--- a/arch/arm/mach-spear13xx/include/mach/dma.h
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -4,7 +4,7 @@
4 * DMA information for SPEAr13xx machine family 4 * DMA information for SPEAr13xx machine family
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
index 6d8c45b9f298..dac57fd0cdfd 100644
--- a/arch/arm/mach-spear13xx/include/mach/generic.h
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -4,7 +4,7 @@
4 * spear13xx machine family generic header file 4 * spear13xx machine family generic header file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
index cd6f4f86a56b..85f176311f63 100644
--- a/arch/arm/mach-spear13xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
4 * GPIO macros for SPEAr13xx machine family 4 * GPIO macros for SPEAr13xx machine family
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
index f542a24aa5f2..271a62b4cd31 100644
--- a/arch/arm/mach-spear13xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -4,7 +4,7 @@
4 * IRQ helper macros for spear13xx machine family 4 * IRQ helper macros for spear13xx machine family
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
index 30c57ef72686..65f27def239b 100644
--- a/arch/arm/mach-spear13xx/include/mach/spear.h
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -4,7 +4,7 @@
4 * spear13xx Machine family specific definition 4 * spear13xx Machine family specific definition
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
index 31af3e8d976e..3a58b8284a6a 100644
--- a/arch/arm/mach-spear13xx/include/mach/timex.h
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -4,7 +4,7 @@
4 * SPEAr3XX machine family specific timex definitions 4 * SPEAr3XX machine family specific timex definitions
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
index c7840896ae6e..70fe72f05dea 100644
--- a/arch/arm/mach-spear13xx/include/mach/uncompress.h
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -4,7 +4,7 @@
4 * Serial port stubs for kernel decompress status messages 4 * Serial port stubs for kernel decompress status messages
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
index fefd15b2f380..732d29bc7330 100644
--- a/arch/arm/mach-spear13xx/spear1310.c
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine source file 4 * SPEAr1310 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
index ee38cbc56869..81e4ed76ad06 100644
--- a/arch/arm/mach-spear13xx/spear1340.c
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine source file 4 * SPEAr1340 machine source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
index 50b349ae863d..cf936b106e27 100644
--- a/arch/arm/mach-spear13xx/spear13xx.c
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -4,7 +4,7 @@
4 * SPEAr13XX machines common source file 4 * SPEAr13XX machines common source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/debug-macro.S b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
index 590519f10d6e..0a6381fad5d9 100644
--- a/arch/arm/mach-spear3xx/include/mach/debug-macro.S
+++ b/arch/arm/mach-spear3xx/include/mach/debug-macro.S
@@ -4,7 +4,7 @@
4 * Debugging macro include header spear3xx machine family 4 * Debugging macro include header spear3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar<viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index 4a95b9453c2a..ce19113ca791 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -4,7 +4,7 @@
4 * SPEAr3XX machine family generic header file 4 * SPEAr3XX machine family generic header file
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar<viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/gpio.h b/arch/arm/mach-spear3xx/include/mach/gpio.h
index 451b2081bfc9..2ac74c6db7f1 100644
--- a/arch/arm/mach-spear3xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear3xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
4 * GPIO macros for SPEAr3xx machine family 4 * GPIO macros for SPEAr3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar<viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 51bd62a0254c..803de76f5f36 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -4,7 +4,7 @@
4 * IRQ helper macros for SPEAr3xx machine family 4 * IRQ helper macros for SPEAr3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index 18e2ac576f25..6309bf68d6f8 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
4 * Miscellaneous registers definitions for SPEAr3xx machine family 4 * Miscellaneous registers definitions for SPEAr3xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 51eb953148a9..8cca95193d4d 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -4,7 +4,7 @@
4 * SPEAr3xx Machine family specific definition 4 * SPEAr3xx Machine family specific definition
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/timex.h b/arch/arm/mach-spear3xx/include/mach/timex.h
index a38cc9de876f..9f5d08bd0c44 100644
--- a/arch/arm/mach-spear3xx/include/mach/timex.h
+++ b/arch/arm/mach-spear3xx/include/mach/timex.h
@@ -4,7 +4,7 @@
4 * SPEAr3XX machine family specific timex definitions 4 * SPEAr3XX machine family specific timex definitions
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/include/mach/uncompress.h b/arch/arm/mach-spear3xx/include/mach/uncompress.h
index 53ba8bbc0dfa..b909b011f7c8 100644
--- a/arch/arm/mach-spear3xx/include/mach/uncompress.h
+++ b/arch/arm/mach-spear3xx/include/mach/uncompress.h
@@ -4,7 +4,7 @@
4 * Serial port stubs for kernel decompress status messages 4 * Serial port stubs for kernel decompress status messages
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f74a05bdb829..0f882ecb7d81 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -4,7 +4,7 @@
4 * SPEAr300 machine source file 4 * SPEAr300 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index 84dfb0900747..bbcf4571d361 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -4,7 +4,7 @@
4 * SPEAr310 machine source file 4 * SPEAr310 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index a88fa841d29d..88d483bcd66a 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -4,7 +4,7 @@
4 * SPEAr320 machine source file 4 * SPEAr320 machine source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index f22419ed74a8..0f41bd1c47c3 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -4,7 +4,7 @@
4 * SPEAr3XX machines common source file 4 * SPEAr3XX machines common source file
5 * 5 *
6 * Copyright (C) 2009-2012 ST Microelectronics 6 * Copyright (C) 2009-2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear6xx/include/mach/gpio.h b/arch/arm/mach-spear6xx/include/mach/gpio.h
index 3a789dbb69f7..d42cefc0356d 100644
--- a/arch/arm/mach-spear6xx/include/mach/gpio.h
+++ b/arch/arm/mach-spear6xx/include/mach/gpio.h
@@ -4,7 +4,7 @@
4 * GPIO macros for SPEAr6xx machine family 4 * GPIO macros for SPEAr6xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 179e45774b3a..c34acc201d34 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -4,7 +4,7 @@
4 * Miscellaneous registers definitions for SPEAr6xx machine family 4 * Miscellaneous registers definitions for SPEAr6xx machine family
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/mach-tegra/reset.c b/arch/arm/mach-tegra/reset.c
index 4d6a2ee99c3b..5beb7ebe2948 100644
--- a/arch/arm/mach-tegra/reset.c
+++ b/arch/arm/mach-tegra/reset.c
@@ -33,7 +33,7 @@
33 33
34static bool is_enabled; 34static bool is_enabled;
35 35
36static void tegra_cpu_reset_handler_enable(void) 36static void __init tegra_cpu_reset_handler_enable(void)
37{ 37{
38 void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE); 38 void __iomem *iram_base = IO_ADDRESS(TEGRA_IRAM_RESET_BASE);
39 void __iomem *evp_cpu_reset = 39 void __iomem *evp_cpu_reset =
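Note: annotating tegra_cpu_reset_handler_enable() with __init places it in the init text section, which the kernel frees after boot; that is only valid because the function is called exclusively from boot-time code. A generic sketch of the pattern, not tied to this file:

    #include <linux/init.h>

    /* Sketch: code used only during boot can live in .init.text and be
     * discarded after free_initmem(); calling it later would oops. */
    static void __init board_one_time_setup(void)
    {
            /* one-time hardware initialisation */
    }

    static int __init board_init(void)
    {
            board_one_time_setup();
            return 0;
    }
    arch_initcall(board_init);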
diff --git a/arch/arm/mach-ux500/board-mop500-uib.c b/arch/arm/mach-ux500/board-mop500-uib.c
index b29a788f498c..1f47d962e3a1 100644
--- a/arch/arm/mach-ux500/board-mop500-uib.c
+++ b/arch/arm/mach-ux500/board-mop500-uib.c
@@ -96,7 +96,7 @@ static void __init __mop500_uib_init(struct uib *uib, const char *why)
96/* 96/*
97 * Detect the UIB attached based on the presence or absence of i2c devices. 97 * Detect the UIB attached based on the presence or absence of i2c devices.
98 */ 98 */
99static int __init mop500_uib_init(void) 99int __init mop500_uib_init(void)
100{ 100{
101 struct uib *uib = mop500_uib; 101 struct uib *uib = mop500_uib;
102 struct i2c_adapter *i2c0; 102 struct i2c_adapter *i2c0;
@@ -131,5 +131,3 @@ static int __init mop500_uib_init(void)
131 131
132 return 0; 132 return 0;
133} 133}
134
135module_init(mop500_uib_init);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index fba8adea421e..1509a3cb5833 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -580,43 +580,12 @@ static void ux500_uart0_reset(void)
580 udelay(1); 580 udelay(1);
581} 581}
582 582
583/* This needs to be referenced by callbacks */
584struct pinctrl *u0_p;
585struct pinctrl_state *u0_def;
586struct pinctrl_state *u0_sleep;
587
588static void ux500_uart0_init(void)
589{
590 int ret;
591
592 if (IS_ERR(u0_p) || IS_ERR(u0_def))
593 return;
594
595 ret = pinctrl_select_state(u0_p, u0_def);
596 if (ret)
597 pr_err("could not set UART0 defstate\n");
598}
599
600static void ux500_uart0_exit(void)
601{
602 int ret;
603
604 if (IS_ERR(u0_p) || IS_ERR(u0_sleep))
605 return;
606
607 ret = pinctrl_select_state(u0_p, u0_sleep);
608 if (ret)
609 pr_err("could not set UART0 idlestate\n");
610}
611
612static struct amba_pl011_data uart0_plat = { 583static struct amba_pl011_data uart0_plat = {
613#ifdef CONFIG_STE_DMA40 584#ifdef CONFIG_STE_DMA40
614 .dma_filter = stedma40_filter, 585 .dma_filter = stedma40_filter,
615 .dma_rx_param = &uart0_dma_cfg_rx, 586 .dma_rx_param = &uart0_dma_cfg_rx,
616 .dma_tx_param = &uart0_dma_cfg_tx, 587 .dma_tx_param = &uart0_dma_cfg_tx,
617#endif 588#endif
618 .init = ux500_uart0_init,
619 .exit = ux500_uart0_exit,
620 .reset = ux500_uart0_reset, 589 .reset = ux500_uart0_reset,
621}; 590};
622 591
@@ -638,28 +607,7 @@ static struct amba_pl011_data uart2_plat = {
638 607
639static void __init mop500_uart_init(struct device *parent) 608static void __init mop500_uart_init(struct device *parent)
640{ 609{
641 struct amba_device *uart0_device; 610 db8500_add_uart0(parent, &uart0_plat);
642
643 uart0_device = db8500_add_uart0(parent, &uart0_plat);
644 if (uart0_device) {
645 u0_p = pinctrl_get(&uart0_device->dev);
646 if (IS_ERR(u0_p))
647 dev_err(&uart0_device->dev,
648 "could not get UART0 pinctrl\n");
649 else {
650 u0_def = pinctrl_lookup_state(u0_p,
651 PINCTRL_STATE_DEFAULT);
652 if (IS_ERR(u0_def)) {
653 dev_err(&uart0_device->dev,
654 "could not get UART0 defstate\n");
655 }
656 u0_sleep = pinctrl_lookup_state(u0_p,
657 PINCTRL_STATE_SLEEP);
658 if (IS_ERR(u0_sleep))
659 dev_err(&uart0_device->dev,
660 "could not get UART0 idlestate\n");
661 }
662 }
663 db8500_add_uart1(parent, &uart1_plat); 611 db8500_add_uart1(parent, &uart1_plat);
664 db8500_add_uart2(parent, &uart2_plat); 612 db8500_add_uart2(parent, &uart2_plat);
665} 613}
@@ -673,9 +621,15 @@ static void __init u8500_cryp1_hash1_init(struct device *parent)
673static struct platform_device *snowball_platform_devs[] __initdata = { 621static struct platform_device *snowball_platform_devs[] __initdata = {
674 &snowball_led_dev, 622 &snowball_led_dev,
675 &snowball_key_dev, 623 &snowball_key_dev,
624 &snowball_sbnet_dev,
676 &ab8500_device, 625 &ab8500_device,
677}; 626};
678 627
628static struct platform_device *snowball_of_platform_devs[] __initdata = {
629 &snowball_led_dev,
630 &snowball_key_dev,
631};
632
679static void __init mop500_init_machine(void) 633static void __init mop500_init_machine(void)
680{ 634{
681 struct device *parent = NULL; 635 struct device *parent = NULL;
@@ -710,6 +664,8 @@ static void __init mop500_init_machine(void)
710 664
711 /* This board has full regulator constraints */ 665 /* This board has full regulator constraints */
712 regulator_has_full_constraints(); 666 regulator_has_full_constraints();
667
668 mop500_uib_init();
713} 669}
714 670
715static void __init snowball_init_machine(void) 671static void __init snowball_init_machine(void)
@@ -774,6 +730,8 @@ static void __init hrefv60_init_machine(void)
774 730
775 /* This board has full regulator constraints */ 731 /* This board has full regulator constraints */
776 regulator_has_full_constraints(); 732 regulator_has_full_constraints();
733
734 mop500_uib_init();
777} 735}
778 736
779MACHINE_START(U8500, "ST-Ericsson MOP500 platform") 737MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
@@ -834,6 +792,10 @@ struct of_dev_auxdata u8500_auxdata_lookup[] __initdata = {
834static const struct of_device_id u8500_local_bus_nodes[] = { 792static const struct of_device_id u8500_local_bus_nodes[] = {
835 /* only create devices below soc node */ 793 /* only create devices below soc node */
836 { .compatible = "stericsson,db8500", }, 794 { .compatible = "stericsson,db8500", },
795 { .compatible = "stericsson,db8500-prcmu", },
796 { .compatible = "stericsson,db8500-prcmu-regulator", },
797 { .compatible = "stericsson,ab8500", },
798 { .compatible = "stericsson,ab8500-regulator", },
837 { .compatible = "simple-bus"}, 799 { .compatible = "simple-bus"},
838 { }, 800 { },
839}; 801};
@@ -852,7 +814,7 @@ static void __init u8500_init_machine(void)
852 else if (of_machine_is_compatible("st-ericsson,hrefv60+")) 814 else if (of_machine_is_compatible("st-ericsson,hrefv60+"))
853 hrefv60_pinmaps_init(); 815 hrefv60_pinmaps_init();
854 816
855 parent = u8500_init_devices(); 817 parent = u8500_of_init_devices();
856 818
857 for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++) 819 for (i = 0; i < ARRAY_SIZE(mop500_platform_devs); i++)
858 mop500_platform_devs[i]->dev.parent = parent; 820 mop500_platform_devs[i]->dev.parent = parent;
@@ -869,15 +831,23 @@ static void __init u8500_init_machine(void)
869 ARRAY_SIZE(mop500_platform_devs)); 831 ARRAY_SIZE(mop500_platform_devs));
870 832
871 mop500_sdi_init(parent); 833 mop500_sdi_init(parent);
872
873 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices); 834 i2c0_devs = ARRAY_SIZE(mop500_i2c0_devices);
874 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); 835 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
875 i2c_register_board_info(2, mop500_i2c2_devices, 836 i2c_register_board_info(2, mop500_i2c2_devices,
876 ARRAY_SIZE(mop500_i2c2_devices)); 837 ARRAY_SIZE(mop500_i2c2_devices));
877 838
839 mop500_uib_init();
840
878 } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) { 841 } else if (of_machine_is_compatible("calaosystems,snowball-a9500")) {
879 platform_add_devices(snowball_platform_devs, 842 /*
880 ARRAY_SIZE(snowball_platform_devs)); 843 * Devices to be DT:ed:
844 * snowball_led_dev = todo
845 * snowball_key_dev = todo
846 * snowball_sbnet_dev = done
847 * ab8500_device = done
848 */
849 platform_add_devices(snowball_of_platform_devs,
850 ARRAY_SIZE(snowball_of_platform_devs));
881 851
882 snowball_sdi_init(parent); 852 snowball_sdi_init(parent);
883 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) { 853 } else if (of_machine_is_compatible("st-ericsson,hrefv60+")) {
@@ -898,6 +868,8 @@ static void __init u8500_init_machine(void)
898 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs); 868 i2c_register_board_info(0, mop500_i2c0_devices, i2c0_devs);
899 i2c_register_board_info(2, mop500_i2c2_devices, 869 i2c_register_board_info(2, mop500_i2c2_devices,
900 ARRAY_SIZE(mop500_i2c2_devices)); 870 ARRAY_SIZE(mop500_i2c2_devices));
871
872 mop500_uib_init();
901 } 873 }
902 mop500_i2c_init(parent); 874 mop500_i2c_init(parent);
903 875
diff --git a/arch/arm/mach-ux500/board-mop500.h b/arch/arm/mach-ux500/board-mop500.h
index bc44c07c71a9..2f87b25a908a 100644
--- a/arch/arm/mach-ux500/board-mop500.h
+++ b/arch/arm/mach-ux500/board-mop500.h
@@ -89,7 +89,11 @@ void __init mop500_pinmaps_init(void);
89void __init snowball_pinmaps_init(void); 89void __init snowball_pinmaps_init(void);
90void __init hrefv60_pinmaps_init(void); 90void __init hrefv60_pinmaps_init(void);
91 91
92int __init mop500_uib_init(void);
92void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info, 93void mop500_uib_i2c_add(int busnum, struct i2c_board_info *info,
93 unsigned n); 94 unsigned n);
94 95
96/* TODO: Once all pieces are DT:ed, remove completely. */
97struct device * __init u8500_of_init_devices(void);
98
95#endif 99#endif
diff --git a/arch/arm/mach-ux500/cpu-db8500.c b/arch/arm/mach-ux500/cpu-db8500.c
index 16169c4bf6ca..33275eb4c689 100644
--- a/arch/arm/mach-ux500/cpu-db8500.c
+++ b/arch/arm/mach-ux500/cpu-db8500.c
@@ -140,7 +140,6 @@ static struct platform_device *platform_devs[] __initdata = {
140static struct platform_device *of_platform_devs[] __initdata = { 140static struct platform_device *of_platform_devs[] __initdata = {
141 &u8500_dma40_device, 141 &u8500_dma40_device,
142 &db8500_pmu_device, 142 &db8500_pmu_device,
143 &db8500_prcmu_device,
144}; 143};
145 144
146static resource_size_t __initdata db8500_gpio_base[] = { 145static resource_size_t __initdata db8500_gpio_base[] = {
@@ -222,6 +221,28 @@ struct device * __init u8500_init_devices(void)
222 platform_device_register_data(parent, 221 platform_device_register_data(parent,
223 "cpufreq-u8500", -1, NULL, 0); 222 "cpufreq-u8500", -1, NULL, 0);
224 223
224 for (i = 0; i < ARRAY_SIZE(platform_devs); i++)
225 platform_devs[i]->dev.parent = parent;
226
227 platform_add_devices(platform_devs, ARRAY_SIZE(platform_devs));
228
229 return parent;
230}
231
232/* TODO: Once all pieces are DT:ed, remove completely. */
233struct device * __init u8500_of_init_devices(void)
234{
235 struct device *parent;
236 int i;
237
238 parent = db8500_soc_device_init();
239
240 db8500_add_rtc(parent);
241 db8500_add_usb(parent, usb_db8500_rx_dma_cfg, usb_db8500_tx_dma_cfg);
242
243 platform_device_register_data(parent,
244 "cpufreq-u8500", -1, NULL, 0);
245
225 for (i = 0; i < ARRAY_SIZE(of_platform_devs); i++) 246 for (i = 0; i < ARRAY_SIZE(of_platform_devs); i++)
226 of_platform_devs[i]->dev.parent = parent; 247 of_platform_devs[i]->dev.parent = parent;
227 248
@@ -229,7 +250,7 @@ struct device * __init u8500_init_devices(void)
229 * Devices to be DT:ed: 250 * Devices to be DT:ed:
230 * u8500_dma40_device = todo 251 * u8500_dma40_device = todo
231 * db8500_pmu_device = todo 252 * db8500_pmu_device = todo
232 * db8500_prcmu_device = todo 253 * db8500_prcmu_device = done
233 */ 254 */
234 platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs)); 255 platform_add_devices(of_platform_devs, ARRAY_SIZE(of_platform_devs));
235 256
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index cf4687ee2a7b..cd8ea3588f93 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -169,26 +169,13 @@ static struct map_desc versatile_io_desc[] __initdata = {
169 .pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE), 169 .pfn = __phys_to_pfn(VERSATILE_PCI_CFG_BASE),
170 .length = VERSATILE_PCI_CFG_BASE_SIZE, 170 .length = VERSATILE_PCI_CFG_BASE_SIZE,
171 .type = MT_DEVICE 171 .type = MT_DEVICE
172 },
173#if 0
174 {
175 .virtual = VERSATILE_PCI_VIRT_MEM_BASE0,
176 .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
177 .length = SZ_16M,
178 .type = MT_DEVICE
179 }, { 172 }, {
180 .virtual = VERSATILE_PCI_VIRT_MEM_BASE1, 173 .virtual = (unsigned long)VERSATILE_PCI_VIRT_MEM_BASE0,
181 .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE1), 174 .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE0),
182 .length = SZ_16M, 175 .length = IO_SPACE_LIMIT,
183 .type = MT_DEVICE
184 }, {
185 .virtual = VERSATILE_PCI_VIRT_MEM_BASE2,
186 .pfn = __phys_to_pfn(VERSATILE_PCI_MEM_BASE2),
187 .length = SZ_16M,
188 .type = MT_DEVICE 176 .type = MT_DEVICE
189 }, 177 },
190#endif 178#endif
191#endif
192}; 179};
193 180
194void __init versatile_map_io(void) 181void __init versatile_map_io(void)
diff --git a/arch/arm/mach-versatile/include/mach/hardware.h b/arch/arm/mach-versatile/include/mach/hardware.h
index 4d4973dd8fba..408e58da46c6 100644
--- a/arch/arm/mach-versatile/include/mach/hardware.h
+++ b/arch/arm/mach-versatile/include/mach/hardware.h
@@ -29,8 +29,9 @@
29 */ 29 */
30#define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul 30#define VERSATILE_PCI_VIRT_BASE (void __iomem *)0xe8000000ul
31#define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul 31#define VERSATILE_PCI_CFG_VIRT_BASE (void __iomem *)0xe9000000ul
32#define VERSATILE_PCI_VIRT_MEM_BASE0 (void __iomem *)PCIO_BASE
32 33
33/* macro to get at IO space when running virtually */ 34/* macro to get at MMIO space when running virtually */
34#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000) 35#define IO_ADDRESS(x) (((x) & 0x0fffffff) + (((x) >> 4) & 0x0f000000) + 0xf0000000)
35 36
36#define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n)) 37#define __io_address(n) ((void __iomem __force *)IO_ADDRESS(n))
diff --git a/arch/arm/mach-versatile/include/mach/io.h b/arch/arm/mach-versatile/include/mach/io.h
new file mode 100644
index 000000000000..0406513be7d8
--- /dev/null
+++ b/arch/arm/mach-versatile/include/mach/io.h
@@ -0,0 +1,27 @@
1/*
2 * arch/arm/mach-versatile/include/mach/io.h
3 *
4 * Copyright (C) 2003 ARM Limited
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#ifndef __ASM_ARM_ARCH_IO_H
21#define __ASM_ARM_ARCH_IO_H
22
23#define PCIO_BASE 0xeb000000ul
24
25#define __io(a) ((a) + PCIO_BASE)
26
27#endif
diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c
index 15c6a00000ec..bec933b04ef0 100644
--- a/arch/arm/mach-versatile/pci.c
+++ b/arch/arm/mach-versatile/pci.c
@@ -169,11 +169,18 @@ static struct pci_ops pci_versatile_ops = {
169 .write = versatile_write_config, 169 .write = versatile_write_config,
170}; 170};
171 171
172static struct resource io_port = {
173 .name = "PCI",
174 .start = 0,
175 .end = IO_SPACE_LIMIT,
176 .flags = IORESOURCE_IO,
177};
178
172static struct resource io_mem = { 179static struct resource io_mem = {
173 .name = "PCI I/O space", 180 .name = "PCI I/O space",
174 .start = VERSATILE_PCI_MEM_BASE0, 181 .start = VERSATILE_PCI_MEM_BASE0,
175 .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1, 182 .end = VERSATILE_PCI_MEM_BASE0+VERSATILE_PCI_MEM_BASE0_SIZE-1,
176 .flags = IORESOURCE_IO, 183 .flags = IORESOURCE_MEM,
177}; 184};
178 185
179static struct resource non_mem = { 186static struct resource non_mem = {
@@ -200,6 +207,12 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
200 "memory region (%d)\n", ret); 207 "memory region (%d)\n", ret);
201 goto out; 208 goto out;
202 } 209 }
210 ret = request_resource(&ioport_resource, &io_port);
211 if (ret) {
212 printk(KERN_ERR "PCI: unable to allocate I/O "
213 "port region (%d)\n", ret);
214 goto out;
215 }
203 ret = request_resource(&iomem_resource, &non_mem); 216 ret = request_resource(&iomem_resource, &non_mem);
204 if (ret) { 217 if (ret) {
205 printk(KERN_ERR "PCI: unable to allocate non-prefetchable " 218 printk(KERN_ERR "PCI: unable to allocate non-prefetchable "
@@ -218,7 +231,7 @@ static int __init pci_versatile_setup_resources(struct pci_sys_data *sys)
218 * the mem resource for this bus 231 * the mem resource for this bus
219 * the prefetch mem resource for this bus 232 * the prefetch mem resource for this bus
220 */ 233 */
221 pci_add_resource_offset(&sys->resources, &io_mem, sys->io_offset); 234 pci_add_resource_offset(&sys->resources, &io_port, sys->io_offset);
222 pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset); 235 pci_add_resource_offset(&sys->resources, &non_mem, sys->mem_offset);
223 pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset); 236 pci_add_resource_offset(&sys->resources, &pre_mem, sys->mem_offset);
224 237
@@ -249,6 +262,7 @@ int __init pci_versatile_setup(int nr, struct pci_sys_data *sys)
249 262
250 if (nr == 0) { 263 if (nr == 0) {
251 sys->mem_offset = 0; 264 sys->mem_offset = 0;
265 sys->io_offset = 0;
252 ret = pci_versatile_setup_resources(sys); 266 ret = pci_versatile_setup_resources(sys);
253 if (ret < 0) { 267 if (ret < 0) {
254 printk("pci_versatile_setup: resources... oops?\n"); 268 printk("pci_versatile_setup: resources... oops?\n");
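Note on the versatile PCI hunks: the old io_mem resource described a memory window yet was flagged IORESOURCE_IO, so a real I/O port resource spanning 0..IO_SPACE_LIMIT is added and handed to the PCI core, while the new mach/io.h __io() macro and the VERSATILE_PCI_VIRT_MEM_BASE0 static mapping give inb()/outb() somewhere valid to land. A rough sketch of the port-to-virtual translation, using only the definitions added by this patch (the example port number is arbitrary):

    #define PCIO_BASE                     0xeb000000ul
    #define __io(a)                       ((a) + PCIO_BASE)
    #define VERSATILE_PCI_VIRT_MEM_BASE0  ((void __iomem *)PCIO_BASE)

    /* A legacy access such as outb(0x80, 0x3f8) becomes a write to virtual
     * address 0xeb0003f8; the map_desc entry in versatile_io_desc translates
     * that page to physical VERSATILE_PCI_MEM_BASE0 + 0x3f8, i.e. into the
     * controller's PCI I/O window. */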
diff --git a/arch/arm/mach-vexpress/v2m.c b/arch/arm/mach-vexpress/v2m.c
index 04dd092211b8..fde26adaef32 100644
--- a/arch/arm/mach-vexpress/v2m.c
+++ b/arch/arm/mach-vexpress/v2m.c
@@ -14,7 +14,6 @@
14#include <linux/ata_platform.h> 14#include <linux/ata_platform.h>
15#include <linux/smsc911x.h> 15#include <linux/smsc911x.h>
16#include <linux/spinlock.h> 16#include <linux/spinlock.h>
17#include <linux/device.h>
18#include <linux/usb/isp1760.h> 17#include <linux/usb/isp1760.h>
19#include <linux/clkdev.h> 18#include <linux/clkdev.h>
20#include <linux/mtd/physmap.h> 19#include <linux/mtd/physmap.h>
@@ -31,7 +30,6 @@
31#include <asm/hardware/gic.h> 30#include <asm/hardware/gic.h>
32#include <asm/hardware/timer-sp.h> 31#include <asm/hardware/timer-sp.h>
33#include <asm/hardware/sp810.h> 32#include <asm/hardware/sp810.h>
34#include <asm/hardware/gic.h>
35 33
36#include <mach/ct-ca9x4.h> 34#include <mach/ct-ca9x4.h>
37#include <mach/motherboard.h> 35#include <mach/motherboard.h>
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index ea6b43154090..d766e4256b74 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -228,7 +228,7 @@ static pte_t **consistent_pte;
228 228
229#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M 229#define DEFAULT_CONSISTENT_DMA_SIZE SZ_2M
230 230
231unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE; 231static unsigned long consistent_base = CONSISTENT_END - DEFAULT_CONSISTENT_DMA_SIZE;
232 232
233void __init init_consistent_dma_size(unsigned long size) 233void __init init_consistent_dma_size(unsigned long size)
234{ 234{
@@ -268,10 +268,8 @@ static int __init consistent_init(void)
268 unsigned long base = consistent_base; 268 unsigned long base = consistent_base;
269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; 269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
270 270
271#ifndef CONFIG_ARM_DMA_USE_IOMMU 271 if (IS_ENABLED(CONFIG_CMA) && !IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
272 if (cpu_architecture() >= CPU_ARCH_ARMv6)
273 return 0; 272 return 0;
274#endif
275 273
276 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 274 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
277 if (!consistent_pte) { 275 if (!consistent_pte) {
@@ -323,7 +321,7 @@ static struct arm_vmregion_head coherent_head = {
323 .vm_list = LIST_HEAD_INIT(coherent_head.vm_list), 321 .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
324}; 322};
325 323
326size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8; 324static size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
327 325
328static int __init early_coherent_pool(char *p) 326static int __init early_coherent_pool(char *p)
329{ 327{
@@ -342,7 +340,7 @@ static int __init coherent_init(void)
342 struct page *page; 340 struct page *page;
343 void *ptr; 341 void *ptr;
344 342
345 if (cpu_architecture() < CPU_ARCH_ARMv6) 343 if (!IS_ENABLED(CONFIG_CMA))
346 return 0; 344 return 0;
347 345
348 ptr = __alloc_from_contiguous(NULL, size, prot, &page); 346 ptr = __alloc_from_contiguous(NULL, size, prot, &page);
@@ -704,7 +702,7 @@ static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
704 702
705 if (arch_is_coherent() || nommu()) 703 if (arch_is_coherent() || nommu())
706 addr = __alloc_simple_buffer(dev, size, gfp, &page); 704 addr = __alloc_simple_buffer(dev, size, gfp, &page);
707 else if (cpu_architecture() < CPU_ARCH_ARMv6) 705 else if (!IS_ENABLED(CONFIG_CMA))
708 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller); 706 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
709 else if (gfp & GFP_ATOMIC) 707 else if (gfp & GFP_ATOMIC)
710 addr = __alloc_from_pool(dev, size, &page, caller); 708 addr = __alloc_from_pool(dev, size, &page, caller);
@@ -773,7 +771,7 @@ void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
773 771
774 if (arch_is_coherent() || nommu()) { 772 if (arch_is_coherent() || nommu()) {
775 __dma_free_buffer(page, size); 773 __dma_free_buffer(page, size);
776 } else if (cpu_architecture() < CPU_ARCH_ARMv6) { 774 } else if (!IS_ENABLED(CONFIG_CMA)) {
777 __dma_free_remap(cpu_addr, size); 775 __dma_free_remap(cpu_addr, size);
778 __dma_free_buffer(page, size); 776 __dma_free_buffer(page, size);
779 } else { 777 } else {
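Note on the dma-mapping hunk: runtime cpu_architecture() checks are replaced by compile-time IS_ENABLED(CONFIG_CMA) tests, so the contiguous-allocation paths are used when CMA is built in and the old remapping path otherwise, with the dead branch dropped by the compiler. A small sketch of the IS_ENABLED idiom; the two helpers are hypothetical:

    #include <linux/kconfig.h>

    static void *alloc_buffer(size_t size)
    {
            /* IS_ENABLED(CONFIG_FOO) is 1 for =y or =m and 0 otherwise, so the
             * untaken branch is compiled out while still being type-checked. */
            if (!IS_ENABLED(CONFIG_CMA))
                    return alloc_by_remapping(size);    /* hypothetical helper */
            return alloc_from_contiguous(size);         /* hypothetical helper */
    }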
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index c21d06c7dd7e..f54d59219764 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -212,7 +212,7 @@ EXPORT_SYMBOL(arm_dma_zone_size);
212 * allocations. This must be the smallest DMA mask in the system, 212 * allocations. This must be the smallest DMA mask in the system,
213 * so a successful GFP_DMA allocation will always satisfy this. 213 * so a successful GFP_DMA allocation will always satisfy this.
214 */ 214 */
215u32 arm_dma_limit; 215phys_addr_t arm_dma_limit;
216 216
217static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole, 217static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
218 unsigned long dma_size) 218 unsigned long dma_size)
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 93dc0c17cdcb..c471436c7952 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -62,7 +62,7 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
62#endif 62#endif
63 63
64#ifdef CONFIG_ZONE_DMA 64#ifdef CONFIG_ZONE_DMA
65extern u32 arm_dma_limit; 65extern phys_addr_t arm_dma_limit;
66#else 66#else
67#define arm_dma_limit ((u32)~0) 67#define arm_dma_limit ((u32)~0)
68#endif 68#endif
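Note: arm_dma_limit changes from u32 to phys_addr_t because, with LPAE, physical addresses can exceed 32 bits and a u32 limit would silently truncate them. For illustration:

    /* Sketch: phys_addr_t is 64-bit when CONFIG_ARM_LPAE is set, so a DMA
     * limit at or above 4 GiB survives; a u32 would wrap it to 0. */
    phys_addr_t dma_limit = 0x100000000ULL;       /* 4 GiB, needs more than 32 bits */
    u32 truncated         = (u32)0x100000000ULL;  /* == 0: the old bug class */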
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 62135849f48b..c641fb685017 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -762,6 +762,11 @@ b_epilogue:
762 update_on_xread(ctx); 762 update_on_xread(ctx);
763 emit(ARM_MOV_R(r_A, r_X), ctx); 763 emit(ARM_MOV_R(r_A, r_X), ctx);
764 break; 764 break;
765 case BPF_S_ANC_ALU_XOR_X:
766 /* A ^= X */
767 update_on_xread(ctx);
768 emit(ARM_EOR_R(r_A, r_A, r_X), ctx);
769 break;
765 case BPF_S_ANC_PROTOCOL: 770 case BPF_S_ANC_PROTOCOL:
766 /* A = ntohs(skb->protocol) */ 771 /* A = ntohs(skb->protocol) */
767 ctx->seen |= SEEN_SKB; 772 ctx->seen |= SEEN_SKB;
diff --git a/arch/arm/net/bpf_jit_32.h b/arch/arm/net/bpf_jit_32.h
index 99ae5e3f46d2..7fa2f7d3cb90 100644
--- a/arch/arm/net/bpf_jit_32.h
+++ b/arch/arm/net/bpf_jit_32.h
@@ -68,6 +68,8 @@
68#define ARM_INST_CMP_R 0x01500000 68#define ARM_INST_CMP_R 0x01500000
69#define ARM_INST_CMP_I 0x03500000 69#define ARM_INST_CMP_I 0x03500000
70 70
71#define ARM_INST_EOR_R 0x00200000
72
71#define ARM_INST_LDRB_I 0x05d00000 73#define ARM_INST_LDRB_I 0x05d00000
72#define ARM_INST_LDRB_R 0x07d00000 74#define ARM_INST_LDRB_R 0x07d00000
73#define ARM_INST_LDRH_I 0x01d000b0 75#define ARM_INST_LDRH_I 0x01d000b0
@@ -132,6 +134,8 @@
132#define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm) 134#define ARM_CMP_R(rn, rm) _AL3_R(ARM_INST_CMP, 0, rn, rm)
133#define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm) 135#define ARM_CMP_I(rn, imm) _AL3_I(ARM_INST_CMP, 0, rn, imm)
134 136
137#define ARM_EOR_R(rd, rn, rm) _AL3_R(ARM_INST_EOR, rd, rn, rm)
138
135#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \ 139#define ARM_LDR_I(rt, rn, off) (ARM_INST_LDR_I | (rt) << 12 | (rn) << 16 \
136 | (off)) 140 | (off))
137#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \ 141#define ARM_LDRB_I(rt, rn, off) (ARM_INST_LDRB_I | (rt) << 12 | (rn) << 16 \
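Note on the BPF JIT hunks: the ARM JIT gains the classic-BPF ancillary XOR operation, emitting eor r_A, r_A, r_X for BPF_S_ANC_ALU_XOR_X instead of punting to the interpreter. A hedged user-space sketch of a filter that exercises this opcode (field values are arbitrary; SKF_AD_ALU_XOR_X is the ancillary offset assumed to be in linux/filter.h):

    #include <linux/filter.h>

    /* Sketch: classic BPF expresses A ^= X as an "ancillary load". */
    struct sock_filter prog[] = {
            BPF_STMT(BPF_LDX | BPF_W | BPF_IMM, 0xdeadbeef),  /* X = constant   */
            BPF_STMT(BPF_LD  | BPF_W | BPF_ABS, 0),           /* A = first word */
            BPF_STMT(BPF_LD  | BPF_W | BPF_ABS,
                     SKF_AD_OFF + SKF_AD_ALU_XOR_X),          /* A ^= X         */
            BPF_STMT(BPF_RET | BPF_K, 0xffff),                /* accept         */
    };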
diff --git a/arch/arm/plat-mxc/epit.c b/arch/arm/plat-mxc/epit.c
index 9129c9e7d532..88726f4dbbfa 100644
--- a/arch/arm/plat-mxc/epit.c
+++ b/arch/arm/plat-mxc/epit.c
@@ -50,6 +50,7 @@
50#include <linux/irq.h> 50#include <linux/irq.h>
51#include <linux/clockchips.h> 51#include <linux/clockchips.h>
52#include <linux/clk.h> 52#include <linux/clk.h>
53#include <linux/err.h>
53 54
54#include <mach/hardware.h> 55#include <mach/hardware.h>
55#include <asm/mach/time.h> 56#include <asm/mach/time.h>
@@ -201,8 +202,16 @@ static int __init epit_clockevent_init(struct clk *timer_clk)
201 return 0; 202 return 0;
202} 203}
203 204
204void __init epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq) 205void __init epit_timer_init(void __iomem *base, int irq)
205{ 206{
207 struct clk *timer_clk;
208
209 timer_clk = clk_get_sys("imx-epit.0", NULL);
210 if (IS_ERR(timer_clk)) {
211 pr_err("i.MX epit: unable to get clk\n");
212 return;
213 }
214
206 clk_prepare_enable(timer_clk); 215 clk_prepare_enable(timer_clk);
207 216
208 timer_base = base; 217 timer_base = base;
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index cf663d84e7c1..e429ca1b814a 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -54,8 +54,8 @@ extern void imx50_soc_init(void);
54extern void imx51_soc_init(void); 54extern void imx51_soc_init(void);
55extern void imx53_soc_init(void); 55extern void imx53_soc_init(void);
56extern void imx51_init_late(void); 56extern void imx51_init_late(void);
57extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); 57extern void epit_timer_init(void __iomem *base, int irq);
58extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); 58extern void mxc_timer_init(void __iomem *, int);
59extern int mx1_clocks_init(unsigned long fref); 59extern int mx1_clocks_init(unsigned long fref);
60extern int mx21_clocks_init(unsigned long lref, unsigned long fref); 60extern int mx21_clocks_init(unsigned long lref, unsigned long fref);
61extern int mx25_clocks_init(void); 61extern int mx25_clocks_init(void);
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 99f958ca6cb8..00e8e659e667 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -58,6 +58,7 @@
58/* MX31, MX35, MX25, MX5 */ 58/* MX31, MX35, MX25, MX5 */
59#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */ 59#define V2_TCTL_WAITEN (1 << 3) /* Wait enable mode */
60#define V2_TCTL_CLK_IPG (1 << 6) 60#define V2_TCTL_CLK_IPG (1 << 6)
61#define V2_TCTL_CLK_PER (2 << 6)
61#define V2_TCTL_FRR (1 << 9) 62#define V2_TCTL_FRR (1 << 9)
62#define V2_IR 0x0c 63#define V2_IR 0x0c
63#define V2_TSTAT 0x08 64#define V2_TSTAT 0x08
@@ -280,23 +281,22 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
280 return 0; 281 return 0;
281} 282}
282 283
283void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq) 284void __init mxc_timer_init(void __iomem *base, int irq)
284{ 285{
285 uint32_t tctl_val; 286 uint32_t tctl_val;
287 struct clk *timer_clk;
286 struct clk *timer_ipg_clk; 288 struct clk *timer_ipg_clk;
287 289
288 if (!timer_clk) { 290 timer_clk = clk_get_sys("imx-gpt.0", "per");
289 timer_clk = clk_get_sys("imx-gpt.0", "per"); 291 if (IS_ERR(timer_clk)) {
290 if (IS_ERR(timer_clk)) { 292 pr_err("i.MX timer: unable to get clk\n");
291 pr_err("i.MX timer: unable to get clk\n"); 293 return;
292 return;
293 }
294
295 timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
296 if (!IS_ERR(timer_ipg_clk))
297 clk_prepare_enable(timer_ipg_clk);
298 } 294 }
299 295
296 timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
297 if (!IS_ERR(timer_ipg_clk))
298 clk_prepare_enable(timer_ipg_clk);
299
300 clk_prepare_enable(timer_clk); 300 clk_prepare_enable(timer_clk);
301 301
302 timer_base = base; 302 timer_base = base;
@@ -309,7 +309,7 @@ void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
309 __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */ 309 __raw_writel(0, timer_base + MXC_TPRER); /* see datasheet note */
310 310
311 if (timer_is_v2()) 311 if (timer_is_v2())
312 tctl_val = V2_TCTL_CLK_IPG | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN; 312 tctl_val = V2_TCTL_CLK_PER | V2_TCTL_FRR | V2_TCTL_WAITEN | MXC_TCTL_TEN;
313 else 313 else
314 tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN; 314 tctl_val = MX1_2_TCTL_FRR | MX1_2_TCTL_CLK_PCLK1 | MXC_TCTL_TEN;
315 315
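Note on the plat-mxc hunks: mxc_timer_init() and epit_timer_init() no longer take a struct clk argument; they look their clocks up themselves via clk_get_sys("imx-gpt.0", ...) and clk_get_sys("imx-epit.0", NULL), and the v2 GPT now runs from the "per" clock (V2_TCTL_CLK_PER) rather than the ipg clock. Callers shrink to something like the sketch below; the base-address and IRQ macros are illustrative placeholders, not real symbols:

    /* Sketch: after this change a SoC timer init passes only the mapped
     * register base and the interrupt number. */
    void __init mysoc_timer_init(void)
    {
            mxc_timer_init(MYSOC_IO_ADDRESS(MYSOC_GPT1_BASE_ADDR),  /* placeholder */
                           MYSOC_INT_GPT1);                         /* placeholder */
    }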
diff --git a/arch/arm/plat-omap/clock.c b/arch/arm/plat-omap/clock.c
index 62ec5c452792..706b7e29397f 100644
--- a/arch/arm/plat-omap/clock.c
+++ b/arch/arm/plat-omap/clock.c
@@ -461,6 +461,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
461 struct clk *c; 461 struct clk *c;
462 struct clk *pa; 462 struct clk *pa;
463 463
464 mutex_lock(&clocks_mutex);
464 seq_printf(s, "%-30s %-30s %-10s %s\n", 465 seq_printf(s, "%-30s %-30s %-10s %s\n",
465 "clock-name", "parent-name", "rate", "use-count"); 466 "clock-name", "parent-name", "rate", "use-count");
466 467
@@ -469,6 +470,7 @@ static int clk_dbg_show_summary(struct seq_file *s, void *unused)
469 seq_printf(s, "%-30s %-30s %-10lu %d\n", 470 seq_printf(s, "%-30s %-30s %-10lu %d\n",
470 c->name, pa ? pa->name : "none", c->rate, c->usecount); 471 c->name, pa ? pa->name : "none", c->rate, c->usecount);
471 } 472 }
473 mutex_unlock(&clocks_mutex);
472 474
473 return 0; 475 return 0;
474} 476}
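Note: the omap clock debugfs summary now holds clocks_mutex while walking the clock list, so the seq_file output cannot race with registration or removal that mutates the list under the same mutex. The general pattern, sketched with a hypothetical list and lock:

    /* Sketch: a debugfs "show" that walks a list must hold the same lock the
     * writers take while mutating that list. */
    static int foo_summary_show(struct seq_file *s, void *unused)
    {
            struct foo *f;

            mutex_lock(&foo_list_mutex);
            list_for_each_entry(f, &foo_list, node)
                    seq_printf(s, "%s %lu\n", f->name, f->rate);
            mutex_unlock(&foo_list_mutex);

            return 0;
    }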
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 297245dba66e..de6c0a08f461 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -252,8 +252,6 @@ IS_AM_SUBCLASS(335x, 0x335)
252 * cpu_is_omap2423(): True for OMAP2423 252 * cpu_is_omap2423(): True for OMAP2423
253 * cpu_is_omap2430(): True for OMAP2430 253 * cpu_is_omap2430(): True for OMAP2430
254 * cpu_is_omap3430(): True for OMAP3430 254 * cpu_is_omap3430(): True for OMAP3430
255 * cpu_is_omap3505(): True for OMAP3505
256 * cpu_is_omap3517(): True for OMAP3517
257 */ 255 */
258#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff) 256#define GET_OMAP_TYPE ((omap_rev() >> 16) & 0xffff)
259 257
@@ -277,8 +275,6 @@ IS_OMAP_TYPE(2422, 0x2422)
277IS_OMAP_TYPE(2423, 0x2423) 275IS_OMAP_TYPE(2423, 0x2423)
278IS_OMAP_TYPE(2430, 0x2430) 276IS_OMAP_TYPE(2430, 0x2430)
279IS_OMAP_TYPE(3430, 0x3430) 277IS_OMAP_TYPE(3430, 0x3430)
280IS_OMAP_TYPE(3505, 0x3517)
281IS_OMAP_TYPE(3517, 0x3517)
282 278
283#define cpu_is_omap310() 0 279#define cpu_is_omap310() 0
284#define cpu_is_omap730() 0 280#define cpu_is_omap730() 0
@@ -293,12 +289,6 @@ IS_OMAP_TYPE(3517, 0x3517)
293#define cpu_is_omap2422() 0 289#define cpu_is_omap2422() 0
294#define cpu_is_omap2423() 0 290#define cpu_is_omap2423() 0
295#define cpu_is_omap2430() 0 291#define cpu_is_omap2430() 0
296#define cpu_is_omap3503() 0
297#define cpu_is_omap3515() 0
298#define cpu_is_omap3525() 0
299#define cpu_is_omap3530() 0
300#define cpu_is_omap3505() 0
301#define cpu_is_omap3517() 0
302#define cpu_is_omap3430() 0 292#define cpu_is_omap3430() 0
303#define cpu_is_omap3630() 0 293#define cpu_is_omap3630() 0
304 294
@@ -350,12 +340,6 @@ IS_OMAP_TYPE(3517, 0x3517)
350 340
351#if defined(CONFIG_ARCH_OMAP3) 341#if defined(CONFIG_ARCH_OMAP3)
352# undef cpu_is_omap3430 342# undef cpu_is_omap3430
353# undef cpu_is_omap3503
354# undef cpu_is_omap3515
355# undef cpu_is_omap3525
356# undef cpu_is_omap3530
357# undef cpu_is_omap3505
358# undef cpu_is_omap3517
359# undef cpu_is_ti81xx 343# undef cpu_is_ti81xx
360# undef cpu_is_ti816x 344# undef cpu_is_ti816x
361# undef cpu_is_ti814x 345# undef cpu_is_ti814x
@@ -363,19 +347,6 @@ IS_OMAP_TYPE(3517, 0x3517)
363# undef cpu_is_am33xx 347# undef cpu_is_am33xx
364# undef cpu_is_am335x 348# undef cpu_is_am335x
365# define cpu_is_omap3430() is_omap3430() 349# define cpu_is_omap3430() is_omap3430()
366# define cpu_is_omap3503() (cpu_is_omap3430() && \
367 (!omap3_has_iva()) && \
368 (!omap3_has_sgx()))
369# define cpu_is_omap3515() (cpu_is_omap3430() && \
370 (!omap3_has_iva()) && \
371 (omap3_has_sgx()))
372# define cpu_is_omap3525() (cpu_is_omap3430() && \
373 (!omap3_has_sgx()) && \
374 (omap3_has_iva()))
375# define cpu_is_omap3530() (cpu_is_omap3430())
376# define cpu_is_omap3517() is_omap3517()
377# define cpu_is_omap3505() (cpu_is_omap3517() && \
378 !omap3_has_sgx())
379# undef cpu_is_omap3630 350# undef cpu_is_omap3630
380# define cpu_is_omap3630() is_omap363x() 351# define cpu_is_omap3630() is_omap363x()
381# define cpu_is_ti81xx() is_ti81xx() 352# define cpu_is_ti81xx() is_ti81xx()
@@ -424,10 +395,6 @@ IS_OMAP_TYPE(3517, 0x3517)
424#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8)) 395#define OMAP3630_REV_ES1_1 (OMAP363X_CLASS | (0x1 << 8))
425#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8)) 396#define OMAP3630_REV_ES1_2 (OMAP363X_CLASS | (0x2 << 8))
426 397
427#define OMAP3517_CLASS 0x35170034
428#define OMAP3517_REV_ES1_0 OMAP3517_CLASS
429#define OMAP3517_REV_ES1_1 (OMAP3517_CLASS | (0x1 << 8))
430
431#define TI816X_CLASS 0x81600034 398#define TI816X_CLASS 0x81600034
432#define TI8168_REV_ES1_0 TI816X_CLASS 399#define TI8168_REV_ES1_0 TI816X_CLASS
433#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8)) 400#define TI8168_REV_ES1_1 (TI816X_CLASS | (0x1 << 8))
diff --git a/arch/arm/plat-omap/include/plat/gpmc.h b/arch/arm/plat-omap/include/plat/gpmc.h
index 1527929b445a..f37764a36072 100644
--- a/arch/arm/plat-omap/include/plat/gpmc.h
+++ b/arch/arm/plat-omap/include/plat/gpmc.h
@@ -92,6 +92,8 @@ enum omap_ecc {
92 OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */ 92 OMAP_ECC_HAMMING_CODE_HW, /* gpmc to detect the error */
93 /* 1-bit ecc: stored at beginning of spare area as romcode */ 93 /* 1-bit ecc: stored at beginning of spare area as romcode */
94 OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */ 94 OMAP_ECC_HAMMING_CODE_HW_ROMCODE, /* gpmc method & romcode layout */
95 OMAP_ECC_BCH4_CODE_HW, /* 4-bit BCH ecc code */
96 OMAP_ECC_BCH8_CODE_HW, /* 8-bit BCH ecc code */
95}; 97};
96 98
97/* 99/*
@@ -157,4 +159,13 @@ extern int gpmc_nand_write(int cs, int cmd, int wval);
157 159
158int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size); 160int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size);
159int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code); 161int gpmc_calculate_ecc(int cs, const u_char *dat, u_char *ecc_code);
162
163#ifdef CONFIG_ARCH_OMAP3
164int gpmc_init_hwecc_bch(int cs, int nsectors, int nerrors);
165int gpmc_enable_hwecc_bch(int cs, int mode, int dev_width, int nsectors,
166 int nerrors);
167int gpmc_calculate_ecc_bch4(int cs, const u_char *dat, u_char *ecc);
168int gpmc_calculate_ecc_bch8(int cs, const u_char *dat, u_char *ecc);
169#endif /* CONFIG_ARCH_OMAP3 */
170
160#endif 171#endif
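
The gpmc.h hunk above adds the OMAP_ECC_BCH4_CODE_HW/OMAP_ECC_BCH8_CODE_HW selectors and exports BCH helpers for OMAP3. A minimal sketch of a caller, assuming a hypothetical chip-select EXAMPLE_CS and forwarding mode/dev_width untouched from the NAND core; only the prototypes themselves come from this patch:

/*
 * Hedged sketch only: a hypothetical user of the BCH helpers declared
 * above.  The chip-select, sector count and error count are illustrative.
 */
#include <plat/gpmc.h>

#define EXAMPLE_CS	0	/* GPMC chip-select wired to the NAND device (assumption) */

static int example_nand_bch8_init(void)
{
	/* one 512-byte sector per ECC step, correcting up to 8 bit flips */
	return gpmc_init_hwecc_bch(EXAMPLE_CS, 1, 8);
}

static int example_nand_bch8_hwctl(int mode, int dev_width)
{
	/* mode and dev_width are forwarded as-is from the NAND core / board data */
	return gpmc_enable_hwecc_bch(EXAMPLE_CS, mode, dev_width, 1, 8);
}

static int example_nand_bch8_calc(const u_char *dat, u_char *ecc_code)
{
	/* fetch the BCH8 syndrome the GPMC engine computed for this sector */
	return gpmc_calculate_ecc_bch8(EXAMPLE_CS, dat, ecc_code);
}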
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index a7754a886d42..5493bd95da5e 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -172,8 +172,7 @@ struct omap_mmc_platform_data {
172extern void omap_mmc_notify_cover_event(struct device *dev, int slot, 172extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
173 int is_closed); 173 int is_closed);
174 174
175#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \ 175#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE)
176 defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
177void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, 176void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
178 int nr_controllers); 177 int nr_controllers);
179void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data); 178void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
@@ -185,7 +184,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
185static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data) 184static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
186{ 185{
187} 186}
188
189#endif 187#endif
190 188
191extern int omap_msdi_reset(struct omap_hwmod *oh); 189extern int omap_msdi_reset(struct omap_hwmod *oh);
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 61fd837624a8..c1793786aea9 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -582,7 +582,7 @@ void __init orion_spi_1_init(unsigned long mapbase)
582 * Watchdog 582 * Watchdog
583 ****************************************************************************/ 583 ****************************************************************************/
584static struct resource orion_wdt_resource = 584static struct resource orion_wdt_resource =
585 DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28); 585 DEFINE_RES_MEM(TIMER_PHYS_BASE, 0x28);
586 586
587static struct platform_device orion_wdt_device = { 587static struct platform_device orion_wdt_device = {
588 .name = "orion_wdt", 588 .name = "orion_wdt",
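
The watchdog fix above swaps the virtual base for TIMER_PHYS_BASE because platform resources are consumed by drivers that remap them. A hedged sketch of the consuming side, not the actual orion_wdt probe, showing why the resource must carry a physical address:

/*
 * Hedged sketch: illustrates the standard consumer pattern only.
 */
#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *example_wdt_base;

static int example_wdt_probe(struct platform_device *pdev)
{
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);

	if (!res)
		return -ENODEV;

	/* ioremap() expects a physical address; a virtual one here would misbehave */
	example_wdt_base = ioremap(res->start, resource_size(res));
	if (!example_wdt_base)
		return -ENOMEM;

	return 0;
}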
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index 58b79809d20c..584c9bf8ed2d 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -193,6 +193,7 @@ static const struct platform_device_id ssp_id_table[] = {
193 { "pxa25x-nssp", PXA25x_NSSP }, 193 { "pxa25x-nssp", PXA25x_NSSP },
194 { "pxa27x-ssp", PXA27x_SSP }, 194 { "pxa27x-ssp", PXA27x_SSP },
195 { "pxa168-ssp", PXA168_SSP }, 195 { "pxa168-ssp", PXA168_SSP },
196 { "pxa910-ssp", PXA910_SSP },
196 { }, 197 { },
197}; 198};
198 199
diff --git a/arch/arm/plat-samsung/include/plat/fb.h b/arch/arm/plat-samsung/include/plat/fb.h
index 0fedf47fa502..536002ff2ab8 100644
--- a/arch/arm/plat-samsung/include/plat/fb.h
+++ b/arch/arm/plat-samsung/include/plat/fb.h
@@ -24,15 +24,16 @@
24 24
25/** 25/**
26 * struct s3c_fb_pd_win - per window setup data 26 * struct s3c_fb_pd_win - per window setup data
27 * @win_mode: The display parameters to initialise (not for window 0) 27 * @xres : The window X size.
28 * @yres : The window Y size.
28 * @virtual_x: The virtual X size. 29 * @virtual_x: The virtual X size.
29 * @virtual_y: The virtual Y size. 30 * @virtual_y: The virtual Y size.
30 */ 31 */
31struct s3c_fb_pd_win { 32struct s3c_fb_pd_win {
32 struct fb_videomode win_mode;
33
34 unsigned short default_bpp; 33 unsigned short default_bpp;
35 unsigned short max_bpp; 34 unsigned short max_bpp;
35 unsigned short xres;
36 unsigned short yres;
36 unsigned short virtual_x; 37 unsigned short virtual_x;
37 unsigned short virtual_y; 38 unsigned short virtual_y;
38}; 39};
@@ -45,6 +46,7 @@ struct s3c_fb_pd_win {
45 * @default_win: default window layer number to be used for UI layer. 46 * @default_win: default window layer number to be used for UI layer.
46 * @vidcon0: The base vidcon0 values to control the panel data format. 47 * @vidcon0: The base vidcon0 values to control the panel data format.
47 * @vidcon1: The base vidcon1 values to control the panel data output. 48 * @vidcon1: The base vidcon1 values to control the panel data output.
49 * @vtiming: Video timing when connected to a RGB type panel.
48 * @win: The setup data for each hardware window, or NULL for unused. 50 * @win: The setup data for each hardware window, or NULL for unused.
49 * @display_mode: The LCD output display mode. 51 * @display_mode: The LCD output display mode.
50 * 52 *
@@ -58,8 +60,7 @@ struct s3c_fb_platdata {
58 void (*setup_gpio)(void); 60 void (*setup_gpio)(void);
59 61
60 struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN]; 62 struct s3c_fb_pd_win *win[S3C_FB_MAX_WIN];
61 63 struct fb_videomode *vtiming;
62 u32 default_win;
63 64
64 u32 vidcon0; 65 u32 vidcon0;
65 u32 vidcon1; 66 u32 vidcon1;
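
The fb.h hunk above moves the panel timing out of the per-window data into a single vtiming pointer and replaces win_mode with explicit xres/yres. A hedged sketch of board platform data under the new layout; the geometry, margins and VIDCON flags are illustrative, only the field names follow the reworked structures:

/*
 * Hedged sketch of hypothetical board data; numbers are made up.
 */
#include <linux/fb.h>
#include <plat/fb.h>
#include <plat/regs-fb-v4.h>

static struct s3c_fb_pd_win example_fb_win0 = {
	.xres		= 480,
	.yres		= 800,
	.virtual_x	= 480,
	.virtual_y	= 1600,		/* double-buffered */
	.default_bpp	= 24,
	.max_bpp	= 32,
};

/* video timing now lives once in the platdata instead of per window */
static struct fb_videomode example_fb_vtiming = {
	.left_margin	= 16,
	.right_margin	= 16,
	.upper_margin	= 3,
	.lower_margin	= 28,
	.hsync_len	= 2,
	.vsync_len	= 2,
	.xres		= 480,
	.yres		= 800,
};

static struct s3c_fb_platdata example_fb_pdata = {
	.win[0]		= &example_fb_win0,
	.vtiming	= &example_fb_vtiming,
	.vidcon0	= VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB,
	.vidcon1	= VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC,
};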
diff --git a/arch/arm/plat-samsung/include/plat/s3c2416.h b/arch/arm/plat-samsung/include/plat/s3c2416.h
index de2b5bdc5ebd..7178e338e25e 100644
--- a/arch/arm/plat-samsung/include/plat/s3c2416.h
+++ b/arch/arm/plat-samsung/include/plat/s3c2416.h
@@ -24,6 +24,9 @@ extern void s3c2416_init_clocks(int xtal);
24extern int s3c2416_baseclk_add(void); 24extern int s3c2416_baseclk_add(void);
25 25
26extern void s3c2416_restart(char mode, const char *cmd); 26extern void s3c2416_restart(char mode, const char *cmd);
27
28extern struct syscore_ops s3c2416_irq_syscore_ops;
29
27#else 30#else
28#define s3c2416_init_clocks NULL 31#define s3c2416_init_clocks NULL
29#define s3c2416_init_uarts NULL 32#define s3c2416_init_uarts NULL
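
The s3c2416.h hunk above exports s3c2416_irq_syscore_ops. A minimal sketch, assuming a hypothetical machine-init hook, of how platform code would typically register it so the IRQ state is saved and restored across suspend:

/*
 * Hedged sketch: the init function name is illustrative.
 */
#include <linux/init.h>
#include <linux/syscore_ops.h>
#include <plat/s3c2416.h>

static void __init example_s3c2416_init_late(void)
{
	/* hook the IRQ save/restore callbacks into the suspend/resume path */
	register_syscore_ops(&s3c2416_irq_syscore_ops);
}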
diff --git a/arch/arm/plat-spear/include/plat/debug-macro.S b/arch/arm/plat-spear/include/plat/debug-macro.S
index ab3de721c5db..75b05ad0fbad 100644
--- a/arch/arm/plat-spear/include/plat/debug-macro.S
+++ b/arch/arm/plat-spear/include/plat/debug-macro.S
@@ -4,7 +4,7 @@
4 * Debugging macro include header for spear platform 4 * Debugging macro include header for spear platform
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/pl080.h b/arch/arm/plat-spear/include/plat/pl080.h
index e14a3e4932f9..2bc6b54460a8 100644
--- a/arch/arm/plat-spear/include/plat/pl080.h
+++ b/arch/arm/plat-spear/include/plat/pl080.h
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/shirq.h b/arch/arm/plat-spear/include/plat/shirq.h
index 03ed8b585dcf..88a7fbd24793 100644
--- a/arch/arm/plat-spear/include/plat/shirq.h
+++ b/arch/arm/plat-spear/include/plat/shirq.h
@@ -4,7 +4,7 @@
4 * SPEAr platform shared irq layer header file 4 * SPEAr platform shared irq layer header file
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/timex.h b/arch/arm/plat-spear/include/plat/timex.h
index 914d09dd50fd..ef95e5b780bd 100644
--- a/arch/arm/plat-spear/include/plat/timex.h
+++ b/arch/arm/plat-spear/include/plat/timex.h
@@ -4,7 +4,7 @@
4 * SPEAr platform specific timex definitions 4 * SPEAr platform specific timex definitions
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/include/plat/uncompress.h b/arch/arm/plat-spear/include/plat/uncompress.h
index 6dd455bafdfd..2ce6cb17a98b 100644
--- a/arch/arm/plat-spear/include/plat/uncompress.h
+++ b/arch/arm/plat-spear/include/plat/uncompress.h
@@ -4,7 +4,7 @@
4 * Serial port stubs for kernel decompress status messages 4 * Serial port stubs for kernel decompress status messages
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/pl080.c b/arch/arm/plat-spear/pl080.c
index a56a067717c1..12cf27f935f9 100644
--- a/arch/arm/plat-spear/pl080.c
+++ b/arch/arm/plat-spear/pl080.c
@@ -4,7 +4,7 @@
4 * DMAC pl080 definitions for SPEAr platform 4 * DMAC pl080 definitions for SPEAr platform
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index ea0a61302b7e..4f990115b1bd 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -4,7 +4,7 @@
4 * SPEAr platform specific restart functions 4 * SPEAr platform specific restart functions
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/arm/plat-spear/shirq.c b/arch/arm/plat-spear/shirq.c
index 961fb7261243..853e891e1184 100644
--- a/arch/arm/plat-spear/shirq.c
+++ b/arch/arm/plat-spear/shirq.c
@@ -4,7 +4,7 @@
4 * SPEAr platform shared irq layer source file 4 * SPEAr platform shared irq layer source file
5 * 5 *
6 * Copyright (C) 2009 ST Microelectronics 6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/arch/avr32/include/asm/posix_types.h b/arch/avr32/include/asm/posix_types.h
index 74667bfc88cc..9ba9e749b3f3 100644
--- a/arch/avr32/include/asm/posix_types.h
+++ b/arch/avr32/include/asm/posix_types.h
@@ -17,9 +17,6 @@
17typedef unsigned short __kernel_mode_t; 17typedef unsigned short __kernel_mode_t;
18#define __kernel_mode_t __kernel_mode_t 18#define __kernel_mode_t __kernel_mode_t
19 19
20typedef unsigned short __kernel_nlink_t;
21#define __kernel_nlink_t __kernel_nlink_t
22
23typedef unsigned short __kernel_ipc_pid_t; 20typedef unsigned short __kernel_ipc_pid_t;
24#define __kernel_ipc_pid_t __kernel_ipc_pid_t 21#define __kernel_ipc_pid_t __kernel_ipc_pid_t
25 22
diff --git a/arch/avr32/kernel/entry-avr32b.S b/arch/avr32/kernel/entry-avr32b.S
index 169268c40ae2..df2884181313 100644
--- a/arch/avr32/kernel/entry-avr32b.S
+++ b/arch/avr32/kernel/entry-avr32b.S
@@ -281,7 +281,7 @@ syscall_exit_work:
281 ld.w r1, r0[TI_flags] 281 ld.w r1, r0[TI_flags]
282 rjmp 1b 282 rjmp 1b
283 283
2842: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK | _TIF_NOTIFY_RESUME 2842: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
285 tst r1, r2 285 tst r1, r2
286 breq 3f 286 breq 3f
287 unmask_interrupts 287 unmask_interrupts
@@ -587,7 +587,7 @@ fault_exit_work:
587 ld.w r1, r0[TI_flags] 587 ld.w r1, r0[TI_flags]
588 rjmp fault_exit_work 588 rjmp fault_exit_work
589 589
5901: mov r2, _TIF_SIGPENDING | _TIF_RESTORE_SIGMASK 5901: mov r2, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
591 tst r1, r2 591 tst r1, r2
592 breq 2f 592 breq 2f
593 unmask_interrupts 593 unmask_interrupts
diff --git a/arch/avr32/kernel/signal.c b/arch/avr32/kernel/signal.c
index ae386c304bee..d552a854dacc 100644
--- a/arch/avr32/kernel/signal.c
+++ b/arch/avr32/kernel/signal.c
@@ -22,8 +22,6 @@
22#include <asm/ucontext.h> 22#include <asm/ucontext.h>
23#include <asm/syscalls.h> 23#include <asm/syscalls.h>
24 24
25#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
26
27asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 25asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
28 struct pt_regs *regs) 26 struct pt_regs *regs)
29{ 27{
@@ -89,7 +87,6 @@ asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
89 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 87 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
90 goto badframe; 88 goto badframe;
91 89
92 sigdelsetmask(&set, ~_BLOCKABLE);
93 set_current_blocked(&set); 90 set_current_blocked(&set);
94 91
95 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 92 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -224,30 +221,27 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
224 221
225static inline void 222static inline void
226handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 223handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
227 sigset_t *oldset, struct pt_regs *regs, int syscall) 224 struct pt_regs *regs, int syscall)
228{ 225{
229 int ret; 226 int ret;
230 227
231 /* 228 /*
232 * Set up the stack frame 229 * Set up the stack frame
233 */ 230 */
234 ret = setup_rt_frame(sig, ka, info, oldset, regs); 231 ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
235 232
236 /* 233 /*
237 * Check that the resulting registers are sane 234 * Check that the resulting registers are sane
238 */ 235 */
239 ret |= !valid_user_regs(regs); 236 ret |= !valid_user_regs(regs);
240 237
241 if (ret != 0) {
242 force_sigsegv(sig, current);
243 return;
244 }
245
246 /* 238 /*
247 * Block the signal if we were successful. 239 * Block the signal if we were successful.
248 */ 240 */
249 block_sigmask(ka, sig); 241 if (ret != 0)
250 clear_thread_flag(TIF_RESTORE_SIGMASK); 242 force_sigsegv(sig, current);
243 else
244 signal_delivered(sig, info, ka, regs, 0);
251} 245}
252 246
253/* 247/*
@@ -255,7 +249,7 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
255 * doesn't want to handle. Thus you cannot kill init even with a 249 * doesn't want to handle. Thus you cannot kill init even with a
256 * SIGKILL even by mistake. 250 * SIGKILL even by mistake.
257 */ 251 */
258int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall) 252static void do_signal(struct pt_regs *regs, int syscall)
259{ 253{
260 siginfo_t info; 254 siginfo_t info;
261 int signr; 255 int signr;
@@ -267,12 +261,7 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
267 * without doing anything if so. 261 * without doing anything if so.
268 */ 262 */
269 if (!user_mode(regs)) 263 if (!user_mode(regs))
270 return 0; 264 return;
271
272 if (test_thread_flag(TIF_RESTORE_SIGMASK))
273 oldset = &current->saved_sigmask;
274 else if (!oldset)
275 oldset = &current->blocked;
276 265
277 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 266 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
278 if (syscall) { 267 if (syscall) {
@@ -297,15 +286,11 @@ int do_signal(struct pt_regs *regs, sigset_t *oldset, int syscall)
297 286
298 if (signr == 0) { 287 if (signr == 0) {
299 /* No signal to deliver -- put the saved sigmask back */ 288 /* No signal to deliver -- put the saved sigmask back */
300 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 289 restore_saved_sigmask();
301 clear_thread_flag(TIF_RESTORE_SIGMASK); 290 return;
302 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
303 }
304 return 0;
305 } 291 }
306 292
307 handle_signal(signr, &ka, &info, oldset, regs, syscall); 293 handle_signal(signr, &ka, &info, regs, syscall);
308 return 1;
309} 294}
310 295
311asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti) 296asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
@@ -315,13 +300,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, struct thread_info *ti)
315 if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR) 300 if ((sysreg_read(SR) & MODE_MASK) == MODE_SUPERVISOR)
316 syscall = 1; 301 syscall = 1;
317 302
318 if (ti->flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 303 if (ti->flags & _TIF_SIGPENDING)
319 do_signal(regs, &current->blocked, syscall); 304 do_signal(regs, syscall);
320 305
321 if (ti->flags & _TIF_NOTIFY_RESUME) { 306 if (ti->flags & _TIF_NOTIFY_RESUME) {
322 clear_thread_flag(TIF_NOTIFY_RESUME); 307 clear_thread_flag(TIF_NOTIFY_RESUME);
323 tracehook_notify_resume(regs); 308 tracehook_notify_resume(regs);
324 if (current->replacement_session_keyring)
325 key_replace_session_keyring();
326 } 309 }
327} 310}
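
The avr32 conversion above (and the matching blackfin, c6x, cris and frv hunks below) moves signal delivery onto the generic helpers sigmask_to_save(), signal_delivered() and restore_saved_sigmask(), dropping the per-arch _BLOCKABLE and TIF_RESTORE_SIGMASK handling. A condensed sketch of the resulting pattern; setup_rt_frame() stands in for the arch-specific frame setup and the example_ names are illustrative:

/*
 * Hedged sketch of the post-conversion flow, not any one architecture's code.
 */
#include <linux/sched.h>
#include <linux/signal.h>

/* arch-provided; declared here only so the sketch is self-contained */
static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			  sigset_t *set, struct pt_regs *regs);

static void example_handle_signal(int sig, siginfo_t *info,
				  struct k_sigaction *ka, struct pt_regs *regs)
{
	/* the sigmask recorded in the frame now comes from the core helper */
	if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
		force_sigsegv(sig, current);
	else
		/* replaces the old block_sigmask() + clear TIF_RESTORE_SIGMASK dance */
		signal_delivered(sig, info, ka, regs, 0);
}

static void example_do_signal(struct pt_regs *regs)
{
	struct k_sigaction ka;
	siginfo_t info;
	int signr = get_signal_to_deliver(&info, &ka, regs, NULL);

	if (signr > 0) {
		example_handle_signal(signr, &info, &ka, regs);
		return;
	}

	/* no signal to deliver: put any saved sigmask back in one call */
	restore_saved_sigmask();
}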
diff --git a/arch/blackfin/include/asm/posix_types.h b/arch/blackfin/include/asm/posix_types.h
index 41bc1875c4d7..1bd3436db6a7 100644
--- a/arch/blackfin/include/asm/posix_types.h
+++ b/arch/blackfin/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned int __kernel_ipc_pid_t; 13typedef unsigned int __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/blackfin/include/asm/thread_info.h b/arch/blackfin/include/asm/thread_info.h
index 02560fd8a121..53ad10005ae3 100644
--- a/arch/blackfin/include/asm/thread_info.h
+++ b/arch/blackfin/include/asm/thread_info.h
@@ -100,7 +100,6 @@ static inline struct thread_info *current_thread_info(void)
100 TIF_NEED_RESCHED */ 100 TIF_NEED_RESCHED */
101#define TIF_MEMDIE 4 /* is terminating due to OOM killer */ 101#define TIF_MEMDIE 4 /* is terminating due to OOM killer */
102#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 102#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
103#define TIF_FREEZE 6 /* is freezing for suspend */
104#define TIF_IRQ_SYNC 7 /* sync pipeline stage */ 103#define TIF_IRQ_SYNC 7 /* sync pipeline stage */
105#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */ 104#define TIF_NOTIFY_RESUME 8 /* callback before returning to user */
106#define TIF_SINGLESTEP 9 105#define TIF_SINGLESTEP 9
@@ -111,7 +110,6 @@ static inline struct thread_info *current_thread_info(void)
111#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED) 110#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
112#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 111#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
113#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK) 112#define _TIF_RESTORE_SIGMASK (1<<TIF_RESTORE_SIGMASK)
114#define _TIF_FREEZE (1<<TIF_FREEZE)
115#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC) 113#define _TIF_IRQ_SYNC (1<<TIF_IRQ_SYNC)
116#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME) 114#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
117#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP) 115#define _TIF_SINGLESTEP (1<<TIF_SINGLESTEP)
diff --git a/arch/blackfin/kernel/process.c b/arch/blackfin/kernel/process.c
index 2e3994b20169..62bcea7dcc6d 100644
--- a/arch/blackfin/kernel/process.c
+++ b/arch/blackfin/kernel/process.c
@@ -173,7 +173,7 @@ asmlinkage int bfin_clone(struct pt_regs *regs)
173 unsigned long newsp; 173 unsigned long newsp;
174 174
175#ifdef __ARCH_SYNC_CORE_DCACHE 175#ifdef __ARCH_SYNC_CORE_DCACHE
176 if (current->rt.nr_cpus_allowed == num_possible_cpus()) 176 if (current->nr_cpus_allowed == num_possible_cpus())
177 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id())); 177 set_cpus_allowed_ptr(current, cpumask_of(smp_processor_id()));
178#endif 178#endif
179 179
diff --git a/arch/blackfin/kernel/signal.c b/arch/blackfin/kernel/signal.c
index e5bbc1a5edc2..6682b73a8523 100644
--- a/arch/blackfin/kernel/signal.c
+++ b/arch/blackfin/kernel/signal.c
@@ -19,8 +19,6 @@
19#include <asm/fixed_code.h> 19#include <asm/fixed_code.h>
20#include <asm/syscall.h> 20#include <asm/syscall.h>
21 21
22#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
23
24/* Location of the trace bit in SYSCFG. */ 22/* Location of the trace bit in SYSCFG. */
25#define TRACE_BITS 0x0001 23#define TRACE_BITS 0x0001
26 24
@@ -98,7 +96,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
98 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 96 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
99 goto badframe; 97 goto badframe;
100 98
101 sigdelsetmask(&set, ~_BLOCKABLE);
102 set_current_blocked(&set); 99 set_current_blocked(&set);
103 100
104 if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 101 if (rt_restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -190,17 +187,22 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
190 err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); 187 err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
191 188
192 if (err) 189 if (err)
193 goto give_sigsegv; 190 return -EFAULT;
194 191
195 /* Set up registers for signal handler */ 192 /* Set up registers for signal handler */
196 wrusp((unsigned long)frame);
197 if (current->personality & FDPIC_FUNCPTRS) { 193 if (current->personality & FDPIC_FUNCPTRS) {
198 struct fdpic_func_descriptor __user *funcptr = 194 struct fdpic_func_descriptor __user *funcptr =
199 (struct fdpic_func_descriptor *) ka->sa.sa_handler; 195 (struct fdpic_func_descriptor *) ka->sa.sa_handler;
200 __get_user(regs->pc, &funcptr->text); 196 u32 pc, p3;
201 __get_user(regs->p3, &funcptr->GOT); 197 err |= __get_user(pc, &funcptr->text);
198 err |= __get_user(p3, &funcptr->GOT);
199 if (err)
200 return -EFAULT;
201 regs->pc = pc;
202 regs->p3 = p3;
202 } else 203 } else
203 regs->pc = (unsigned long)ka->sa.sa_handler; 204 regs->pc = (unsigned long)ka->sa.sa_handler;
205 wrusp((unsigned long)frame);
204 regs->rets = SIGRETURN_STUB; 206 regs->rets = SIGRETURN_STUB;
205 207
206 regs->r0 = frame->sig; 208 regs->r0 = frame->sig;
@@ -208,10 +210,6 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t * info,
208 regs->r2 = (unsigned long)(&frame->uc); 210 regs->r2 = (unsigned long)(&frame->uc);
209 211
210 return 0; 212 return 0;
211
212 give_sigsegv:
213 force_sigsegv(sig, current);
214 return -EFAULT;
215} 213}
216 214
217static inline void 215static inline void
@@ -247,24 +245,21 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
247/* 245/*
248 * OK, we're invoking a handler 246 * OK, we're invoking a handler
249 */ 247 */
250static int 248static void
251handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, 249handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
252 sigset_t *oldset, struct pt_regs *regs) 250 struct pt_regs *regs)
253{ 251{
254 int ret;
255
256 /* are we from a system call? to see pt_regs->orig_p0 */ 252 /* are we from a system call? to see pt_regs->orig_p0 */
257 if (regs->orig_p0 >= 0) 253 if (regs->orig_p0 >= 0)
258 /* If so, check system call restarting.. */ 254 /* If so, check system call restarting.. */
259 handle_restart(regs, ka, 1); 255 handle_restart(regs, ka, 1);
260 256
261 /* set up the stack frame */ 257 /* set up the stack frame */
262 ret = setup_rt_frame(sig, ka, info, oldset, regs); 258 if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
263 259 force_sigsegv(sig, current);
264 if (ret == 0) 260 else
265 block_sigmask(ka, sig); 261 signal_delivered(sig, info, ka, regs,
266 262 test_thread_flag(TIF_SINGLESTEP));
267 return ret;
268} 263}
269 264
270/* 265/*
@@ -281,37 +276,16 @@ asmlinkage void do_signal(struct pt_regs *regs)
281 siginfo_t info; 276 siginfo_t info;
282 int signr; 277 int signr;
283 struct k_sigaction ka; 278 struct k_sigaction ka;
284 sigset_t *oldset;
285 279
286 current->thread.esp0 = (unsigned long)regs; 280 current->thread.esp0 = (unsigned long)regs;
287 281
288 if (try_to_freeze())
289 goto no_signal;
290
291 if (test_thread_flag(TIF_RESTORE_SIGMASK))
292 oldset = &current->saved_sigmask;
293 else
294 oldset = &current->blocked;
295
296 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 282 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
297 if (signr > 0) { 283 if (signr > 0) {
298 /* Whee! Actually deliver the signal. */ 284 /* Whee! Actually deliver the signal. */
299 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 285 handle_signal(signr, &info, &ka, regs);
300 /* a signal was successfully delivered; the saved
301 * sigmask will have been stored in the signal frame,
302 * and will be restored by sigreturn, so we can simply
303 * clear the TIF_RESTORE_SIGMASK flag */
304 if (test_thread_flag(TIF_RESTORE_SIGMASK))
305 clear_thread_flag(TIF_RESTORE_SIGMASK);
306
307 tracehook_signal_handler(signr, &info, &ka, regs,
308 test_thread_flag(TIF_SINGLESTEP));
309 }
310
311 return; 286 return;
312 } 287 }
313 288
314 no_signal:
315 /* Did we come from a system call? */ 289 /* Did we come from a system call? */
316 if (regs->orig_p0 >= 0) 290 if (regs->orig_p0 >= 0)
317 /* Restart the system call - no handlers present */ 291 /* Restart the system call - no handlers present */
@@ -319,10 +293,7 @@ asmlinkage void do_signal(struct pt_regs *regs)
319 293
320 /* if there's no signal to deliver, we just put the saved sigmask 294 /* if there's no signal to deliver, we just put the saved sigmask
321 * back */ 295 * back */
322 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 296 restore_saved_sigmask();
323 clear_thread_flag(TIF_RESTORE_SIGMASK);
324 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
325 }
326} 297}
327 298
328/* 299/*
@@ -330,14 +301,12 @@ asmlinkage void do_signal(struct pt_regs *regs)
330 */ 301 */
331asmlinkage void do_notify_resume(struct pt_regs *regs) 302asmlinkage void do_notify_resume(struct pt_regs *regs)
332{ 303{
333 if (test_thread_flag(TIF_SIGPENDING) || test_thread_flag(TIF_RESTORE_SIGMASK)) 304 if (test_thread_flag(TIF_SIGPENDING))
334 do_signal(regs); 305 do_signal(regs);
335 306
336 if (test_thread_flag(TIF_NOTIFY_RESUME)) { 307 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
337 clear_thread_flag(TIF_NOTIFY_RESUME); 308 clear_thread_flag(TIF_NOTIFY_RESUME);
338 tracehook_notify_resume(regs); 309 tracehook_notify_resume(regs);
339 if (current->replacement_session_keyring)
340 key_replace_session_keyring();
341 } 310 }
342} 311}
343 312
diff --git a/arch/blackfin/kernel/trace.c b/arch/blackfin/kernel/trace.c
index 44bbf2f564cb..f7f7a18abca9 100644
--- a/arch/blackfin/kernel/trace.c
+++ b/arch/blackfin/kernel/trace.c
@@ -10,6 +10,8 @@
10#include <linux/hardirq.h> 10#include <linux/hardirq.h>
11#include <linux/thread_info.h> 11#include <linux/thread_info.h>
12#include <linux/mm.h> 12#include <linux/mm.h>
13#include <linux/oom.h>
14#include <linux/sched.h>
13#include <linux/uaccess.h> 15#include <linux/uaccess.h>
14#include <linux/module.h> 16#include <linux/module.h>
15#include <linux/kallsyms.h> 17#include <linux/kallsyms.h>
@@ -27,8 +29,7 @@ void decode_address(char *buf, unsigned long address)
27{ 29{
28 struct task_struct *p; 30 struct task_struct *p;
29 struct mm_struct *mm; 31 struct mm_struct *mm;
30 unsigned long flags, offset; 32 unsigned long offset;
31 unsigned char in_atomic = (bfin_read_IPEND() & 0x10) || in_atomic();
32 struct rb_node *n; 33 struct rb_node *n;
33 34
34#ifdef CONFIG_KALLSYMS 35#ifdef CONFIG_KALLSYMS
@@ -112,17 +113,17 @@ void decode_address(char *buf, unsigned long address)
112 * mappings of all our processes and see if we can't be a whee 113 * mappings of all our processes and see if we can't be a whee
113 * bit more specific 114 * bit more specific
114 */ 115 */
115 write_lock_irqsave(&tasklist_lock, flags); 116 read_lock(&tasklist_lock);
116 for_each_process(p) { 117 for_each_process(p) {
117 mm = (in_atomic ? p->mm : get_task_mm(p)); 118 struct task_struct *t;
118 if (!mm)
119 continue;
120 119
121 if (!down_read_trylock(&mm->mmap_sem)) { 120 t = find_lock_task_mm(p);
122 if (!in_atomic) 121 if (!t)
123 mmput(mm);
124 continue; 122 continue;
125 } 123
124 mm = t->mm;
125 if (!down_read_trylock(&mm->mmap_sem))
126 goto __continue;
126 127
127 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) { 128 for (n = rb_first(&mm->mm_rb); n; n = rb_next(n)) {
128 struct vm_area_struct *vma; 129 struct vm_area_struct *vma;
@@ -131,7 +132,7 @@ void decode_address(char *buf, unsigned long address)
131 132
132 if (address >= vma->vm_start && address < vma->vm_end) { 133 if (address >= vma->vm_start && address < vma->vm_end) {
133 char _tmpbuf[256]; 134 char _tmpbuf[256];
134 char *name = p->comm; 135 char *name = t->comm;
135 struct file *file = vma->vm_file; 136 struct file *file = vma->vm_file;
136 137
137 if (file) { 138 if (file) {
@@ -164,8 +165,7 @@ void decode_address(char *buf, unsigned long address)
164 name, vma->vm_start, vma->vm_end); 165 name, vma->vm_start, vma->vm_end);
165 166
166 up_read(&mm->mmap_sem); 167 up_read(&mm->mmap_sem);
167 if (!in_atomic) 168 task_unlock(t);
168 mmput(mm);
169 169
170 if (buf[0] == '\0') 170 if (buf[0] == '\0')
171 sprintf(buf, "[ %s ] dynamic memory", name); 171 sprintf(buf, "[ %s ] dynamic memory", name);
@@ -175,8 +175,8 @@ void decode_address(char *buf, unsigned long address)
175 } 175 }
176 176
177 up_read(&mm->mmap_sem); 177 up_read(&mm->mmap_sem);
178 if (!in_atomic) 178__continue:
179 mmput(mm); 179 task_unlock(t);
180 } 180 }
181 181
182 /* 182 /*
@@ -186,7 +186,7 @@ void decode_address(char *buf, unsigned long address)
186 sprintf(buf, "/* kernel dynamic memory */"); 186 sprintf(buf, "/* kernel dynamic memory */");
187 187
188done: 188done:
189 write_unlock_irqrestore(&tasklist_lock, flags); 189 read_unlock(&tasklist_lock);
190} 190}
191 191
192#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1) 192#define EXPAND_LEN ((1 << CONFIG_DEBUG_BFIN_HWTRACE_EXPAND_LEN) * 256 - 1)
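
The decode_address() hunk above replaces the write_lock_irqsave/get_task_mm dance with find_lock_task_mm() under a plain read_lock of tasklist_lock. A hedged sketch of just the locking choreography, with the VMA walk reduced to a stub:

/*
 * Hedged sketch: only the lock pairing is shown; the loop body is a stub.
 */
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/mm.h>

static void example_walk_all_mms(void)
{
	struct task_struct *p;

	read_lock(&tasklist_lock);		/* reader only, no irqsave needed */
	for_each_process(p) {
		struct task_struct *t = find_lock_task_mm(p);
		struct mm_struct *mm;

		if (!t)
			continue;		/* kernel thread or exiting task, no mm */

		mm = t->mm;			/* stable while the task lock is held */
		if (down_read_trylock(&mm->mmap_sem)) {
			/* ... inspect mm->mm_rb / VMAs here ... */
			up_read(&mm->mmap_sem);
		}
		task_unlock(t);			/* pairs with find_lock_task_mm() */
	}
	read_unlock(&tasklist_lock);
}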
diff --git a/arch/blackfin/mach-bf561/boards/acvilon.c b/arch/blackfin/mach-bf561/boards/acvilon.c
index f6ffd6f054c3..0b74218fdd3a 100644
--- a/arch/blackfin/mach-bf561/boards/acvilon.c
+++ b/arch/blackfin/mach-bf561/boards/acvilon.c
@@ -248,8 +248,6 @@ static struct platform_device bfin_uart0_device = {
248 248
249#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE) 249#if defined(CONFIG_MTD_NAND_PLATFORM) || defined(CONFIG_MTD_NAND_PLATFORM_MODULE)
250 250
251const char *part_probes[] = { "cmdlinepart", NULL };
252
253static struct mtd_partition bfin_plat_nand_partitions[] = { 251static struct mtd_partition bfin_plat_nand_partitions[] = {
254 { 252 {
255 .name = "params(nand)", 253 .name = "params(nand)",
@@ -289,7 +287,6 @@ static struct platform_nand_data bfin_plat_nand_data = {
289 .chip = { 287 .chip = {
290 .nr_chips = 1, 288 .nr_chips = 1,
291 .chip_delay = 30, 289 .chip_delay = 30,
292 .part_probe_types = part_probes,
293 .partitions = bfin_plat_nand_partitions, 290 .partitions = bfin_plat_nand_partitions,
294 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions), 291 .nr_partitions = ARRAY_SIZE(bfin_plat_nand_partitions),
295 }, 292 },
diff --git a/arch/blackfin/mach-common/entry.S b/arch/blackfin/mach-common/entry.S
index 80aa2535e2c9..04c2fbe41a7f 100644
--- a/arch/blackfin/mach-common/entry.S
+++ b/arch/blackfin/mach-common/entry.S
@@ -711,8 +711,6 @@ ENTRY(_system_call)
711 jump .Lresume_userspace_1; 711 jump .Lresume_userspace_1;
712 712
713.Lsyscall_sigpending: 713.Lsyscall_sigpending:
714 cc = BITTST(r7, TIF_RESTORE_SIGMASK);
715 if cc jump .Lsyscall_do_signals;
716 cc = BITTST(r7, TIF_SIGPENDING); 714 cc = BITTST(r7, TIF_SIGPENDING);
717 if cc jump .Lsyscall_do_signals; 715 if cc jump .Lsyscall_do_signals;
718 cc = BITTST(r7, TIF_NOTIFY_RESUME); 716 cc = BITTST(r7, TIF_NOTIFY_RESUME);
diff --git a/arch/c6x/kernel/signal.c b/arch/c6x/kernel/signal.c
index cf37478c1169..3d8f3c22a94f 100644
--- a/arch/c6x/kernel/signal.c
+++ b/arch/c6x/kernel/signal.c
@@ -20,8 +20,6 @@
20#include <asm/cacheflush.h> 20#include <asm/cacheflush.h>
21 21
22 22
23#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
24
25/* 23/*
26 * Do a signal return, undo the signal stack. 24 * Do a signal return, undo the signal stack.
27 */ 25 */
@@ -87,7 +85,6 @@ asmlinkage int do_rt_sigreturn(struct pt_regs *regs)
87 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 85 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
88 goto badframe; 86 goto badframe;
89 87
90 sigdelsetmask(&set, ~_BLOCKABLE);
91 set_current_blocked(&set); 88 set_current_blocked(&set);
92 89
93 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 90 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -248,10 +245,9 @@ do_restart:
248/* 245/*
249 * handle the actual delivery of a signal to userspace 246 * handle the actual delivery of a signal to userspace
250 */ 247 */
251static int handle_signal(int sig, 248static void handle_signal(int sig,
252 siginfo_t *info, struct k_sigaction *ka, 249 siginfo_t *info, struct k_sigaction *ka,
253 sigset_t *oldset, struct pt_regs *regs, 250 struct pt_regs *regs, int syscall)
254 int syscall)
255{ 251{
256 int ret; 252 int ret;
257 253
@@ -278,11 +274,9 @@ static int handle_signal(int sig,
278 } 274 }
279 275
280 /* Set up the stack frame */ 276 /* Set up the stack frame */
281 ret = setup_rt_frame(sig, ka, info, oldset, regs); 277 if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
282 if (ret == 0) 278 return;
283 block_sigmask(ka, sig); 279 signal_delivered(sig, info, ka, regs, 0);
284
285 return ret;
286} 280}
287 281
288/* 282/*
@@ -292,7 +286,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
292{ 286{
293 struct k_sigaction ka; 287 struct k_sigaction ka;
294 siginfo_t info; 288 siginfo_t info;
295 sigset_t *oldset;
296 int signr; 289 int signr;
297 290
298 /* we want the common case to go fast, which is why we may in certain 291 /* we want the common case to go fast, which is why we may in certain
@@ -300,25 +293,9 @@ static void do_signal(struct pt_regs *regs, int syscall)
300 if (!user_mode(regs)) 293 if (!user_mode(regs))
301 return; 294 return;
302 295
303 if (test_thread_flag(TIF_RESTORE_SIGMASK))
304 oldset = &current->saved_sigmask;
305 else
306 oldset = &current->blocked;
307
308 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 296 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
309 if (signr > 0) { 297 if (signr > 0) {
310 if (handle_signal(signr, &info, &ka, oldset, 298 handle_signal(signr, &info, &ka, regs, syscall);
311 regs, syscall) == 0) {
312 /* a signal was successfully delivered; the saved
313 * sigmask will have been stored in the signal frame,
314 * and will be restored by sigreturn, so we can simply
315 * clear the TIF_RESTORE_SIGMASK flag */
316 if (test_thread_flag(TIF_RESTORE_SIGMASK))
317 clear_thread_flag(TIF_RESTORE_SIGMASK);
318
319 tracehook_signal_handler(signr, &info, &ka, regs, 0);
320 }
321
322 return; 299 return;
323 } 300 }
324 301
@@ -343,10 +320,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
343 320
344 /* if there's no signal to deliver, we just put the saved sigmask 321 /* if there's no signal to deliver, we just put the saved sigmask
345 * back */ 322 * back */
346 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 323 restore_saved_sigmask();
347 clear_thread_flag(TIF_RESTORE_SIGMASK);
348 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
349 }
350} 324}
351 325
352/* 326/*
@@ -357,14 +331,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags,
357 int syscall) 331 int syscall)
358{ 332{
359 /* deal with pending signal delivery */ 333 /* deal with pending signal delivery */
360 if (thread_info_flags & ((1 << TIF_SIGPENDING) | 334 if (thread_info_flags & (1 << TIF_SIGPENDING))
361 (1 << TIF_RESTORE_SIGMASK)))
362 do_signal(regs, syscall); 335 do_signal(regs, syscall);
363 336
364 if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) { 337 if (thread_info_flags & (1 << TIF_NOTIFY_RESUME)) {
365 clear_thread_flag(TIF_NOTIFY_RESUME); 338 clear_thread_flag(TIF_NOTIFY_RESUME);
366 tracehook_notify_resume(regs); 339 tracehook_notify_resume(regs);
367 if (current->replacement_session_keyring)
368 key_replace_session_keyring();
369 } 340 }
370} 341}
diff --git a/arch/cris/arch-v10/kernel/signal.c b/arch/cris/arch-v10/kernel/signal.c
index e16f8f297f61..0bb477c13a4e 100644
--- a/arch/cris/arch-v10/kernel/signal.c
+++ b/arch/cris/arch-v10/kernel/signal.c
@@ -31,8 +31,6 @@
31 31
32#define DEBUG_SIG 0 32#define DEBUG_SIG 0
33 33
34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
35
36/* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */ 34/* a syscall in Linux/CRIS is a break 13 instruction which is 2 bytes */
37/* manipulate regs so that upon return, it will be re-executed */ 35/* manipulate regs so that upon return, it will be re-executed */
38 36
@@ -176,7 +174,6 @@ asmlinkage int sys_sigreturn(long r10, long r11, long r12, long r13, long mof,
176 sizeof(frame->extramask)))) 174 sizeof(frame->extramask))))
177 goto badframe; 175 goto badframe;
178 176
179 sigdelsetmask(&set, ~_BLOCKABLE);
180 set_current_blocked(&set); 177 set_current_blocked(&set);
181 178
182 if (restore_sigcontext(regs, &frame->sc)) 179 if (restore_sigcontext(regs, &frame->sc))
@@ -212,7 +209,6 @@ asmlinkage int sys_rt_sigreturn(long r10, long r11, long r12, long r13,
212 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 209 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
213 goto badframe; 210 goto badframe;
214 211
215 sigdelsetmask(&set, ~_BLOCKABLE);
216 set_current_blocked(&set); 212 set_current_blocked(&set);
217 213
218 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 214 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -415,10 +411,11 @@ give_sigsegv:
415 * OK, we're invoking a handler 411 * OK, we're invoking a handler
416 */ 412 */
417 413
418static inline int handle_signal(int canrestart, unsigned long sig, 414static inline void handle_signal(int canrestart, unsigned long sig,
419 siginfo_t *info, struct k_sigaction *ka, 415 siginfo_t *info, struct k_sigaction *ka,
420 sigset_t *oldset, struct pt_regs *regs) 416 struct pt_regs *regs)
421{ 417{
418 sigset_t *oldset = sigmask_to_save();
422 int ret; 419 int ret;
423 420
424 /* Are we from a system call? */ 421 /* Are we from a system call? */
@@ -456,9 +453,7 @@ static inline int handle_signal(int canrestart, unsigned long sig,
456 ret = setup_frame(sig, ka, oldset, regs); 453 ret = setup_frame(sig, ka, oldset, regs);
457 454
458 if (ret == 0) 455 if (ret == 0)
459 block_sigmask(ka, sig); 456 signal_delivered(sig, info, ka, regs, 0);
460
461 return ret;
462} 457}
463 458
464/* 459/*
@@ -478,7 +473,6 @@ void do_signal(int canrestart, struct pt_regs *regs)
478 siginfo_t info; 473 siginfo_t info;
479 int signr; 474 int signr;
480 struct k_sigaction ka; 475 struct k_sigaction ka;
481 sigset_t *oldset;
482 476
483 /* 477 /*
484 * We want the common case to go fast, which 478 * We want the common case to go fast, which
@@ -489,23 +483,10 @@ void do_signal(int canrestart, struct pt_regs *regs)
489 if (!user_mode(regs)) 483 if (!user_mode(regs))
490 return; 484 return;
491 485
492 if (test_thread_flag(TIF_RESTORE_SIGMASK))
493 oldset = &current->saved_sigmask;
494 else
495 oldset = &current->blocked;
496
497 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 486 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
498 if (signr > 0) { 487 if (signr > 0) {
499 /* Whee! Actually deliver the signal. */ 488 /* Whee! Actually deliver the signal. */
500 if (handle_signal(canrestart, signr, &info, &ka, 489 handle_signal(canrestart, signr, &info, &ka, regs);
501 oldset, regs)) {
502 /* a signal was successfully delivered; the saved
503 * sigmask will have been stored in the signal frame,
504 * and will be restored by sigreturn, so we can simply
505 * clear the TIF_RESTORE_SIGMASK flag */
506 if (test_thread_flag(TIF_RESTORE_SIGMASK))
507 clear_thread_flag(TIF_RESTORE_SIGMASK);
508 }
509 return; 490 return;
510 } 491 }
511 492
@@ -525,8 +506,5 @@ void do_signal(int canrestart, struct pt_regs *regs)
525 506
526 /* if there's no signal to deliver, we just put the saved sigmask 507 /* if there's no signal to deliver, we just put the saved sigmask
527 * back */ 508 * back */
528 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 509 restore_saved_sigmask();
529 clear_thread_flag(TIF_RESTORE_SIGMASK);
530 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
531 }
532} 510}
diff --git a/arch/cris/arch-v32/kernel/signal.c b/arch/cris/arch-v32/kernel/signal.c
index b338d8fc0c12..b60d1b65a426 100644
--- a/arch/cris/arch-v32/kernel/signal.c
+++ b/arch/cris/arch-v32/kernel/signal.c
@@ -24,9 +24,6 @@
24 24
25extern unsigned long cris_signal_return_page; 25extern unsigned long cris_signal_return_page;
26 26
27/* Flag to check if a signal is blockable. */
28#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
29
30/* 27/*
31 * A syscall in CRIS is really a "break 13" instruction, which is 2 28 * A syscall in CRIS is really a "break 13" instruction, which is 2
32 * bytes. The registers is manipulated so upon return the instruction 29 * bytes. The registers is manipulated so upon return the instruction
@@ -167,7 +164,6 @@ sys_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
167 sizeof(frame->extramask)))) 164 sizeof(frame->extramask))))
168 goto badframe; 165 goto badframe;
169 166
170 sigdelsetmask(&set, ~_BLOCKABLE);
171 set_current_blocked(&set); 167 set_current_blocked(&set);
172 168
173 if (restore_sigcontext(regs, &frame->sc)) 169 if (restore_sigcontext(regs, &frame->sc))
@@ -208,7 +204,6 @@ sys_rt_sigreturn(long r10, long r11, long r12, long r13, long mof, long srp,
208 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 204 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
209 goto badframe; 205 goto badframe;
210 206
211 sigdelsetmask(&set, ~_BLOCKABLE);
212 set_current_blocked(&set); 207 set_current_blocked(&set);
213 208
214 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 209 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -434,11 +429,12 @@ give_sigsegv:
434} 429}
435 430
436/* Invoke a signal handler to, well, handle the signal. */ 431/* Invoke a signal handler to, well, handle the signal. */
437static inline int 432static inline void
438handle_signal(int canrestart, unsigned long sig, 433handle_signal(int canrestart, unsigned long sig,
439 siginfo_t *info, struct k_sigaction *ka, 434 siginfo_t *info, struct k_sigaction *ka,
440 sigset_t *oldset, struct pt_regs * regs) 435 struct pt_regs * regs)
441{ 436{
437 sigset_t *oldset = sigmask_to_save();
442 int ret; 438 int ret;
443 439
444 /* Check if this got called from a system call. */ 440 /* Check if this got called from a system call. */
@@ -489,9 +485,7 @@ handle_signal(int canrestart, unsigned long sig,
489 ret = setup_frame(sig, ka, oldset, regs); 485 ret = setup_frame(sig, ka, oldset, regs);
490 486
491 if (ret == 0) 487 if (ret == 0)
492 block_sigmask(ka, sig); 488 signal_delivered(sig, info, ka, regs, 0);
493
494 return ret;
495} 489}
496 490
497/* 491/*
@@ -511,7 +505,6 @@ do_signal(int canrestart, struct pt_regs *regs)
511 int signr; 505 int signr;
512 siginfo_t info; 506 siginfo_t info;
513 struct k_sigaction ka; 507 struct k_sigaction ka;
514 sigset_t *oldset;
515 508
516 /* 509 /*
517 * The common case should go fast, which is why this point is 510 * The common case should go fast, which is why this point is
@@ -521,25 +514,11 @@ do_signal(int canrestart, struct pt_regs *regs)
521 if (!user_mode(regs)) 514 if (!user_mode(regs))
522 return; 515 return;
523 516
524 if (test_thread_flag(TIF_RESTORE_SIGMASK))
525 oldset = &current->saved_sigmask;
526 else
527 oldset = &current->blocked;
528
529 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 517 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
530 518
531 if (signr > 0) { 519 if (signr > 0) {
532 /* Whee! Actually deliver the signal. */ 520 /* Whee! Actually deliver the signal. */
533 if (handle_signal(canrestart, signr, &info, &ka, 521 handle_signal(canrestart, signr, &info, &ka, regs);
534 oldset, regs)) {
535 /* a signal was successfully delivered; the saved
536 * sigmask will have been stored in the signal frame,
537 * and will be restored by sigreturn, so we can simply
538 * clear the TIF_RESTORE_SIGMASK flag */
539 if (test_thread_flag(TIF_RESTORE_SIGMASK))
540 clear_thread_flag(TIF_RESTORE_SIGMASK);
541 }
542
543 return; 522 return;
544 } 523 }
545 524
@@ -560,10 +539,7 @@ do_signal(int canrestart, struct pt_regs *regs)
560 539
561 /* if there's no signal to deliver, we just put the saved sigmask 540 /* if there's no signal to deliver, we just put the saved sigmask
562 * back */ 541 * back */
563 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 542 restore_saved_sigmask();
564 clear_thread_flag(TIF_RESTORE_SIGMASK);
565 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
566 }
567} 543}
568 544
569asmlinkage void 545asmlinkage void
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index 234891c74e2b..ce4e51793151 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -15,9 +15,6 @@
15typedef unsigned short __kernel_mode_t; 15typedef unsigned short __kernel_mode_t;
16#define __kernel_mode_t __kernel_mode_t 16#define __kernel_mode_t __kernel_mode_t
17 17
18typedef unsigned short __kernel_nlink_t;
19#define __kernel_nlink_t __kernel_nlink_t
20
21typedef unsigned short __kernel_ipc_pid_t; 18typedef unsigned short __kernel_ipc_pid_t;
22#define __kernel_ipc_pid_t __kernel_ipc_pid_t 19#define __kernel_ipc_pid_t __kernel_ipc_pid_t
23 20
diff --git a/arch/cris/kernel/ptrace.c b/arch/cris/kernel/ptrace.c
index d114ad3da9b1..58d44ee1a71f 100644
--- a/arch/cris/kernel/ptrace.c
+++ b/arch/cris/kernel/ptrace.c
@@ -40,7 +40,5 @@ void do_notify_resume(int canrestart, struct pt_regs *regs,
40 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 40 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
41 clear_thread_flag(TIF_NOTIFY_RESUME); 41 clear_thread_flag(TIF_NOTIFY_RESUME);
42 tracehook_notify_resume(regs); 42 tracehook_notify_resume(regs);
43 if (current->replacement_session_keyring)
44 key_replace_session_keyring();
45 } 43 }
46} 44}
diff --git a/arch/frv/include/asm/posix_types.h b/arch/frv/include/asm/posix_types.h
index 3f34cb45fbb3..fe512af74a5a 100644
--- a/arch/frv/include/asm/posix_types.h
+++ b/arch/frv/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/frv/include/asm/thread_info.h b/arch/frv/include/asm/thread_info.h
index 54ab13a0de41..0ff03a33c81e 100644
--- a/arch/frv/include/asm/thread_info.h
+++ b/arch/frv/include/asm/thread_info.h
@@ -94,8 +94,8 @@ register struct thread_info *__current_thread_info asm("gr15");
94#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ 94#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
95#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */ 95#define TIF_SINGLESTEP 4 /* restore singlestep on return to user mode */
96#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */ 96#define TIF_RESTORE_SIGMASK 5 /* restore signal mask in do_signal() */
97#define TIF_POLLING_NRFLAG 16 /* true if poll_idle() is polling TIF_NEED_RESCHED */ 97#define TIF_POLLING_NRFLAG 6 /* true if poll_idle() is polling TIF_NEED_RESCHED */
98#define TIF_MEMDIE 17 /* is terminating due to OOM killer */ 98#define TIF_MEMDIE 7 /* is terminating due to OOM killer */
99 99
100#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) 100#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) 101#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
@@ -105,8 +105,16 @@ register struct thread_info *__current_thread_info asm("gr15");
105#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK) 105#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
106#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 106#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
107 107
108#define _TIF_WORK_MASK 0x0000FFFE /* work to do on interrupt/exception return */ 108/* work to do on interrupt/exception return */
109#define _TIF_ALLWORK_MASK 0x0000FFFF /* work to do on any return to u-space */ 109#define _TIF_WORK_MASK \
110 (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_SINGLESTEP)
111
112/* work to do on any return to u-space */
113#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_SYSCALL_TRACE)
114
115#if _TIF_ALLWORK_MASK >= 0x2000
116#error "_TIF_ALLWORK_MASK won't fit in an ANDI now (see entry.S)"
117#endif
110 118
111/* 119/*
112 * Thread-synchronous status. 120 * Thread-synchronous status.
diff --git a/arch/frv/kernel/entry.S b/arch/frv/kernel/entry.S
index 5ba23f715ea5..7d5e000fd32e 100644
--- a/arch/frv/kernel/entry.S
+++ b/arch/frv/kernel/entry.S
@@ -905,18 +905,19 @@ __syscall_call:
905__syscall_exit: 905__syscall_exit:
906 LEDS 0x6300 906 LEDS 0x6300
907 907
908 sti gr8,@(gr28,#REG_GR(8)) ; save return value 908 # keep current PSR in GR23
909 movsg psr,gr23
909 910
910 # rebuild saved psr - execve will change it for init/main.c
911 ldi @(gr28,#REG_PSR),gr22 911 ldi @(gr28,#REG_PSR),gr22
912
913 sti.p gr8,@(gr28,#REG_GR(8)) ; save return value
914
915 # rebuild saved psr - execve will change it for init/main.c
912 srli gr22,#1,gr5 916 srli gr22,#1,gr5
913 andi.p gr22,#~PSR_PS,gr22 917 andi.p gr22,#~PSR_PS,gr22
914 andi gr5,#PSR_PS,gr5 918 andi gr5,#PSR_PS,gr5
915 or gr5,gr22,gr22 919 or gr5,gr22,gr22
916 ori gr22,#PSR_S,gr22 920 ori.p gr22,#PSR_S,gr22
917
918 # keep current PSR in GR23
919 movsg psr,gr23
920 921
921 # make sure we don't miss an interrupt setting need_resched or sigpending between 922 # make sure we don't miss an interrupt setting need_resched or sigpending between
922 # sampling and the RETT 923 # sampling and the RETT
@@ -924,9 +925,7 @@ __syscall_exit:
924 movgs gr23,psr 925 movgs gr23,psr
925 926
926 ldi @(gr15,#TI_FLAGS),gr4 927 ldi @(gr15,#TI_FLAGS),gr4
927 sethi.p %hi(_TIF_ALLWORK_MASK),gr5 928 andicc gr4,#_TIF_ALLWORK_MASK,gr0,icc0
928 setlo %lo(_TIF_ALLWORK_MASK),gr5
929 andcc gr4,gr5,gr0,icc0
930 bne icc0,#0,__syscall_exit_work 929 bne icc0,#0,__syscall_exit_work
931 930
932 # restore all registers and return 931 # restore all registers and return
@@ -1111,9 +1110,7 @@ __entry_resume_userspace:
1111__entry_return_from_user_interrupt: 1110__entry_return_from_user_interrupt:
1112 LEDS 0x6402 1111 LEDS 0x6402
1113 ldi @(gr15,#TI_FLAGS),gr4 1112 ldi @(gr15,#TI_FLAGS),gr4
1114 sethi.p %hi(_TIF_WORK_MASK),gr5 1113 andicc gr4,#_TIF_WORK_MASK,gr0,icc0
1115 setlo %lo(_TIF_WORK_MASK),gr5
1116 andcc gr4,gr5,gr0,icc0
1117 beq icc0,#1,__entry_return_direct 1114 beq icc0,#1,__entry_return_direct
1118 1115
1119__entry_work_pending: 1116__entry_work_pending:
@@ -1133,9 +1130,7 @@ __entry_work_resched:
1133 1130
1134 LEDS 0x6401 1131 LEDS 0x6401
1135 ldi @(gr15,#TI_FLAGS),gr4 1132 ldi @(gr15,#TI_FLAGS),gr4
1136 sethi.p %hi(_TIF_WORK_MASK),gr5 1133 andicc gr4,#_TIF_WORK_MASK,gr0,icc0
1137 setlo %lo(_TIF_WORK_MASK),gr5
1138 andcc gr4,gr5,gr0,icc0
1139 beq icc0,#1,__entry_return_direct 1134 beq icc0,#1,__entry_return_direct
1140 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0 1135 andicc gr4,#_TIF_NEED_RESCHED,gr0,icc0
1141 bne icc0,#1,__entry_work_resched 1136 bne icc0,#1,__entry_work_resched
@@ -1163,7 +1158,9 @@ __syscall_trace_entry:
1163 # perform syscall exit tracing 1158 # perform syscall exit tracing
1164__syscall_exit_work: 1159__syscall_exit_work:
1165 LEDS 0x6340 1160 LEDS 0x6340
1166 andicc gr4,#_TIF_SYSCALL_TRACE,gr0,icc0 1161 andicc gr22,#PSR_PS,gr0,icc1 ; don't handle on return to kernel mode
1162 andicc.p gr4,#_TIF_SYSCALL_TRACE,gr0,icc0
1163 bne icc1,#0,__entry_return_direct
1167 beq icc0,#1,__entry_work_pending 1164 beq icc0,#1,__entry_work_pending
1168 1165
1169 movsg psr,gr23 1166 movsg psr,gr23
diff --git a/arch/frv/kernel/signal.c b/arch/frv/kernel/signal.c
index 8cf5dca01758..864c2f0d497b 100644
--- a/arch/frv/kernel/signal.c
+++ b/arch/frv/kernel/signal.c
@@ -28,8 +28,6 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33struct fdpic_func_descriptor { 31struct fdpic_func_descriptor {
34 unsigned long text; 32 unsigned long text;
35 unsigned long GOT; 33 unsigned long GOT;
@@ -149,7 +147,6 @@ asmlinkage int sys_sigreturn(void)
149 __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask))) 147 __copy_from_user(&set.sig[1], &frame->extramask, sizeof(frame->extramask)))
150 goto badframe; 148 goto badframe;
151 149
152 sigdelsetmask(&set, ~_BLOCKABLE);
153 set_current_blocked(&set); 150 set_current_blocked(&set);
154 151
155 if (restore_sigcontext(&frame->sc, &gr8)) 152 if (restore_sigcontext(&frame->sc, &gr8))
@@ -172,7 +169,6 @@ asmlinkage int sys_rt_sigreturn(void)
172 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 169 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
173 goto badframe; 170 goto badframe;
174 171
175 sigdelsetmask(&set, ~_BLOCKABLE);
176 set_current_blocked(&set); 172 set_current_blocked(&set);
177 173
178 if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8)) 174 if (restore_sigcontext(&frame->uc.uc_mcontext, &gr8))
@@ -426,9 +422,10 @@ give_sigsegv:
426/* 422/*
427 * OK, we're invoking a handler 423 * OK, we're invoking a handler
428 */ 424 */
429static int handle_signal(unsigned long sig, siginfo_t *info, 425static void handle_signal(unsigned long sig, siginfo_t *info,
430 struct k_sigaction *ka, sigset_t *oldset) 426 struct k_sigaction *ka)
431{ 427{
428 sigset_t *oldset = sigmask_to_save();
432 int ret; 429 int ret;
433 430
434 /* Are we from a system call? */ 431 /* Are we from a system call? */
@@ -460,11 +457,11 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
460 else 457 else
461 ret = setup_frame(sig, ka, oldset); 458 ret = setup_frame(sig, ka, oldset);
462 459
463 if (ret == 0) 460 if (ret)
464 block_sigmask(ka, sig); 461 return;
465
466 return ret;
467 462
463 signal_delivered(sig, info, ka, __frame,
464 test_thread_flag(TIF_SINGLESTEP));
468} /* end handle_signal() */ 465} /* end handle_signal() */
469 466
470/*****************************************************************************/ 467/*****************************************************************************/
@@ -477,44 +474,14 @@ static void do_signal(void)
477{ 474{
478 struct k_sigaction ka; 475 struct k_sigaction ka;
479 siginfo_t info; 476 siginfo_t info;
480 sigset_t *oldset;
481 int signr; 477 int signr;
482 478
483 /*
484 * We want the common case to go fast, which
485 * is why we may in certain cases get here from
486 * kernel mode. Just return without doing anything
487 * if so.
488 */
489 if (!user_mode(__frame))
490 return;
491
492 if (try_to_freeze())
493 goto no_signal;
494
495 if (test_thread_flag(TIF_RESTORE_SIGMASK))
496 oldset = &current->saved_sigmask;
497 else
498 oldset = &current->blocked;
499
500 signr = get_signal_to_deliver(&info, &ka, __frame, NULL); 479 signr = get_signal_to_deliver(&info, &ka, __frame, NULL);
501 if (signr > 0) { 480 if (signr > 0) {
502 if (handle_signal(signr, &info, &ka, oldset) == 0) { 481 handle_signal(signr, &info, &ka);
503 /* a signal was successfully delivered; the saved
504 * sigmask will have been stored in the signal frame,
505 * and will be restored by sigreturn, so we can simply
506 * clear the TIF_RESTORE_SIGMASK flag */
507 if (test_thread_flag(TIF_RESTORE_SIGMASK))
508 clear_thread_flag(TIF_RESTORE_SIGMASK);
509
510 tracehook_signal_handler(signr, &info, &ka, __frame,
511 test_thread_flag(TIF_SINGLESTEP));
512 }
513
514 return; 482 return;
515 } 483 }
516 484
517no_signal:
518 /* Did we come from a system call? */ 485 /* Did we come from a system call? */
519 if (__frame->syscallno != -1) { 486 if (__frame->syscallno != -1) {
520 /* Restart the system call - no handlers present */ 487 /* Restart the system call - no handlers present */
@@ -536,11 +503,7 @@ no_signal:
536 503
537 /* if there's no signal to deliver, we just put the saved sigmask 504 /* if there's no signal to deliver, we just put the saved sigmask
538 * back */ 505 * back */
539 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 506 restore_saved_sigmask();
540 clear_thread_flag(TIF_RESTORE_SIGMASK);
541 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
542 }
543
544} /* end do_signal() */ 507} /* end do_signal() */
545 508
546/*****************************************************************************/ 509/*****************************************************************************/
@@ -555,15 +518,13 @@ asmlinkage void do_notify_resume(__u32 thread_info_flags)
555 clear_thread_flag(TIF_SINGLESTEP); 518 clear_thread_flag(TIF_SINGLESTEP);
556 519
557 /* deal with pending signal delivery */ 520 /* deal with pending signal delivery */
558 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 521 if (thread_info_flags & _TIF_SIGPENDING)
559 do_signal(); 522 do_signal();
560 523
561 /* deal with notification on about to resume userspace execution */ 524 /* deal with notification on about to resume userspace execution */
562 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 525 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
563 clear_thread_flag(TIF_NOTIFY_RESUME); 526 clear_thread_flag(TIF_NOTIFY_RESUME);
564 tracehook_notify_resume(__frame); 527 tracehook_notify_resume(__frame);
565 if (current->replacement_session_keyring)
566 key_replace_session_keyring();
567 } 528 }
568 529
569} /* end do_notify_resume() */ 530} /* end do_notify_resume() */
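The frv signal.c hunks above are the template the rest of this series repeats per architecture: handle_signal() fetches the old mask with sigmask_to_save() instead of taking an oldset argument, success is reported through signal_delivered() (which stands in for the removed block_sigmask() call, the TIF_RESTORE_SIGMASK clearing and the tracehook_signal_handler() call), and do_signal() finishes with restore_saved_sigmask(). A condensed sketch of the resulting flow, not the literal frv code (setup_frame here stands for the setup_frame/setup_rt_frame pair, and error paths are trimmed):

	static void handle_signal(unsigned long sig, siginfo_t *info,
				  struct k_sigaction *ka)
	{
		sigset_t *oldset = sigmask_to_save();	/* saved_sigmask or blocked */

		if (setup_frame(sig, ka, oldset))	/* arch-specific frame setup */
			return;				/* failed: nothing delivered */

		/* does the blocking and restore-sigmask bookkeeping the arch
		 * code used to open-code, plus the tracing notification */
		signal_delivered(sig, info, ka, __frame,
				 test_thread_flag(TIF_SINGLESTEP));
	}

	static void do_signal(void)
	{
		struct k_sigaction ka;
		siginfo_t info;
		int signr = get_signal_to_deliver(&info, &ka, __frame, NULL);

		if (signr > 0) {
			handle_signal(signr, &info, &ka);
			return;
		}
		/* ... syscall restart handling ... */
		restore_saved_sigmask();	/* no signal: put the saved mask back */
	}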
diff --git a/arch/h8300/include/asm/posix_types.h b/arch/h8300/include/asm/posix_types.h
index bc4c34efb1ad..91e62ba4c7b0 100644
--- a/arch/h8300/include/asm/posix_types.h
+++ b/arch/h8300/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/h8300/kernel/setup.c b/arch/h8300/kernel/setup.c
index 68d651081bd3..d0b1607f2711 100644
--- a/arch/h8300/kernel/setup.c
+++ b/arch/h8300/kernel/setup.c
@@ -35,6 +35,7 @@
35#include <asm/setup.h> 35#include <asm/setup.h>
36#include <asm/irq.h> 36#include <asm/irq.h>
37#include <asm/pgtable.h> 37#include <asm/pgtable.h>
38#include <asm/sections.h>
38 39
39#if defined(__H8300H__) 40#if defined(__H8300H__)
40#define CPU "H8/300H" 41#define CPU "H8/300H"
@@ -54,7 +55,6 @@ unsigned long memory_end;
54 55
55char __initdata command_line[COMMAND_LINE_SIZE]; 56char __initdata command_line[COMMAND_LINE_SIZE];
56 57
57extern int _stext, _etext, _sdata, _edata, _sbss, _ebss, _end;
58extern int _ramstart, _ramend; 58extern int _ramstart, _ramend;
59extern char _target_name[]; 59extern char _target_name[];
60extern void h8300_gpio_init(void); 60extern void h8300_gpio_init(void);
@@ -119,9 +119,9 @@ void __init setup_arch(char **cmdline_p)
119 memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS; 119 memory_end = CONFIG_BLKDEV_RESERVE_ADDRESS;
120#endif 120#endif
121 121
122 init_mm.start_code = (unsigned long) &_stext; 122 init_mm.start_code = (unsigned long) _stext;
123 init_mm.end_code = (unsigned long) &_etext; 123 init_mm.end_code = (unsigned long) _etext;
124 init_mm.end_data = (unsigned long) &_edata; 124 init_mm.end_data = (unsigned long) _edata;
125 init_mm.brk = (unsigned long) 0; 125 init_mm.brk = (unsigned long) 0;
126 126
127#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT) 127#if (defined(CONFIG_H8300H_SIM) || defined(CONFIG_H8S_SIM)) && defined(CONFIG_GDB_MAGICPRINT)
@@ -134,15 +134,12 @@ void __init setup_arch(char **cmdline_p)
134 printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n"); 134 printk(KERN_INFO "H8/300 series support by Yoshinori Sato <ysato@users.sourceforge.jp>\n");
135 135
136#ifdef DEBUG 136#ifdef DEBUG
137 printk(KERN_DEBUG "KERNEL -> TEXT=0x%06x-0x%06x DATA=0x%06x-0x%06x " 137 printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p DATA=0x%p-0x%p "
138 "BSS=0x%06x-0x%06x\n", (int) &_stext, (int) &_etext, 138 "BSS=0x%p-0x%p\n", _stext, _etext, _sdata, _edata, __bss_start,
139 (int) &_sdata, (int) &_edata, 139 __bss_stop);
140 (int) &_sbss, (int) &_ebss); 140 printk(KERN_DEBUG "KERNEL -> ROMFS=0x%p-0x%06lx MEM=0x%06lx-0x%06lx "
141 printk(KERN_DEBUG "KERNEL -> ROMFS=0x%06x-0x%06x MEM=0x%06x-0x%06x " 141 "STACK=0x%06lx-0x%p\n", __bss_stop, memory_start, memory_start,
142 "STACK=0x%06x-0x%06x\n", 142 memory_end, memory_end, &_ramend);
143 (int) &_ebss, (int) memory_start,
144 (int) memory_start, (int) memory_end,
145 (int) memory_end, (int) &_ramend);
146#endif 143#endif
147 144
148#ifdef CONFIG_DEFAULT_CMDLINE 145#ifdef CONFIG_DEFAULT_CMDLINE
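The setup.c conversion drops the local "extern int _stext, _etext, ...;" declarations in favour of <asm/sections.h>, where the linker-script symbols are declared as character arrays; that is why the "&" and "(int)" casts disappear and the debug printk can hand the symbols straight to %p. Roughly (a sketch of the resulting idiom, not a complete function):

	#include <asm/sections.h>	/* extern char _stext[], _etext[], _sdata[], ...; */

	init_mm.start_code = (unsigned long) _stext;	/* array name decays to an address */
	init_mm.end_code   = (unsigned long) _etext;
	printk(KERN_DEBUG "KERNEL -> TEXT=0x%p-0x%p\n", _stext, _etext);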
diff --git a/arch/h8300/kernel/signal.c b/arch/h8300/kernel/signal.c
index d4b0555d2904..fca10378701b 100644
--- a/arch/h8300/kernel/signal.c
+++ b/arch/h8300/kernel/signal.c
@@ -47,8 +47,6 @@
47#include <asm/traps.h> 47#include <asm/traps.h>
48#include <asm/ucontext.h> 48#include <asm/ucontext.h>
49 49
50#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
51
52/* 50/*
53 * Atomically swap in the new signal mask, and wait for a signal. 51 * Atomically swap in the new signal mask, and wait for a signal.
54 */ 52 */
@@ -186,7 +184,6 @@ asmlinkage int do_sigreturn(unsigned long __unused,...)
186 sizeof(frame->extramask)))) 184 sizeof(frame->extramask))))
187 goto badframe; 185 goto badframe;
188 186
189 sigdelsetmask(&set, ~_BLOCKABLE);
190 set_current_blocked(&set); 187 set_current_blocked(&set);
191 188
192 if (restore_sigcontext(regs, &frame->sc, &er0)) 189 if (restore_sigcontext(regs, &frame->sc, &er0))
@@ -211,7 +208,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused,...)
211 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 208 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
212 goto badframe; 209 goto badframe;
213 210
214 sigdelsetmask(&set, ~_BLOCKABLE);
215 set_current_blocked(&set); 211 set_current_blocked(&set);
216 212
217 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0)) 213 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &er0))
@@ -412,8 +408,9 @@ give_sigsegv:
412 */ 408 */
413static void 409static void
414handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 410handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
415 sigset_t *oldset, struct pt_regs * regs) 411 struct pt_regs * regs)
416{ 412{
413 sigset_t *oldset = sigmask_to_save();
417 int ret; 414 int ret;
418 /* are we from a system call? */ 415 /* are we from a system call? */
419 if (regs->orig_er0 >= 0) { 416 if (regs->orig_er0 >= 0) {
@@ -441,10 +438,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
441 else 438 else
442 ret = setup_frame(sig, ka, oldset, regs); 439 ret = setup_frame(sig, ka, oldset, regs);
443 440
444 if (!ret) { 441 if (!ret)
445 block_sigmask(ka, sig); 442 signal_delivered(sig, info, ka, regs, 0);
446 clear_thread_flag(TIF_RESTORE_SIGMASK);
447 }
448} 443}
449 444
450/* 445/*
@@ -457,7 +452,6 @@ statis void do_signal(struct pt_regs *regs)
457 siginfo_t info; 452 siginfo_t info;
458 int signr; 453 int signr;
459 struct k_sigaction ka; 454 struct k_sigaction ka;
460 sigset_t *oldset;
461 455
462 /* 456 /*
463 * We want the common case to go fast, which 457 * We want the common case to go fast, which
@@ -468,23 +462,14 @@ statis void do_signal(struct pt_regs *regs)
468 if ((regs->ccr & 0x10)) 462 if ((regs->ccr & 0x10))
469 return; 463 return;
470 464
471 if (try_to_freeze())
472 goto no_signal;
473
474 current->thread.esp0 = (unsigned long) regs; 465 current->thread.esp0 = (unsigned long) regs;
475 466
476 if (test_thread_flag(TIF_RESTORE_SIGMASK))
477 oldset = &current->saved_sigmask;
478 else
479 oldset = &current->blocked;
480
481 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 467 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
482 if (signr > 0) { 468 if (signr > 0) {
483 /* Whee! Actually deliver the signal. */ 469 /* Whee! Actually deliver the signal. */
484 handle_signal(signr, &info, &ka, oldset, regs); 470 handle_signal(signr, &info, &ka, regs);
485 return; 471 return;
486 } 472 }
487 no_signal:
488 /* Did we come from a system call? */ 473 /* Did we come from a system call? */
489 if (regs->orig_er0 >= 0) { 474 if (regs->orig_er0 >= 0) {
490 /* Restart the system call - no handlers present */ 475 /* Restart the system call - no handlers present */
@@ -501,8 +486,7 @@ statis void do_signal(struct pt_regs *regs)
501 } 486 }
502 487
503 /* If there's no signal to deliver, we just restore the saved mask. */ 488 /* If there's no signal to deliver, we just restore the saved mask. */
504 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) 489 restore_saved_sigmask();
505 set_current_blocked(&current->saved_sigmask);
506} 490}
507 491
508asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags) 492asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
@@ -513,7 +497,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
513 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 497 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
514 clear_thread_flag(TIF_NOTIFY_RESUME); 498 clear_thread_flag(TIF_NOTIFY_RESUME);
515 tracehook_notify_resume(regs); 499 tracehook_notify_resume(regs);
516 if (current->replacement_session_keyring)
517 key_replace_session_keyring();
518 } 500 }
519} 501}
diff --git a/arch/h8300/mm/init.c b/arch/h8300/mm/init.c
index 973369c32a95..981e25094b1a 100644
--- a/arch/h8300/mm/init.c
+++ b/arch/h8300/mm/init.c
@@ -36,6 +36,7 @@
36#include <asm/segment.h> 36#include <asm/segment.h>
37#include <asm/page.h> 37#include <asm/page.h>
38#include <asm/pgtable.h> 38#include <asm/pgtable.h>
39#include <asm/sections.h>
39 40
40#undef DEBUG 41#undef DEBUG
41 42
@@ -123,7 +124,6 @@ void __init mem_init(void)
123 int codek = 0, datak = 0, initk = 0; 124 int codek = 0, datak = 0, initk = 0;
124 /* DAVIDM look at setup memory map generically with reserved area */ 125 /* DAVIDM look at setup memory map generically with reserved area */
125 unsigned long tmp; 126 unsigned long tmp;
126 extern char _etext, _stext, _sdata, _ebss, __init_begin, __init_end;
127 extern unsigned long _ramend, _ramstart; 127 extern unsigned long _ramend, _ramstart;
128 unsigned long len = &_ramend - &_ramstart; 128 unsigned long len = &_ramend - &_ramstart;
129 unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */ 129 unsigned long start_mem = memory_start; /* DAVIDM - these must start at end of kernel */
@@ -142,9 +142,9 @@ void __init mem_init(void)
142 /* this will put all memory onto the freelists */ 142 /* this will put all memory onto the freelists */
143 totalram_pages = free_all_bootmem(); 143 totalram_pages = free_all_bootmem();
144 144
145 codek = (&_etext - &_stext) >> 10; 145 codek = (_etext - _stext) >> 10;
146 datak = (&_ebss - &_sdata) >> 10; 146 datak = (__bss_stop - _sdata) >> 10;
147 initk = (&__init_begin - &__init_end) >> 10; 147 initk = (__init_begin - __init_end) >> 10;
148 148
149 tmp = nr_free_pages() << PAGE_SHIFT; 149 tmp = nr_free_pages() << PAGE_SHIFT;
150 printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n", 150 printk(KERN_INFO "Memory available: %luk/%luk RAM, %luk/%luk ROM (%dk kernel code, %dk data)\n",
@@ -178,22 +178,21 @@ free_initmem(void)
178{ 178{
179#ifdef CONFIG_RAMKERNEL 179#ifdef CONFIG_RAMKERNEL
180 unsigned long addr; 180 unsigned long addr;
181 extern char __init_begin, __init_end;
182/* 181/*
183 * the following code should be cool even if these sections 182 * the following code should be cool even if these sections
184 * are not page aligned. 183 * are not page aligned.
185 */ 184 */
186 addr = PAGE_ALIGN((unsigned long)(&__init_begin)); 185 addr = PAGE_ALIGN((unsigned long)(__init_begin));
187 /* next to check that the page we free is not a partial page */ 186 /* next to check that the page we free is not a partial page */
188 for (; addr + PAGE_SIZE < (unsigned long)(&__init_end); addr +=PAGE_SIZE) { 187 for (; addr + PAGE_SIZE < (unsigned long)__init_end; addr +=PAGE_SIZE) {
189 ClearPageReserved(virt_to_page(addr)); 188 ClearPageReserved(virt_to_page(addr));
190 init_page_count(virt_to_page(addr)); 189 init_page_count(virt_to_page(addr));
191 free_page(addr); 190 free_page(addr);
192 totalram_pages++; 191 totalram_pages++;
193 } 192 }
194 printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n", 193 printk(KERN_INFO "Freeing unused kernel memory: %ldk freed (0x%x - 0x%x)\n",
195 (addr - PAGE_ALIGN((long) &__init_begin)) >> 10, 194 (addr - PAGE_ALIGN((long) __init_begin)) >> 10,
196 (int)(PAGE_ALIGN((unsigned long)(&__init_begin))), 195 (int)(PAGE_ALIGN((unsigned long)__init_begin)),
197 (int)(addr - PAGE_SIZE)); 196 (int)(addr - PAGE_SIZE));
198#endif 197#endif
199} 198}
diff --git a/arch/hexagon/kernel/signal.c b/arch/hexagon/kernel/signal.c
index 434866eb0f1c..304b0808d072 100644
--- a/arch/hexagon/kernel/signal.c
+++ b/arch/hexagon/kernel/signal.c
@@ -31,8 +31,6 @@
31#include <asm/signal.h> 31#include <asm/signal.h>
32#include <asm/vdso.h> 32#include <asm/vdso.h>
33 33
34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
35
36struct rt_sigframe { 34struct rt_sigframe {
37 unsigned long tramp[2]; 35 unsigned long tramp[2];
38 struct siginfo info; 36 struct siginfo info;
@@ -149,11 +147,9 @@ sigsegv:
149/* 147/*
150 * Setup invocation of signal handler 148 * Setup invocation of signal handler
151 */ 149 */
152static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, 150static void handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
153 sigset_t *oldset, struct pt_regs *regs) 151 struct pt_regs *regs)
154{ 152{
155 int rc;
156
157 /* 153 /*
158 * If we're handling a signal that aborted a system call, 154 * If we're handling a signal that aborted a system call,
159 * set up the error return value before adding the signal 155 * set up the error return value before adding the signal
@@ -186,15 +182,12 @@ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka,
186 * Set up the stack frame; not doing the SA_SIGINFO thing. We 182 * Set up the stack frame; not doing the SA_SIGINFO thing. We
187 * only set up the rt_frame flavor. 183 * only set up the rt_frame flavor.
188 */ 184 */
189 rc = setup_rt_frame(sig, ka, info, oldset, regs);
190
191 /* If there was an error on setup, no signal was delivered. */ 185 /* If there was an error on setup, no signal was delivered. */
192 if (rc) 186 if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs) < 0)
193 return rc; 187 return;
194
195 block_sigmask(ka, sig);
196 188
197 return 0; 189 signal_delivered(sig, info, ka, regs,
190 test_thread_flag(TIF_SINGLESTEP));
198} 191}
199 192
200/* 193/*
@@ -209,34 +202,13 @@ static void do_signal(struct pt_regs *regs)
209 if (!user_mode(regs)) 202 if (!user_mode(regs))
210 return; 203 return;
211 204
212 if (try_to_freeze())
213 goto no_signal;
214
215 signo = get_signal_to_deliver(&info, &sigact, regs, NULL); 205 signo = get_signal_to_deliver(&info, &sigact, regs, NULL);
216 206
217 if (signo > 0) { 207 if (signo > 0) {
218 sigset_t *oldset; 208 handle_signal(signo, &info, &sigact, regs);
219
220 if (test_thread_flag(TIF_RESTORE_SIGMASK))
221 oldset = &current->saved_sigmask;
222 else
223 oldset = &current->blocked;
224
225 if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) {
226 /*
227 * Successful delivery case. The saved sigmask is
228 * stored in the signal frame, and will be restored
229 * by sigreturn. We can clear the TIF flag.
230 */
231 clear_thread_flag(TIF_RESTORE_SIGMASK);
232
233 tracehook_signal_handler(signo, &info, &sigact, regs,
234 test_thread_flag(TIF_SINGLESTEP));
235 }
236 return; 209 return;
237 } 210 }
238 211
239no_signal:
240 /* 212 /*
241 * If we came from a system call, handle the restart. 213 * If we came from a system call, handle the restart.
242 */ 214 */
@@ -259,10 +231,7 @@ no_signal:
259 231
260no_restart: 232no_restart:
261 /* If there's no signal to deliver, put the saved sigmask back */ 233 /* If there's no signal to deliver, put the saved sigmask back */
262 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 234 restore_saved_sigmask();
263 clear_thread_flag(TIF_RESTORE_SIGMASK);
264 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
265 }
266} 235}
267 236
268void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 237void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -273,8 +242,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
273 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 242 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
274 clear_thread_flag(TIF_NOTIFY_RESUME); 243 clear_thread_flag(TIF_NOTIFY_RESUME);
275 tracehook_notify_resume(regs); 244 tracehook_notify_resume(regs);
276 if (current->replacement_session_keyring)
277 key_replace_session_keyring();
278 } 245 }
279} 246}
280 247
@@ -303,7 +270,6 @@ asmlinkage int sys_rt_sigreturn(void)
303 if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked))) 270 if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked)))
304 goto badframe; 271 goto badframe;
305 272
306 sigdelsetmask(&blocked, ~_BLOCKABLE);
307 set_current_blocked(&blocked); 273 set_current_blocked(&blocked);
308 274
309 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 275 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
diff --git a/arch/ia64/include/asm/posix_types.h b/arch/ia64/include/asm/posix_types.h
index 7323ab9467eb..99ee1d6510cf 100644
--- a/arch/ia64/include/asm/posix_types.h
+++ b/arch/ia64/include/asm/posix_types.h
@@ -1,9 +1,6 @@
1#ifndef _ASM_IA64_POSIX_TYPES_H 1#ifndef _ASM_IA64_POSIX_TYPES_H
2#define _ASM_IA64_POSIX_TYPES_H 2#define _ASM_IA64_POSIX_TYPES_H
3 3
4typedef unsigned int __kernel_nlink_t;
5#define __kernel_nlink_t __kernel_nlink_t
6
7typedef unsigned long __kernel_sigset_t; /* at least 32 bits */ 4typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
8 5
9#include <asm-generic/posix_types.h> 6#include <asm-generic/posix_types.h>
diff --git a/arch/ia64/include/asm/thread_info.h b/arch/ia64/include/asm/thread_info.h
index 310d9734f02d..f7ee85378311 100644
--- a/arch/ia64/include/asm/thread_info.h
+++ b/arch/ia64/include/asm/thread_info.h
@@ -141,7 +141,23 @@ static inline void set_restore_sigmask(void)
141{ 141{
142 struct thread_info *ti = current_thread_info(); 142 struct thread_info *ti = current_thread_info();
143 ti->status |= TS_RESTORE_SIGMASK; 143 ti->status |= TS_RESTORE_SIGMASK;
144 set_bit(TIF_SIGPENDING, &ti->flags); 144 WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
145}
146static inline void clear_restore_sigmask(void)
147{
148 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
149}
150static inline bool test_restore_sigmask(void)
151{
152 return current_thread_info()->status & TS_RESTORE_SIGMASK;
153}
154static inline bool test_and_clear_restore_sigmask(void)
155{
156 struct thread_info *ti = current_thread_info();
157 if (!(ti->status & TS_RESTORE_SIGMASK))
158 return false;
159 ti->status &= ~TS_RESTORE_SIGMASK;
160 return true;
145} 161}
146#endif /* !__ASSEMBLY__ */ 162#endif /* !__ASSEMBLY__ */
147 163
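The ia64 thread_info.h hunk (and the matching microblaze one further down) grows the full set of TS_RESTORE_SIGMASK accessors, and set_restore_sigmask() now only warns when TIF_SIGPENDING is not already set instead of setting it, presumably because after this series the flag is only requested from paths that are already handling a pending signal. test_and_clear_restore_sigmask() exists so generic code, rather than each architecture, can decide whether the saved mask must be put back; restore_saved_sigmask() is presumably built on it along these lines (an assumption about the generic helper, which is not part of this diff):

	/* sketch of the generic helper these accessors support */
	static inline void restore_saved_sigmask(void)
	{
		if (test_and_clear_restore_sigmask())
			set_current_blocked(&current->saved_sigmask);
	}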
diff --git a/arch/ia64/kernel/perfmon.c b/arch/ia64/kernel/perfmon.c
index f00ba025375d..d7f558c1e711 100644
--- a/arch/ia64/kernel/perfmon.c
+++ b/arch/ia64/kernel/perfmon.c
@@ -604,12 +604,6 @@ pfm_unprotect_ctx_ctxsw(pfm_context_t *x, unsigned long f)
604 spin_unlock(&(x)->ctx_lock); 604 spin_unlock(&(x)->ctx_lock);
605} 605}
606 606
607static inline unsigned long
608pfm_get_unmapped_area(struct file *file, unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags, unsigned long exec)
609{
610 return get_unmapped_area(file, addr, len, pgoff, flags);
611}
612
613/* forward declaration */ 607/* forward declaration */
614static const struct dentry_operations pfmfs_dentry_operations; 608static const struct dentry_operations pfmfs_dentry_operations;
615 609
@@ -2333,8 +2327,8 @@ pfm_smpl_buffer_alloc(struct task_struct *task, struct file *filp, pfm_context_t
2333 down_write(&task->mm->mmap_sem); 2327 down_write(&task->mm->mmap_sem);
2334 2328
2335 /* find some free area in address space, must have mmap sem held */ 2329 /* find some free area in address space, must have mmap sem held */
2336 vma->vm_start = pfm_get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS, 0); 2330 vma->vm_start = get_unmapped_area(NULL, 0, size, 0, MAP_PRIVATE|MAP_ANONYMOUS);
2337 if (vma->vm_start == 0UL) { 2331 if (IS_ERR_VALUE(vma->vm_start)) {
2338 DPRINT(("Cannot find unmapped area for size %ld\n", size)); 2332 DPRINT(("Cannot find unmapped area for size %ld\n", size));
2339 up_write(&task->mm->mmap_sem); 2333 up_write(&task->mm->mmap_sem);
2340 goto error; 2334 goto error;
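The perfmon.c hunk deletes a wrapper that added nothing and, more importantly, fixes the failure check: get_unmapped_area() reports errors as a negative errno encoded in its unsigned return value, not as 0, so the test becomes IS_ERR_VALUE(). That macro is, roughly, a range check on the top MAX_ERRNO values of unsigned long (paraphrased from include/linux/err.h, not part of this diff):

	/* true when x is -MAX_ERRNO..-1 cast to unsigned long */
	#define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)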
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 5e0e86ddb12f..dd6fc1449741 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -199,8 +199,6 @@ do_notify_resume_user(sigset_t *unused, struct sigscratch *scr, long in_syscall)
199 if (test_thread_flag(TIF_NOTIFY_RESUME)) { 199 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
200 clear_thread_flag(TIF_NOTIFY_RESUME); 200 clear_thread_flag(TIF_NOTIFY_RESUME);
201 tracehook_notify_resume(&scr->pt); 201 tracehook_notify_resume(&scr->pt);
202 if (current->replacement_session_keyring)
203 key_replace_session_keyring();
204 } 202 }
205 203
206 /* copy user rbs to kernel rbs */ 204 /* copy user rbs to kernel rbs */
diff --git a/arch/ia64/kernel/signal.c b/arch/ia64/kernel/signal.c
index 7523501d3bc0..a199be1fe619 100644
--- a/arch/ia64/kernel/signal.c
+++ b/arch/ia64/kernel/signal.c
@@ -30,7 +30,6 @@
30 30
31#define DEBUG_SIG 0 31#define DEBUG_SIG 0
32#define STACK_ALIGN 16 /* minimal alignment for stack pointer */ 32#define STACK_ALIGN 16 /* minimal alignment for stack pointer */
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34 33
35#if _NSIG_WORDS > 1 34#if _NSIG_WORDS > 1
36# define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t)) 35# define PUT_SIGSET(k,u) __copy_to_user((u)->sig, (k)->sig, sizeof(sigset_t))
@@ -200,7 +199,6 @@ ia64_rt_sigreturn (struct sigscratch *scr)
200 if (GET_SIGSET(&set, &sc->sc_mask)) 199 if (GET_SIGSET(&set, &sc->sc_mask))
201 goto give_sigsegv; 200 goto give_sigsegv;
202 201
203 sigdelsetmask(&set, ~_BLOCKABLE);
204 set_current_blocked(&set); 202 set_current_blocked(&set);
205 203
206 if (restore_sigcontext(sc, scr)) 204 if (restore_sigcontext(sc, scr))
@@ -415,18 +413,13 @@ setup_frame (int sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *set,
415} 413}
416 414
417static long 415static long
418handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info, sigset_t *oldset, 416handle_signal (unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
419 struct sigscratch *scr) 417 struct sigscratch *scr)
420{ 418{
421 if (!setup_frame(sig, ka, info, oldset, scr)) 419 if (!setup_frame(sig, ka, info, sigmask_to_save(), scr))
422 return 0; 420 return 0;
423 421
424 block_sigmask(ka, sig); 422 signal_delivered(sig, info, ka, &scr->pt,
425
426 /*
427 * Let tracing know that we've done the handler setup.
428 */
429 tracehook_signal_handler(sig, info, ka, &scr->pt,
430 test_thread_flag(TIF_SINGLESTEP)); 423 test_thread_flag(TIF_SINGLESTEP));
431 424
432 return 1; 425 return 1;
@@ -440,7 +433,6 @@ void
440ia64_do_signal (struct sigscratch *scr, long in_syscall) 433ia64_do_signal (struct sigscratch *scr, long in_syscall)
441{ 434{
442 struct k_sigaction ka; 435 struct k_sigaction ka;
443 sigset_t *oldset;
444 siginfo_t info; 436 siginfo_t info;
445 long restart = in_syscall; 437 long restart = in_syscall;
446 long errno = scr->pt.r8; 438 long errno = scr->pt.r8;
@@ -453,11 +445,6 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
453 if (!user_mode(&scr->pt)) 445 if (!user_mode(&scr->pt))
454 return; 446 return;
455 447
456 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
457 oldset = &current->saved_sigmask;
458 else
459 oldset = &current->blocked;
460
461 /* 448 /*
462 * This only loops in the rare cases of handle_signal() failing, in which case we 449 * This only loops in the rare cases of handle_signal() failing, in which case we
463 * need to push through a forced SIGSEGV. 450 * need to push through a forced SIGSEGV.
@@ -507,16 +494,8 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
507 * Whee! Actually deliver the signal. If the delivery failed, we need to 494 * Whee! Actually deliver the signal. If the delivery failed, we need to
508 * continue to iterate in this loop so we can deliver the SIGSEGV... 495 * continue to iterate in this loop so we can deliver the SIGSEGV...
509 */ 496 */
510 if (handle_signal(signr, &ka, &info, oldset, scr)) { 497 if (handle_signal(signr, &ka, &info, scr))
511 /*
512 * A signal was successfully delivered; the saved
513 * sigmask will have been stored in the signal frame,
514 * and will be restored by sigreturn, so we can simply
515 * clear the TS_RESTORE_SIGMASK flag.
516 */
517 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
518 return; 498 return;
519 }
520 } 499 }
521 500
522 /* Did we come from a system call? */ 501 /* Did we come from a system call? */
@@ -538,8 +517,5 @@ ia64_do_signal (struct sigscratch *scr, long in_syscall)
538 517
539 /* if there's no signal to deliver, we just put the saved sigmask 518 /* if there's no signal to deliver, we just put the saved sigmask
540 * back */ 519 * back */
541 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 520 restore_saved_sigmask();
542 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
543 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
544 }
545} 521}
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 609d50056a6c..d9439ef2f661 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -171,22 +171,9 @@ asmlinkage unsigned long
171ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags, 171ia64_mremap (unsigned long addr, unsigned long old_len, unsigned long new_len, unsigned long flags,
172 unsigned long new_addr) 172 unsigned long new_addr)
173{ 173{
174 extern unsigned long do_mremap (unsigned long addr, 174 addr = sys_mremap(addr, old_len, new_len, flags, new_addr);
175 unsigned long old_len, 175 if (!IS_ERR((void *) addr))
176 unsigned long new_len, 176 force_successful_syscall_return();
177 unsigned long flags,
178 unsigned long new_addr);
179
180 down_write(&current->mm->mmap_sem);
181 {
182 addr = do_mremap(addr, old_len, new_len, flags, new_addr);
183 }
184 up_write(&current->mm->mmap_sem);
185
186 if (IS_ERR((void *) addr))
187 return addr;
188
189 force_successful_syscall_return();
190 return addr; 177 return addr;
191} 178}
192 179
diff --git a/arch/m32r/include/asm/posix_types.h b/arch/m32r/include/asm/posix_types.h
index 0195850e1f88..236de26a409b 100644
--- a/arch/m32r/include/asm/posix_types.h
+++ b/arch/m32r/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/m32r/kernel/signal.c b/arch/m32r/kernel/signal.c
index f54d96993ea1..f3fb2c029cfc 100644
--- a/arch/m32r/kernel/signal.c
+++ b/arch/m32r/kernel/signal.c
@@ -28,8 +28,6 @@
28 28
29#define DEBUG_SIG 0 29#define DEBUG_SIG 0
30 30
31#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
32
33asmlinkage int 31asmlinkage int
34sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 32sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
35 unsigned long r2, unsigned long r3, unsigned long r4, 33 unsigned long r2, unsigned long r3, unsigned long r4,
@@ -111,7 +109,6 @@ sys_rt_sigreturn(unsigned long r0, unsigned long r1,
111 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 109 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
112 goto badframe; 110 goto badframe;
113 111
114 sigdelsetmask(&set, ~_BLOCKABLE);
115 set_current_blocked(&set); 112 set_current_blocked(&set);
116 113
117 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result)) 114 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &result))
@@ -267,9 +264,9 @@ static int prev_insn(struct pt_regs *regs)
267 * OK, we're invoking a handler 264 * OK, we're invoking a handler
268 */ 265 */
269 266
270static int 267static void
271handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 268handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
272 sigset_t *oldset, struct pt_regs *regs) 269 struct pt_regs *regs)
273{ 270{
274 /* Are we from a system call? */ 271 /* Are we from a system call? */
275 if (regs->syscall_nr >= 0) { 272 if (regs->syscall_nr >= 0) {
@@ -294,11 +291,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
294 } 291 }
295 292
296 /* Set up the stack frame */ 293 /* Set up the stack frame */
297 if (setup_rt_frame(sig, ka, info, oldset, regs)) 294 if (setup_rt_frame(sig, ka, info, sigmask_to_save(), regs))
298 return -EFAULT; 295 return;
299 296
300 block_sigmask(ka, sig); 297 signal_delivered(sig, info, ka, regs, 0);
301 return 0;
302} 298}
303 299
304/* 300/*
@@ -311,7 +307,6 @@ static void do_signal(struct pt_regs *regs)
311 siginfo_t info; 307 siginfo_t info;
312 int signr; 308 int signr;
313 struct k_sigaction ka; 309 struct k_sigaction ka;
314 sigset_t *oldset;
315 310
316 /* 311 /*
317 * We want the common case to go fast, which 312 * We want the common case to go fast, which
@@ -322,14 +317,6 @@ static void do_signal(struct pt_regs *regs)
322 if (!user_mode(regs)) 317 if (!user_mode(regs))
323 return; 318 return;
324 319
325 if (try_to_freeze())
326 goto no_signal;
327
328 if (test_thread_flag(TIF_RESTORE_SIGMASK))
329 oldset = &current->saved_sigmask;
330 else
331 oldset = &current->blocked;
332
333 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 320 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
334 if (signr > 0) { 321 if (signr > 0) {
335 /* Re-enable any watchpoints before delivering the 322 /* Re-enable any watchpoints before delivering the
@@ -339,13 +326,11 @@ static void do_signal(struct pt_regs *regs)
339 */ 326 */
340 327
341 /* Whee! Actually deliver the signal. */ 328 /* Whee! Actually deliver the signal. */
342 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) 329 handle_signal(signr, &ka, &info, regs);
343 clear_thread_flag(TIF_RESTORE_SIGMASK);
344 330
345 return; 331 return;
346 } 332 }
347 333
348 no_signal:
349 /* Did we come from a system call? */ 334 /* Did we come from a system call? */
350 if (regs->syscall_nr >= 0) { 335 if (regs->syscall_nr >= 0) {
351 /* Restart the system call - no handlers present */ 336 /* Restart the system call - no handlers present */
@@ -360,10 +345,7 @@ static void do_signal(struct pt_regs *regs)
360 prev_insn(regs); 345 prev_insn(regs);
361 } 346 }
362 } 347 }
363 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 348 restore_saved_sigmask();
364 clear_thread_flag(TIF_RESTORE_SIGMASK);
365 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
366 }
367} 349}
368 350
369/* 351/*
@@ -383,8 +365,6 @@ void do_notify_resume(struct pt_regs *regs, __u32 thread_info_flags)
383 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 365 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
384 clear_thread_flag(TIF_NOTIFY_RESUME); 366 clear_thread_flag(TIF_NOTIFY_RESUME);
385 tracehook_notify_resume(regs); 367 tracehook_notify_resume(regs);
386 if (current->replacement_session_keyring)
387 key_replace_session_keyring();
388 } 368 }
389 369
390 clear_thread_flag(TIF_IRET); 370 clear_thread_flag(TIF_IRET);
diff --git a/arch/m68k/Kconfig b/arch/m68k/Kconfig
index cac5b6be572a..147120128260 100644
--- a/arch/m68k/Kconfig
+++ b/arch/m68k/Kconfig
@@ -7,6 +7,8 @@ config M68K
7 select GENERIC_IRQ_SHOW 7 select GENERIC_IRQ_SHOW
8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS 8 select ARCH_HAVE_NMI_SAFE_CMPXCHG if RMW_INSNS
9 select GENERIC_CPU_DEVICES 9 select GENERIC_CPU_DEVICES
10 select GENERIC_STRNCPY_FROM_USER if MMU
11 select GENERIC_STRNLEN_USER if MMU
10 select FPU if MMU 12 select FPU if MMU
11 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE 13 select ARCH_USES_GETTIMEOFFSET if MMU && !COLDFIRE
12 14
diff --git a/arch/m68k/include/asm/Kbuild b/arch/m68k/include/asm/Kbuild
index 1a922fad76f7..eafa2539a8ee 100644
--- a/arch/m68k/include/asm/Kbuild
+++ b/arch/m68k/include/asm/Kbuild
@@ -1,2 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2header-y += cachectl.h 2header-y += cachectl.h
3
4generic-y += word-at-a-time.h
diff --git a/arch/m68k/include/asm/m528xsim.h b/arch/m68k/include/asm/m528xsim.h
index d63b99ff7ff7..497c31c803ff 100644
--- a/arch/m68k/include/asm/m528xsim.h
+++ b/arch/m68k/include/asm/m528xsim.h
@@ -86,7 +86,7 @@
86/* 86/*
87 * QSPI module. 87 * QSPI module.
88 */ 88 */
89#define MCFQSPI_IOBASE (MCF_IPSBAR + 0x340) 89#define MCFQSPI_BASE (MCF_IPSBAR + 0x340)
90#define MCFQSPI_SIZE 0x40 90#define MCFQSPI_SIZE 0x40
91 91
92#define MCFQSPI_CS0 147 92#define MCFQSPI_CS0 147
diff --git a/arch/m68k/include/asm/posix_types.h b/arch/m68k/include/asm/posix_types.h
index 6373093be72b..cf4dbf70fdc7 100644
--- a/arch/m68k/include/asm/posix_types.h
+++ b/arch/m68k/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/m68k/include/asm/uaccess_mm.h b/arch/m68k/include/asm/uaccess_mm.h
index 9c80cd515b20..472c891a4aee 100644
--- a/arch/m68k/include/asm/uaccess_mm.h
+++ b/arch/m68k/include/asm/uaccess_mm.h
@@ -379,12 +379,15 @@ __constant_copy_to_user(void __user *to, const void *from, unsigned long n)
379#define copy_from_user(to, from, n) __copy_from_user(to, from, n) 379#define copy_from_user(to, from, n) __copy_from_user(to, from, n)
380#define copy_to_user(to, from, n) __copy_to_user(to, from, n) 380#define copy_to_user(to, from, n) __copy_to_user(to, from, n)
381 381
382long strncpy_from_user(char *dst, const char __user *src, long count); 382#define user_addr_max() \
383long strnlen_user(const char __user *src, long n); 383 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
384
385extern long strncpy_from_user(char *dst, const char __user *src, long count);
386extern __must_check long strlen_user(const char __user *str);
387extern __must_check long strnlen_user(const char __user *str, long n);
388
384unsigned long __clear_user(void __user *to, unsigned long n); 389unsigned long __clear_user(void __user *to, unsigned long n);
385 390
386#define clear_user __clear_user 391#define clear_user __clear_user
387 392
388#define strlen_user(str) strnlen_user(str, 32767)
389
390#endif /* _M68K_UACCESS_H */ 393#endif /* _M68K_UACCESS_H */
diff --git a/arch/m68k/kernel/ptrace.c b/arch/m68k/kernel/ptrace.c
index 8b4a2222e658..1bc10e62b9af 100644
--- a/arch/m68k/kernel/ptrace.c
+++ b/arch/m68k/kernel/ptrace.c
@@ -286,7 +286,7 @@ asmlinkage void syscall_trace(void)
286 } 286 }
287} 287}
288 288
289#ifdef CONFIG_COLDFIRE 289#if defined(CONFIG_COLDFIRE) || !defined(CONFIG_MMU)
290asmlinkage int syscall_trace_enter(void) 290asmlinkage int syscall_trace_enter(void)
291{ 291{
292 int ret = 0; 292 int ret = 0;
diff --git a/arch/m68k/kernel/signal.c b/arch/m68k/kernel/signal.c
index d9f3d1900eed..710a528b928b 100644
--- a/arch/m68k/kernel/signal.c
+++ b/arch/m68k/kernel/signal.c
@@ -51,8 +51,6 @@
51#include <asm/traps.h> 51#include <asm/traps.h>
52#include <asm/ucontext.h> 52#include <asm/ucontext.h>
53 53
54#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
55
56#ifdef CONFIG_MMU 54#ifdef CONFIG_MMU
57 55
58/* 56/*
@@ -795,7 +793,6 @@ asmlinkage int do_sigreturn(unsigned long __unused)
795 sizeof(frame->extramask)))) 793 sizeof(frame->extramask))))
796 goto badframe; 794 goto badframe;
797 795
798 sigdelsetmask(&set, ~_BLOCKABLE);
799 set_current_blocked(&set); 796 set_current_blocked(&set);
800 797
801 if (restore_sigcontext(regs, &frame->sc, frame + 1)) 798 if (restore_sigcontext(regs, &frame->sc, frame + 1))
@@ -820,7 +817,6 @@ asmlinkage int do_rt_sigreturn(unsigned long __unused)
820 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 817 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
821 goto badframe; 818 goto badframe;
822 819
823 sigdelsetmask(&set, ~_BLOCKABLE);
824 set_current_blocked(&set); 820 set_current_blocked(&set);
825 821
826 if (rt_restore_ucontext(regs, sw, &frame->uc)) 822 if (rt_restore_ucontext(regs, sw, &frame->uc))
@@ -1123,8 +1119,9 @@ handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
1123 */ 1119 */
1124static void 1120static void
1125handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info, 1121handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
1126 sigset_t *oldset, struct pt_regs *regs) 1122 struct pt_regs *regs)
1127{ 1123{
1124 sigset_t *oldset = sigmask_to_save();
1128 int err; 1125 int err;
1129 /* are we from a system call? */ 1126 /* are we from a system call? */
1130 if (regs->orig_d0 >= 0) 1127 if (regs->orig_d0 >= 0)
@@ -1140,14 +1137,12 @@ handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
1140 if (err) 1137 if (err)
1141 return; 1138 return;
1142 1139
1143 block_sigmask(ka, sig); 1140 signal_delivered(sig, info, ka, regs, 0);
1144 1141
1145 if (test_thread_flag(TIF_DELAYED_TRACE)) { 1142 if (test_thread_flag(TIF_DELAYED_TRACE)) {
1146 regs->sr &= ~0x8000; 1143 regs->sr &= ~0x8000;
1147 send_sig(SIGTRAP, current, 1); 1144 send_sig(SIGTRAP, current, 1);
1148 } 1145 }
1149
1150 clear_thread_flag(TIF_RESTORE_SIGMASK);
1151} 1146}
1152 1147
1153/* 1148/*
@@ -1160,19 +1155,13 @@ static void do_signal(struct pt_regs *regs)
1160 siginfo_t info; 1155 siginfo_t info;
1161 struct k_sigaction ka; 1156 struct k_sigaction ka;
1162 int signr; 1157 int signr;
1163 sigset_t *oldset;
1164 1158
1165 current->thread.esp0 = (unsigned long) regs; 1159 current->thread.esp0 = (unsigned long) regs;
1166 1160
1167 if (test_thread_flag(TIF_RESTORE_SIGMASK))
1168 oldset = &current->saved_sigmask;
1169 else
1170 oldset = &current->blocked;
1171
1172 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 1161 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
1173 if (signr > 0) { 1162 if (signr > 0) {
1174 /* Whee! Actually deliver the signal. */ 1163 /* Whee! Actually deliver the signal. */
1175 handle_signal(signr, &ka, &info, oldset, regs); 1164 handle_signal(signr, &ka, &info, regs);
1176 return; 1165 return;
1177 } 1166 }
1178 1167
@@ -1182,10 +1171,7 @@ static void do_signal(struct pt_regs *regs)
1182 handle_restart(regs, NULL, 0); 1171 handle_restart(regs, NULL, 0);
1183 1172
1184 /* If there's no signal to deliver, we just restore the saved mask. */ 1173 /* If there's no signal to deliver, we just restore the saved mask. */
1185 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 1174 restore_saved_sigmask();
1186 clear_thread_flag(TIF_RESTORE_SIGMASK);
1187 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
1188 }
1189} 1175}
1190 1176
1191void do_notify_resume(struct pt_regs *regs) 1177void do_notify_resume(struct pt_regs *regs)
@@ -1193,9 +1179,6 @@ void do_notify_resume(struct pt_regs *regs)
1193 if (test_thread_flag(TIF_SIGPENDING)) 1179 if (test_thread_flag(TIF_SIGPENDING))
1194 do_signal(regs); 1180 do_signal(regs);
1195 1181
1196 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) { 1182 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
1197 tracehook_notify_resume(regs); 1183 tracehook_notify_resume(regs);
1198 if (current->replacement_session_keyring)
1199 key_replace_session_keyring();
1200 }
1201} 1184}
diff --git a/arch/m68k/kernel/time.c b/arch/m68k/kernel/time.c
index d7deb7fc7eb5..707f0573ec6b 100644
--- a/arch/m68k/kernel/time.c
+++ b/arch/m68k/kernel/time.c
@@ -85,7 +85,7 @@ void __init time_init(void)
85 mach_sched_init(timer_interrupt); 85 mach_sched_init(timer_interrupt);
86} 86}
87 87
88#ifdef CONFIG_M68KCLASSIC 88#ifdef CONFIG_ARCH_USES_GETTIMEOFFSET
89 89
90u32 arch_gettimeoffset(void) 90u32 arch_gettimeoffset(void)
91{ 91{
@@ -108,4 +108,4 @@ static int __init rtc_init(void)
108 108
109module_init(rtc_init); 109module_init(rtc_init);
110 110
111#endif /* CONFIG_M68KCLASSIC */ 111#endif /* CONFIG_ARCH_USES_GETTIMEOFFSET */
diff --git a/arch/m68k/lib/uaccess.c b/arch/m68k/lib/uaccess.c
index 5664386338da..5e97f2ee7c11 100644
--- a/arch/m68k/lib/uaccess.c
+++ b/arch/m68k/lib/uaccess.c
@@ -104,80 +104,6 @@ unsigned long __generic_copy_to_user(void __user *to, const void *from,
104EXPORT_SYMBOL(__generic_copy_to_user); 104EXPORT_SYMBOL(__generic_copy_to_user);
105 105
106/* 106/*
107 * Copy a null terminated string from userspace.
108 */
109long strncpy_from_user(char *dst, const char __user *src, long count)
110{
111 long res;
112 char c;
113
114 if (count <= 0)
115 return count;
116
117 asm volatile ("\n"
118 "1: "MOVES".b (%2)+,%4\n"
119 " move.b %4,(%1)+\n"
120 " jeq 2f\n"
121 " subq.l #1,%3\n"
122 " jne 1b\n"
123 "2: sub.l %3,%0\n"
124 "3:\n"
125 " .section .fixup,\"ax\"\n"
126 " .even\n"
127 "10: move.l %5,%0\n"
128 " jra 3b\n"
129 " .previous\n"
130 "\n"
131 " .section __ex_table,\"a\"\n"
132 " .align 4\n"
133 " .long 1b,10b\n"
134 " .previous"
135 : "=d" (res), "+a" (dst), "+a" (src), "+r" (count), "=&d" (c)
136 : "i" (-EFAULT), "0" (count));
137
138 return res;
139}
140EXPORT_SYMBOL(strncpy_from_user);
141
142/*
143 * Return the size of a string (including the ending 0)
144 *
145 * Return 0 on exception, a value greater than N if too long
146 */
147long strnlen_user(const char __user *src, long n)
148{
149 char c;
150 long res;
151
152 asm volatile ("\n"
153 "1: subq.l #1,%1\n"
154 " jmi 3f\n"
155 "2: "MOVES".b (%0)+,%2\n"
156 " tst.b %2\n"
157 " jne 1b\n"
158 " jra 4f\n"
159 "\n"
160 "3: addq.l #1,%0\n"
161 "4: sub.l %4,%0\n"
162 "5:\n"
163 " .section .fixup,\"ax\"\n"
164 " .even\n"
165 "20: sub.l %0,%0\n"
166 " jra 5b\n"
167 " .previous\n"
168 "\n"
169 " .section __ex_table,\"a\"\n"
170 " .align 4\n"
171 " .long 2b,20b\n"
172 " .previous\n"
173 : "=&a" (res), "+d" (n), "=&d" (c)
174 : "0" (src), "r" (src));
175
176 return res;
177}
178EXPORT_SYMBOL(strnlen_user);
179
180/*
181 * Zero Userspace 107 * Zero Userspace
182 */ 108 */
183 109
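Taken together, the m68k hunks above (the Kconfig selects, the generic-y word-at-a-time.h, the new user_addr_max() definition, and the removal of the hand-written inline-asm routines in lib/uaccess.c) switch m68k/MMU over to the generic strncpy_from_user()/strnlen_user() implementations; user_addr_max() is what the generic code uses to bound how far it may scan, and strlen_user() stops being a strnlen_user(str, 32767) macro and becomes a real __must_check function. Callers keep the same contract; typical use looks like this (illustrative sketch, ubuf is a hypothetical __user pointer):

	char name[64];
	long len = strncpy_from_user(name, ubuf, sizeof(name));

	if (len < 0)
		return len;		/* -EFAULT: bad user pointer */
	if (len == sizeof(name))
		return -ENAMETOOLONG;	/* source string did not fit, no NUL copied */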
diff --git a/arch/m68k/platform/68328/timers.c b/arch/m68k/platform/68328/timers.c
index c801c172b822..f4dc9b295609 100644
--- a/arch/m68k/platform/68328/timers.c
+++ b/arch/m68k/platform/68328/timers.c
@@ -53,6 +53,7 @@
53#endif 53#endif
54 54
55static u32 m68328_tick_cnt; 55static u32 m68328_tick_cnt;
56static irq_handler_t timer_interrupt;
56 57
57/***************************************************************************/ 58/***************************************************************************/
58 59
@@ -62,7 +63,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
62 TSTAT &= 0; 63 TSTAT &= 0;
63 64
64 m68328_tick_cnt += TICKS_PER_JIFFY; 65 m68328_tick_cnt += TICKS_PER_JIFFY;
65 return arch_timer_interrupt(irq, dummy); 66 return timer_interrupt(irq, dummy);
66} 67}
67 68
68/***************************************************************************/ 69/***************************************************************************/
@@ -99,7 +100,7 @@ static struct clocksource m68328_clk = {
99 100
100/***************************************************************************/ 101/***************************************************************************/
101 102
102void hw_timer_init(void) 103void hw_timer_init(irq_handler_t handler)
103{ 104{
104 /* disable timer 1 */ 105 /* disable timer 1 */
105 TCTL = 0; 106 TCTL = 0;
@@ -115,6 +116,7 @@ void hw_timer_init(void)
115 /* Enable timer 1 */ 116 /* Enable timer 1 */
116 TCTL |= TCTL_TEN; 117 TCTL |= TCTL_TEN;
117 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ); 118 clocksource_register_hz(&m68328_clk, TICKS_PER_JIFFY*HZ);
119 timer_interrupt = handler;
118} 120}
119 121
120/***************************************************************************/ 122/***************************************************************************/
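The 68328 timer (and the 68360 one just below) stops calling a fixed arch_timer_interrupt() from its tick handler; hw_timer_init() now receives the handler from the generic m68k time code and stashes it, the usual register-a-callback shape. Condensed from the hunks above and below (not a complete driver):

	static irq_handler_t timer_interrupt;		/* set once at init time */

	static irqreturn_t hw_tick(int irq, void *dummy)
	{
		/* ... acknowledge the hardware tick ... */
		return timer_interrupt(irq, dummy);	/* forward to the registered handler */
	}

	void hw_timer_init(irq_handler_t handler)
	{
		/* ... program and start the timer ... */
		timer_interrupt = handler;		/* must be valid before ticks fire */
	}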
diff --git a/arch/m68k/platform/68360/config.c b/arch/m68k/platform/68360/config.c
index 255fc03913e9..9877cefad1e7 100644
--- a/arch/m68k/platform/68360/config.c
+++ b/arch/m68k/platform/68360/config.c
@@ -35,6 +35,7 @@ extern void m360_cpm_reset(void);
35#define OSCILLATOR (unsigned long int)33000000 35#define OSCILLATOR (unsigned long int)33000000
36#endif 36#endif
37 37
38static irq_handler_t timer_interrupt;
38unsigned long int system_clock; 39unsigned long int system_clock;
39 40
40extern QUICC *pquicc; 41extern QUICC *pquicc;
@@ -52,7 +53,7 @@ static irqreturn_t hw_tick(int irq, void *dummy)
52 53
53 pquicc->timer_ter1 = 0x0002; /* clear timer event */ 54 pquicc->timer_ter1 = 0x0002; /* clear timer event */
54 55
55 return arch_timer_interrupt(irq, dummy); 56 return timer_interrupt(irq, dummy);
56} 57}
57 58
58static struct irqaction m68360_timer_irq = { 59static struct irqaction m68360_timer_irq = {
@@ -61,7 +62,7 @@ static struct irqaction m68360_timer_irq = {
61 .handler = hw_tick, 62 .handler = hw_tick,
62}; 63};
63 64
64void hw_timer_init(void) 65void hw_timer_init(irq_handler_t handler)
65{ 66{
66 unsigned char prescaler; 67 unsigned char prescaler;
67 unsigned short tgcr_save; 68 unsigned short tgcr_save;
@@ -94,6 +95,8 @@ void hw_timer_init(void)
94 95
95 pquicc->timer_ter1 = 0x0003; /* clear timer events */ 96 pquicc->timer_ter1 = 0x0003; /* clear timer events */
96 97
98 timer_interrupt = handler;
99
97 /* enable timer 1 interrupt in CIMR */ 100 /* enable timer 1 interrupt in CIMR */
98 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq); 101 setup_irq(CPMVEC_TIMER1, &m68360_timer_irq);
99 102
diff --git a/arch/microblaze/include/asm/thread_info.h b/arch/microblaze/include/asm/thread_info.h
index 1a8ab6a5c03f..6c610234ffab 100644
--- a/arch/microblaze/include/asm/thread_info.h
+++ b/arch/microblaze/include/asm/thread_info.h
@@ -166,7 +166,23 @@ static inline void set_restore_sigmask(void)
166{ 166{
167 struct thread_info *ti = current_thread_info(); 167 struct thread_info *ti = current_thread_info();
168 ti->status |= TS_RESTORE_SIGMASK; 168 ti->status |= TS_RESTORE_SIGMASK;
169 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 169 WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
170}
171static inline void clear_restore_sigmask(void)
172{
173 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
174}
175static inline bool test_restore_sigmask(void)
176{
177 return current_thread_info()->status & TS_RESTORE_SIGMASK;
178}
179static inline bool test_and_clear_restore_sigmask(void)
180{
181 struct thread_info *ti = current_thread_info();
182 if (!(ti->status & TS_RESTORE_SIGMASK))
183 return false;
184 ti->status &= ~TS_RESTORE_SIGMASK;
185 return true;
170} 186}
171#endif 187#endif
172 188
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
index 7f4c7bef1642..76b9722557db 100644
--- a/arch/microblaze/kernel/signal.c
+++ b/arch/microblaze/kernel/signal.c
@@ -41,8 +41,6 @@
41#include <asm/cacheflush.h> 41#include <asm/cacheflush.h>
42#include <asm/syscalls.h> 42#include <asm/syscalls.h>
43 43
44#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
45
46asmlinkage long 44asmlinkage long
47sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss, 45sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss,
48 struct pt_regs *regs) 46 struct pt_regs *regs)
@@ -106,7 +104,6 @@ asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
106 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 104 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
107 goto badframe; 105 goto badframe;
108 106
109 sigdelsetmask(&set, ~_BLOCKABLE);
110 set_current_blocked(&set); 107 set_current_blocked(&set);
111 108
112 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval)) 109 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
@@ -310,10 +307,11 @@ do_restart:
310 * OK, we're invoking a handler 307 * OK, we're invoking a handler
311 */ 308 */
312 309
313static int 310static void
314handle_signal(unsigned long sig, struct k_sigaction *ka, 311handle_signal(unsigned long sig, struct k_sigaction *ka,
315 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 312 siginfo_t *info, struct pt_regs *regs)
316{ 313{
314 sigset_t *oldset = sigmask_to_save();
317 int ret; 315 int ret;
318 316
319 /* Set up the stack frame */ 317 /* Set up the stack frame */
@@ -323,11 +321,9 @@ handle_signal(unsigned long sig, struct k_sigaction *ka,
323 ret = setup_rt_frame(sig, ka, NULL, oldset, regs); 321 ret = setup_rt_frame(sig, ka, NULL, oldset, regs);
324 322
325 if (ret) 323 if (ret)
326 return ret; 324 return;
327
328 block_sigmask(ka, sig);
329 325
330 return 0; 326 signal_delivered(sig, info, ka, regs, 0);
331} 327}
332 328
333/* 329/*
@@ -344,33 +340,18 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
344 siginfo_t info; 340 siginfo_t info;
345 int signr; 341 int signr;
346 struct k_sigaction ka; 342 struct k_sigaction ka;
347 sigset_t *oldset;
348#ifdef DEBUG_SIG 343#ifdef DEBUG_SIG
349 printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall); 344 printk(KERN_INFO "do signal: %p %d\n", regs, in_syscall);
350 printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1, 345 printk(KERN_INFO "do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
351 regs->r12, current_thread_info()->flags); 346 regs->r12, current_thread_info()->flags);
352#endif 347#endif
353 348
354 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
355 oldset = &current->saved_sigmask;
356 else
357 oldset = &current->blocked;
358
359 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 349 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
360 if (signr > 0) { 350 if (signr > 0) {
361 /* Whee! Actually deliver the signal. */ 351 /* Whee! Actually deliver the signal. */
362 if (in_syscall) 352 if (in_syscall)
363 handle_restart(regs, &ka, 1); 353 handle_restart(regs, &ka, 1);
364 if (!handle_signal(signr, &ka, &info, oldset, regs)) { 354 handle_signal(signr, &ka, &info, regs);
365 /*
366 * A signal was successfully delivered; the saved
367 * sigmask will have been stored in the signal frame,
368 * and will be restored by sigreturn, so we can simply
369 * clear the TS_RESTORE_SIGMASK flag.
370 */
371 current_thread_info()->status &=
372 ~TS_RESTORE_SIGMASK;
373 }
374 return; 355 return;
375 } 356 }
376 357
@@ -381,10 +362,7 @@ static void do_signal(struct pt_regs *regs, int in_syscall)
381 * If there's no signal to deliver, we just put the saved sigmask 362 * If there's no signal to deliver, we just put the saved sigmask
382 * back. 363 * back.
383 */ 364 */
384 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 365 restore_saved_sigmask();
385 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
386 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
387 }
388} 366}
389 367
390void do_notify_resume(struct pt_regs *regs, int in_syscall) 368void do_notify_resume(struct pt_regs *regs, int in_syscall)
@@ -401,9 +379,6 @@ void do_notify_resume(struct pt_regs *regs, int in_syscall)
401 if (test_thread_flag(TIF_SIGPENDING)) 379 if (test_thread_flag(TIF_SIGPENDING))
402 do_signal(regs, in_syscall); 380 do_signal(regs, in_syscall);
403 381
404 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) { 382 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
405 tracehook_notify_resume(regs); 383 tracehook_notify_resume(regs);
406 if (current->replacement_session_keyring)
407 key_replace_session_keyring();
408 }
409} 384}
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index 7dde01642d6b..bf2248474fa8 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -213,8 +213,6 @@ static int au1200_nand_device_ready(struct mtd_info *mtd)
213 return __raw_readl((void __iomem *)MEM_STSTAT) & 1; 213 return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
214} 214}
215 215
216static const char *db1200_part_probes[] = { "cmdlinepart", NULL };
217
218static struct mtd_partition db1200_nand_parts[] = { 216static struct mtd_partition db1200_nand_parts[] = {
219 { 217 {
220 .name = "NAND FS 0", 218 .name = "NAND FS 0",
@@ -235,7 +233,6 @@ struct platform_nand_data db1200_nand_platdata = {
235 .nr_partitions = ARRAY_SIZE(db1200_nand_parts), 233 .nr_partitions = ARRAY_SIZE(db1200_nand_parts),
236 .partitions = db1200_nand_parts, 234 .partitions = db1200_nand_parts,
237 .chip_delay = 20, 235 .chip_delay = 20,
238 .part_probe_types = db1200_part_probes,
239 }, 236 },
240 .ctrl = { 237 .ctrl = {
241 .dev_ready = au1200_nand_device_ready, 238 .dev_ready = au1200_nand_device_ready,
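The three Alchemy board files (db1200, db1300, db1550) drop their private { "cmdlinepart", NULL } probe lists; presumably the MTD core now falls back to a default parser list that already includes "cmdlinepart" when a driver passes none, so spelling it out per board is redundant (that default-parser behaviour is an assumption, it is not shown in this diff). The platform data simply loses the field:

	.chip = {
		.nr_partitions	= ARRAY_SIZE(db1200_nand_parts),
		.partitions	= db1200_nand_parts,
		.chip_delay	= 20,
		/* .part_probe_types left unset: the MTD core's default
		 * partition parsers are used instead */
	},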
diff --git a/arch/mips/alchemy/devboards/db1300.c b/arch/mips/alchemy/devboards/db1300.c
index 0893f2af0d01..c56e0246694e 100644
--- a/arch/mips/alchemy/devboards/db1300.c
+++ b/arch/mips/alchemy/devboards/db1300.c
@@ -145,8 +145,6 @@ static int au1300_nand_device_ready(struct mtd_info *mtd)
145 return __raw_readl((void __iomem *)MEM_STSTAT) & 1; 145 return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
146} 146}
147 147
148static const char *db1300_part_probes[] = { "cmdlinepart", NULL };
149
150static struct mtd_partition db1300_nand_parts[] = { 148static struct mtd_partition db1300_nand_parts[] = {
151 { 149 {
152 .name = "NAND FS 0", 150 .name = "NAND FS 0",
@@ -167,7 +165,6 @@ struct platform_nand_data db1300_nand_platdata = {
167 .nr_partitions = ARRAY_SIZE(db1300_nand_parts), 165 .nr_partitions = ARRAY_SIZE(db1300_nand_parts),
168 .partitions = db1300_nand_parts, 166 .partitions = db1300_nand_parts,
169 .chip_delay = 20, 167 .chip_delay = 20,
170 .part_probe_types = db1300_part_probes,
171 }, 168 },
172 .ctrl = { 169 .ctrl = {
173 .dev_ready = au1300_nand_device_ready, 170 .dev_ready = au1300_nand_device_ready,
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index 6815d0783cd8..9eb79062f46e 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -149,8 +149,6 @@ static int au1550_nand_device_ready(struct mtd_info *mtd)
149 return __raw_readl((void __iomem *)MEM_STSTAT) & 1; 149 return __raw_readl((void __iomem *)MEM_STSTAT) & 1;
150} 150}
151 151
152static const char *db1550_part_probes[] = { "cmdlinepart", NULL };
153
154static struct mtd_partition db1550_nand_parts[] = { 152static struct mtd_partition db1550_nand_parts[] = {
155 { 153 {
156 .name = "NAND FS 0", 154 .name = "NAND FS 0",
@@ -171,7 +169,6 @@ struct platform_nand_data db1550_nand_platdata = {
171 .nr_partitions = ARRAY_SIZE(db1550_nand_parts), 169 .nr_partitions = ARRAY_SIZE(db1550_nand_parts),
172 .partitions = db1550_nand_parts, 170 .partitions = db1550_nand_parts,
173 .chip_delay = 20, 171 .chip_delay = 20,
174 .part_probe_types = db1550_part_probes,
175 }, 172 },
176 .ctrl = { 173 .ctrl = {
177 .dev_ready = au1550_nand_device_ready, 174 .dev_ready = au1550_nand_device_ready,
diff --git a/arch/mips/include/asm/posix_types.h b/arch/mips/include/asm/posix_types.h
index e0308dcca135..fa03ec3fbf89 100644
--- a/arch/mips/include/asm/posix_types.h
+++ b/arch/mips/include/asm/posix_types.h
@@ -17,11 +17,6 @@
17 * assume GCC is being used. 17 * assume GCC is being used.
18 */ 18 */
19 19
20#if (_MIPS_SZLONG == 64)
21typedef unsigned int __kernel_nlink_t;
22#define __kernel_nlink_t __kernel_nlink_t
23#endif
24
25typedef long __kernel_daddr_t; 20typedef long __kernel_daddr_t;
26#define __kernel_daddr_t __kernel_daddr_t 21#define __kernel_daddr_t __kernel_daddr_t
27 22
diff --git a/arch/mips/include/asm/stat.h b/arch/mips/include/asm/stat.h
index 6e00f751ab6d..fe9a4c3ec5a1 100644
--- a/arch/mips/include/asm/stat.h
+++ b/arch/mips/include/asm/stat.h
@@ -20,7 +20,7 @@ struct stat {
20 long st_pad1[3]; /* Reserved for network id */ 20 long st_pad1[3]; /* Reserved for network id */
21 ino_t st_ino; 21 ino_t st_ino;
22 mode_t st_mode; 22 mode_t st_mode;
23 nlink_t st_nlink; 23 __u32 st_nlink;
24 uid_t st_uid; 24 uid_t st_uid;
25 gid_t st_gid; 25 gid_t st_gid;
26 unsigned st_rdev; 26 unsigned st_rdev;
@@ -55,7 +55,7 @@ struct stat64 {
55 unsigned long long st_ino; 55 unsigned long long st_ino;
56 56
57 mode_t st_mode; 57 mode_t st_mode;
58 nlink_t st_nlink; 58 __u32 st_nlink;
59 59
60 uid_t st_uid; 60 uid_t st_uid;
61 gid_t st_gid; 61 gid_t st_gid;
@@ -96,7 +96,7 @@ struct stat {
96 unsigned long st_ino; 96 unsigned long st_ino;
97 97
98 mode_t st_mode; 98 mode_t st_mode;
99 nlink_t st_nlink; 99 __u32 st_nlink;
100 100
101 uid_t st_uid; 101 uid_t st_uid;
102 gid_t st_gid; 102 gid_t st_gid;
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
index 10263b405981..9c60d09e62a7 100644
--- a/arch/mips/kernel/signal-common.h
+++ b/arch/mips/kernel/signal-common.h
@@ -19,8 +19,6 @@
19# define DEBUGP(fmt, args...) 19# define DEBUGP(fmt, args...)
20#endif 20#endif
21 21
22#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
23
24/* 22/*
25 * Determine which stack to use.. 23 * Determine which stack to use..
26 */ 24 */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 17f6ee30ad0d..f2c09cfc60ac 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -339,7 +339,6 @@ asmlinkage void sys_sigreturn(nabi_no_regargs struct pt_regs regs)
339 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) 339 if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
340 goto badframe; 340 goto badframe;
341 341
342 sigdelsetmask(&blocked, ~_BLOCKABLE);
343 set_current_blocked(&blocked); 342 set_current_blocked(&blocked);
344 343
345 sig = restore_sigcontext(&regs, &frame->sf_sc); 344 sig = restore_sigcontext(&regs, &frame->sf_sc);
@@ -375,7 +374,6 @@ asmlinkage void sys_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
375 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) 374 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
376 goto badframe; 375 goto badframe;
377 376
378 sigdelsetmask(&set, ~_BLOCKABLE);
379 set_current_blocked(&set); 377 set_current_blocked(&set);
380 378
381 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); 379 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
@@ -514,9 +512,10 @@ struct mips_abi mips_abi = {
514 .restart = __NR_restart_syscall 512 .restart = __NR_restart_syscall
515}; 513};
516 514
517static int handle_signal(unsigned long sig, siginfo_t *info, 515static void handle_signal(unsigned long sig, siginfo_t *info,
518 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) 516 struct k_sigaction *ka, struct pt_regs *regs)
519{ 517{
518 sigset_t *oldset = sigmask_to_save();
520 int ret; 519 int ret;
521 struct mips_abi *abi = current->thread.abi; 520 struct mips_abi *abi = current->thread.abi;
522 void *vdso = current->mm->context.vdso; 521 void *vdso = current->mm->context.vdso;
@@ -550,17 +549,14 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
550 ka, regs, sig, oldset); 549 ka, regs, sig, oldset);
551 550
552 if (ret) 551 if (ret)
553 return ret; 552 return;
554
555 block_sigmask(ka, sig);
556 553
557 return ret; 554 signal_delivered(sig, info, ka, regs, 0);
558} 555}
559 556
560static void do_signal(struct pt_regs *regs) 557static void do_signal(struct pt_regs *regs)
561{ 558{
562 struct k_sigaction ka; 559 struct k_sigaction ka;
563 sigset_t *oldset;
564 siginfo_t info; 560 siginfo_t info;
565 int signr; 561 int signr;
566 562
@@ -572,25 +568,10 @@ static void do_signal(struct pt_regs *regs)
572 if (!user_mode(regs)) 568 if (!user_mode(regs))
573 return; 569 return;
574 570
575 if (test_thread_flag(TIF_RESTORE_SIGMASK))
576 oldset = &current->saved_sigmask;
577 else
578 oldset = &current->blocked;
579
580 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 571 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
581 if (signr > 0) { 572 if (signr > 0) {
582 /* Whee! Actually deliver the signal. */ 573 /* Whee! Actually deliver the signal. */
583 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 574 handle_signal(signr, &info, &ka, regs);
584 /*
585 * A signal was successfully delivered; the saved
586 * sigmask will have been stored in the signal frame,
587 * and will be restored by sigreturn, so we can simply
588 * clear the TIF_RESTORE_SIGMASK flag.
589 */
590 if (test_thread_flag(TIF_RESTORE_SIGMASK))
591 clear_thread_flag(TIF_RESTORE_SIGMASK);
592 }
593
594 return; 575 return;
595 } 576 }
596 577
@@ -614,10 +595,7 @@ static void do_signal(struct pt_regs *regs)
614 * If there's no signal to deliver, we just put the saved sigmask 595 * If there's no signal to deliver, we just put the saved sigmask
615 * back 596 * back
616 */ 597 */
617 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 598 restore_saved_sigmask();
618 clear_thread_flag(TIF_RESTORE_SIGMASK);
619 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
620 }
621} 599}
622 600
623/* 601/*
@@ -630,14 +608,12 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
630 local_irq_enable(); 608 local_irq_enable();
631 609
632 /* deal with pending signal delivery */ 610 /* deal with pending signal delivery */
633 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 611 if (thread_info_flags & _TIF_SIGPENDING)
634 do_signal(regs); 612 do_signal(regs);
635 613
636 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 614 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
637 clear_thread_flag(TIF_NOTIFY_RESUME); 615 clear_thread_flag(TIF_NOTIFY_RESUME);
638 tracehook_notify_resume(regs); 616 tracehook_notify_resume(regs);
639 if (current->replacement_session_keyring)
640 key_replace_session_keyring();
641 } 617 }
642} 618}
643 619
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index b4fe2eacbd5d..da1b56a39ac7 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -465,7 +465,6 @@ asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
465 if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) 465 if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
466 goto badframe; 466 goto badframe;
467 467
468 sigdelsetmask(&blocked, ~_BLOCKABLE);
469 set_current_blocked(&blocked); 468 set_current_blocked(&blocked);
470 469
471 sig = restore_sigcontext32(&regs, &frame->sf_sc); 470 sig = restore_sigcontext32(&regs, &frame->sf_sc);
@@ -503,7 +502,6 @@ asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
503 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 502 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
504 goto badframe; 503 goto badframe;
505 504
506 sigdelsetmask(&set, ~_BLOCKABLE);
507 set_current_blocked(&set); 505 set_current_blocked(&set);
508 506
509 sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext); 507 sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
index 63ffac9af7c5..3574c145511b 100644
--- a/arch/mips/kernel/signal_n32.c
+++ b/arch/mips/kernel/signal_n32.c
@@ -109,7 +109,6 @@ asmlinkage void sysn32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
109 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) 109 if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
110 goto badframe; 110 goto badframe;
111 111
112 sigdelsetmask(&set, ~_BLOCKABLE);
113 set_current_blocked(&set); 112 set_current_blocked(&set);
114 113
115 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext); 114 sig = restore_sigcontext(&regs, &frame->rs_uc.uc_mcontext);
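
Each sigreturn path above also loses its sigdelsetmask(&set, ~_BLOCKABLE) line before set_current_blocked(). That removal only stays safe if the central setter itself refuses to block SIGKILL and SIGSTOP, which is the assumption the small standalone model below illustrates; the bit layout and helper are invented for the demo, not taken from kernel/signal.c.

/*
 * Standalone model: dropping the per-arch ~_BLOCKABLE filtering relies on the
 * central setter stripping the unblockable signals itself (assumed here).
 */
#include <stdio.h>

#define KILL_BIT (1u << 0)	/* stand-in for sigmask(SIGKILL) */
#define STOP_BIT (1u << 1)	/* stand-in for sigmask(SIGSTOP) */

static unsigned int blocked;

static void set_current_blocked_model(unsigned int newset)
{
	newset &= ~(KILL_BIT | STOP_BIT);	/* central filtering */
	blocked = newset;
}

int main(void)
{
	/* a mask restored from a user-supplied sigframe may claim anything */
	set_current_blocked_model(~0u);
	printf("SIGKILL still deliverable: %s\n",
	       (blocked & KILL_BIT) ? "no" : "yes");
	return 0;
}
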
diff --git a/arch/mips/pnx833x/common/platform.c b/arch/mips/pnx833x/common/platform.c
index 87167dcc79fa..05a1d922cd60 100644
--- a/arch/mips/pnx833x/common/platform.c
+++ b/arch/mips/pnx833x/common/platform.c
@@ -244,11 +244,6 @@ static struct platform_device pnx833x_sata_device = {
244 .resource = pnx833x_sata_resources, 244 .resource = pnx833x_sata_resources,
245}; 245};
246 246
247static const char *part_probes[] = {
248 "cmdlinepart",
249 NULL
250};
251
252static void 247static void
253pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl) 248pnx833x_flash_nand_cmd_ctrl(struct mtd_info *mtd, int cmd, unsigned int ctrl)
254{ 249{
@@ -268,7 +263,6 @@ static struct platform_nand_data pnx833x_flash_nand_data = {
268 .chip = { 263 .chip = {
269 .nr_chips = 1, 264 .nr_chips = 1,
270 .chip_delay = 25, 265 .chip_delay = 25,
271 .part_probe_types = part_probes,
272 }, 266 },
273 .ctrl = { 267 .ctrl = {
274 .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl 268 .cmd_ctrl = pnx833x_flash_nand_cmd_ctrl
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index ea774285e6c5..716e9a12f0e7 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -293,7 +293,6 @@ static void __init rb532_nand_setup(void)
293 rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info); 293 rb532_nand_data.chip.nr_partitions = ARRAY_SIZE(rb532_partition_info);
294 rb532_nand_data.chip.partitions = rb532_partition_info; 294 rb532_nand_data.chip.partitions = rb532_partition_info;
295 rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY; 295 rb532_nand_data.chip.chip_delay = NAND_CHIP_DELAY;
296 rb532_nand_data.chip.options = NAND_NO_AUTOINCR;
297} 296}
298 297
299 298
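
The Alchemy, PNX833x and RB532 board files above stop passing their private part_probe_types lists of { "cmdlinepart", NULL } (and RB532 also drops NAND_NO_AUTOINCR). Presumably the MTD core now supplies an equivalent default parser list when a board leaves the field unset; the standalone sketch below only models that fallback idea under this assumption and is not the MTD interface.

/*
 * Standalone model of "fall back to a core-provided parser list when the
 * board leaves part_probe_types NULL".  Names are stand-ins, not MTD API.
 */
#include <stddef.h>
#include <stdio.h>

static const char *default_probes[] = { "cmdlinepart", NULL };

static void parse_partitions(const char **probes)
{
	if (!probes)
		probes = default_probes;	/* assumed core default */
	for (; *probes; probes++)
		printf("trying parser: %s\n", *probes);
}

int main(void)
{
	parse_partitions(NULL);	/* board code no longer supplies its own list */
	return 0;
}
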
diff --git a/arch/mn10300/include/asm/posix_types.h b/arch/mn10300/include/asm/posix_types.h
index ab506181ec31..d31eeea480cf 100644
--- a/arch/mn10300/include/asm/posix_types.h
+++ b/arch/mn10300/include/asm/posix_types.h
@@ -20,9 +20,6 @@
20typedef unsigned short __kernel_mode_t; 20typedef unsigned short __kernel_mode_t;
21#define __kernel_mode_t __kernel_mode_t 21#define __kernel_mode_t __kernel_mode_t
22 22
23typedef unsigned short __kernel_nlink_t;
24#define __kernel_nlink_t __kernel_nlink_t
25
26typedef unsigned short __kernel_ipc_pid_t; 23typedef unsigned short __kernel_ipc_pid_t;
27#define __kernel_ipc_pid_t __kernel_ipc_pid_t 24#define __kernel_ipc_pid_t __kernel_ipc_pid_t
28 25
diff --git a/arch/mn10300/kernel/signal.c b/arch/mn10300/kernel/signal.c
index 890cf91767cc..6ab0bee2a54f 100644
--- a/arch/mn10300/kernel/signal.c
+++ b/arch/mn10300/kernel/signal.c
@@ -31,8 +31,6 @@
31 31
32#define DEBUG_SIG 0 32#define DEBUG_SIG 0
33 33
34#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
35
36/* 34/*
37 * atomically swap in the new signal mask, and wait for a signal. 35 * atomically swap in the new signal mask, and wait for a signal.
38 */ 36 */
@@ -163,7 +161,6 @@ asmlinkage long sys_sigreturn(void)
163 sizeof(frame->extramask))) 161 sizeof(frame->extramask)))
164 goto badframe; 162 goto badframe;
165 163
166 sigdelsetmask(&set, ~_BLOCKABLE);
167 set_current_blocked(&set); 164 set_current_blocked(&set);
168 165
169 if (restore_sigcontext(current_frame(), &frame->sc, &d0)) 166 if (restore_sigcontext(current_frame(), &frame->sc, &d0))
@@ -191,7 +188,6 @@ asmlinkage long sys_rt_sigreturn(void)
191 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 188 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
192 goto badframe; 189 goto badframe;
193 190
194 sigdelsetmask(&set, ~_BLOCKABLE);
195 set_current_blocked(&set); 191 set_current_blocked(&set);
196 192
197 if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0)) 193 if (restore_sigcontext(current_frame(), &frame->uc.uc_mcontext, &d0))
@@ -430,8 +426,9 @@ static inline void stepback(struct pt_regs *regs)
430 */ 426 */
431static int handle_signal(int sig, 427static int handle_signal(int sig,
432 siginfo_t *info, struct k_sigaction *ka, 428 siginfo_t *info, struct k_sigaction *ka,
433 sigset_t *oldset, struct pt_regs *regs) 429 struct pt_regs *regs)
434{ 430{
431 sigset_t *oldset = sigmask_to_save();
435 int ret; 432 int ret;
436 433
437 /* Are we from a system call? */ 434 /* Are we from a system call? */
@@ -461,11 +458,11 @@ static int handle_signal(int sig,
461 ret = setup_rt_frame(sig, ka, info, oldset, regs); 458 ret = setup_rt_frame(sig, ka, info, oldset, regs);
462 else 459 else
463 ret = setup_frame(sig, ka, oldset, regs); 460 ret = setup_frame(sig, ka, oldset, regs);
461 if (ret)
462 return;
464 463
465 if (ret == 0) 464 signal_delivered(sig, info, ka, regs,
466 block_sigmask(ka, sig); 465 test_thread_flag(TIF_SINGLESTEP));
467
468 return ret;
469} 466}
470 467
471/* 468/*
@@ -475,7 +472,6 @@ static void do_signal(struct pt_regs *regs)
475{ 472{
476 struct k_sigaction ka; 473 struct k_sigaction ka;
477 siginfo_t info; 474 siginfo_t info;
478 sigset_t *oldset;
479 int signr; 475 int signr;
480 476
481 /* we want the common case to go fast, which is why we may in certain 477 /* we want the common case to go fast, which is why we may in certain
@@ -483,23 +479,9 @@ static void do_signal(struct pt_regs *regs)
483 if (!user_mode(regs)) 479 if (!user_mode(regs))
484 return; 480 return;
485 481
486 if (test_thread_flag(TIF_RESTORE_SIGMASK))
487 oldset = &current->saved_sigmask;
488 else
489 oldset = &current->blocked;
490
491 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 482 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
492 if (signr > 0) { 483 if (signr > 0) {
493 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 484 if (handle_signal(signr, &info, &ka, regs) == 0) {
494 /* a signal was successfully delivered; the saved
495 * sigmask will have been stored in the signal frame,
496 * and will be restored by sigreturn, so we can simply
497 * clear the TIF_RESTORE_SIGMASK flag */
498 if (test_thread_flag(TIF_RESTORE_SIGMASK))
499 clear_thread_flag(TIF_RESTORE_SIGMASK);
500
501 tracehook_signal_handler(signr, &info, &ka, regs,
502 test_thread_flag(TIF_SINGLESTEP));
503 } 485 }
504 486
505 return; 487 return;
@@ -525,10 +507,7 @@ static void do_signal(struct pt_regs *regs)
525 507
526 /* if there's no signal to deliver, we just put the saved sigmask 508 /* if there's no signal to deliver, we just put the saved sigmask
527 * back */ 509 * back */
528 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 510 restore_saved_sigmask();
529 clear_thread_flag(TIF_RESTORE_SIGMASK);
530 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
531 }
532} 511}
533 512
534/* 513/*
@@ -548,13 +527,11 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, u32 thread_info_flags)
548 } 527 }
549 528
550 /* deal with pending signal delivery */ 529 /* deal with pending signal delivery */
551 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 530 if (thread_info_flags & _TIF_SIGPENDING)
552 do_signal(regs); 531 do_signal(regs);
553 532
554 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 533 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
555 clear_thread_flag(TIF_NOTIFY_RESUME); 534 clear_thread_flag(TIF_NOTIFY_RESUME);
556 tracehook_notify_resume(current_frame()); 535 tracehook_notify_resume(current_frame());
557 if (current->replacement_session_keyring)
558 key_replace_session_keyring();
559 } 536 }
560} 537}
diff --git a/arch/openrisc/kernel/signal.c b/arch/openrisc/kernel/signal.c
index e970743251ae..30110297f4f9 100644
--- a/arch/openrisc/kernel/signal.c
+++ b/arch/openrisc/kernel/signal.c
@@ -33,8 +33,6 @@
33 33
34#define DEBUG_SIG 0 34#define DEBUG_SIG 0
35 35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38asmlinkage long 36asmlinkage long
39_sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs) 37_sys_sigaltstack(const stack_t *uss, stack_t *uoss, struct pt_regs *regs)
40{ 38{
@@ -101,7 +99,6 @@ asmlinkage long _sys_rt_sigreturn(struct pt_regs *regs)
101 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 99 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
102 goto badframe; 100 goto badframe;
103 101
104 sigdelsetmask(&set, ~_BLOCKABLE);
105 set_current_blocked(&set); 102 set_current_blocked(&set);
106 103
107 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 104 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -251,20 +248,19 @@ give_sigsegv:
251 return -EFAULT; 248 return -EFAULT;
252} 249}
253 250
254static inline int 251static inline void
255handle_signal(unsigned long sig, 252handle_signal(unsigned long sig,
256 siginfo_t *info, struct k_sigaction *ka, 253 siginfo_t *info, struct k_sigaction *ka,
257 sigset_t *oldset, struct pt_regs *regs) 254 struct pt_regs *regs)
258{ 255{
259 int ret; 256 int ret;
260 257
261 ret = setup_rt_frame(sig, ka, info, oldset, regs); 258 ret = setup_rt_frame(sig, ka, info, sigmask_to_save(), regs);
262 if (ret) 259 if (ret)
263 return ret; 260 return;
264
265 block_sigmask(ka, sig);
266 261
267 return 0; 262 signal_delivered(sig, info, ka, regs,
263 test_thread_flag(TIF_SINGLESTEP));
268} 264}
269 265
270/* 266/*
@@ -339,30 +335,10 @@ void do_signal(struct pt_regs *regs)
339 if (signr <= 0) { 335 if (signr <= 0) {
340 /* no signal to deliver so we just put the saved sigmask 336 /* no signal to deliver so we just put the saved sigmask
341 * back */ 337 * back */
342 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 338 restore_saved_sigmask();
343 clear_thread_flag(TIF_RESTORE_SIGMASK);
344 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
345 }
346
347 } else { /* signr > 0 */ 339 } else { /* signr > 0 */
348 sigset_t *oldset;
349
350 if (current_thread_info()->flags & _TIF_RESTORE_SIGMASK)
351 oldset = &current->saved_sigmask;
352 else
353 oldset = &current->blocked;
354
355 /* Whee! Actually deliver the signal. */ 340 /* Whee! Actually deliver the signal. */
356 if (!handle_signal(signr, &info, &ka, oldset, regs)) { 341 handle_signal(signr, &info, &ka, regs);
357 /* a signal was successfully delivered; the saved
358 * sigmask will have been stored in the signal frame,
359 * and will be restored by sigreturn, so we can simply
360 * clear the TIF_RESTORE_SIGMASK flag */
361 clear_thread_flag(TIF_RESTORE_SIGMASK);
362 }
363
364 tracehook_signal_handler(signr, &info, &ka, regs,
365 test_thread_flag(TIF_SINGLESTEP));
366 } 342 }
367 343
368 return; 344 return;
@@ -376,7 +352,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs)
376 if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) { 352 if (current_thread_info()->flags & _TIF_NOTIFY_RESUME) {
377 clear_thread_flag(TIF_NOTIFY_RESUME); 353 clear_thread_flag(TIF_NOTIFY_RESUME);
378 tracehook_notify_resume(regs); 354 tracehook_notify_resume(regs);
379 if (current->replacement_session_keyring)
380 key_replace_session_keyring();
381 } 355 }
382} 356}
diff --git a/arch/parisc/Kconfig b/arch/parisc/Kconfig
index ddb8b24b823d..3ff21b536f28 100644
--- a/arch/parisc/Kconfig
+++ b/arch/parisc/Kconfig
@@ -18,6 +18,7 @@ config PARISC
18 select IRQ_PER_CPU 18 select IRQ_PER_CPU
19 select ARCH_HAVE_NMI_SAFE_CMPXCHG 19 select ARCH_HAVE_NMI_SAFE_CMPXCHG
20 select GENERIC_SMP_IDLE_THREAD 20 select GENERIC_SMP_IDLE_THREAD
21 select GENERIC_STRNCPY_FROM_USER
21 22
22 help 23 help
23 The PA-RISC microprocessor is designed by Hewlett-Packard and used 24 The PA-RISC microprocessor is designed by Hewlett-Packard and used
diff --git a/arch/parisc/Makefile b/arch/parisc/Makefile
index dbc3850b1d0d..5707f1a62341 100644
--- a/arch/parisc/Makefile
+++ b/arch/parisc/Makefile
@@ -21,6 +21,7 @@ KBUILD_DEFCONFIG := default_defconfig
21 21
22NM = sh $(srctree)/arch/parisc/nm 22NM = sh $(srctree)/arch/parisc/nm
23CHECKFLAGS += -D__hppa__=1 23CHECKFLAGS += -D__hppa__=1
24LIBGCC = $(shell $(CC) $(KBUILD_CFLAGS) -print-libgcc-file-name)
24 25
25MACHINE := $(shell uname -m) 26MACHINE := $(shell uname -m)
26ifeq ($(MACHINE),parisc*) 27ifeq ($(MACHINE),parisc*)
@@ -79,7 +80,7 @@ kernel-y := mm/ kernel/ math-emu/
79kernel-$(CONFIG_HPUX) += hpux/ 80kernel-$(CONFIG_HPUX) += hpux/
80 81
81core-y += $(addprefix arch/parisc/, $(kernel-y)) 82core-y += $(addprefix arch/parisc/, $(kernel-y))
82libs-y += arch/parisc/lib/ `$(CC) -print-libgcc-file-name` 83libs-y += arch/parisc/lib/ $(LIBGCC)
83 84
84drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/ 85drivers-$(CONFIG_OPROFILE) += arch/parisc/oprofile/
85 86
diff --git a/arch/parisc/include/asm/Kbuild b/arch/parisc/include/asm/Kbuild
index 19a434f55059..4383707d9801 100644
--- a/arch/parisc/include/asm/Kbuild
+++ b/arch/parisc/include/asm/Kbuild
@@ -1,3 +1,4 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3header-y += pdc.h 3header-y += pdc.h
4generic-y += word-at-a-time.h
diff --git a/arch/parisc/include/asm/bug.h b/arch/parisc/include/asm/bug.h
index 72cfdb0cfdd1..62a33338549c 100644
--- a/arch/parisc/include/asm/bug.h
+++ b/arch/parisc/include/asm/bug.h
@@ -1,6 +1,8 @@
1#ifndef _PARISC_BUG_H 1#ifndef _PARISC_BUG_H
2#define _PARISC_BUG_H 2#define _PARISC_BUG_H
3 3
4#include <linux/kernel.h> /* for BUGFLAG_TAINT */
5
4/* 6/*
5 * Tell the user there is some problem. 7 * Tell the user there is some problem.
6 * The offending file and line are encoded in the __bug_table section. 8 * The offending file and line are encoded in the __bug_table section.
diff --git a/arch/parisc/include/asm/posix_types.h b/arch/parisc/include/asm/posix_types.h
index 5212b0357daf..b9344256f76b 100644
--- a/arch/parisc/include/asm/posix_types.h
+++ b/arch/parisc/include/asm/posix_types.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/parisc/include/asm/smp.h b/arch/parisc/include/asm/smp.h
index e8f8037d872b..a5dc9066c6d8 100644
--- a/arch/parisc/include/asm/smp.h
+++ b/arch/parisc/include/asm/smp.h
@@ -25,7 +25,6 @@ typedef unsigned long address_t;
25#define cpu_number_map(cpu) (cpu) 25#define cpu_number_map(cpu) (cpu)
26#define cpu_logical_map(cpu) (cpu) 26#define cpu_logical_map(cpu) (cpu)
27 27
28extern void smp_send_reschedule(int cpu);
29extern void smp_send_all_nop(void); 28extern void smp_send_all_nop(void);
30 29
31extern void arch_send_call_function_single_ipi(int cpu); 30extern void arch_send_call_function_single_ipi(int cpu);
@@ -50,6 +49,5 @@ static inline void __cpu_die (unsigned int cpu) {
50 while(1) 49 while(1)
51 ; 50 ;
52} 51}
53extern int __cpu_up (unsigned int cpu);
54 52
55#endif /* __ASM_SMP_H */ 53#endif /* __ASM_SMP_H */
diff --git a/arch/parisc/include/asm/stat.h b/arch/parisc/include/asm/stat.h
index 9d5fbbc5c31f..d76fbda5d62c 100644
--- a/arch/parisc/include/asm/stat.h
+++ b/arch/parisc/include/asm/stat.h
@@ -7,7 +7,7 @@ struct stat {
7 unsigned int st_dev; /* dev_t is 32 bits on parisc */ 7 unsigned int st_dev; /* dev_t is 32 bits on parisc */
8 ino_t st_ino; /* 32 bits */ 8 ino_t st_ino; /* 32 bits */
9 mode_t st_mode; /* 16 bits */ 9 mode_t st_mode; /* 16 bits */
10 nlink_t st_nlink; /* 16 bits */ 10 unsigned short st_nlink; /* 16 bits */
11 unsigned short st_reserved1; /* old st_uid */ 11 unsigned short st_reserved1; /* old st_uid */
12 unsigned short st_reserved2; /* old st_gid */ 12 unsigned short st_reserved2; /* old st_gid */
13 unsigned int st_rdev; 13 unsigned int st_rdev;
@@ -42,7 +42,7 @@ struct hpux_stat64 {
42 unsigned int st_dev; /* dev_t is 32 bits on parisc */ 42 unsigned int st_dev; /* dev_t is 32 bits on parisc */
43 ino_t st_ino; /* 32 bits */ 43 ino_t st_ino; /* 32 bits */
44 mode_t st_mode; /* 16 bits */ 44 mode_t st_mode; /* 16 bits */
45 nlink_t st_nlink; /* 16 bits */ 45 unsigned short st_nlink; /* 16 bits */
46 unsigned short st_reserved1; /* old st_uid */ 46 unsigned short st_reserved1; /* old st_uid */
47 unsigned short st_reserved2; /* old st_gid */ 47 unsigned short st_reserved2; /* old st_gid */
48 unsigned int st_rdev; 48 unsigned int st_rdev;
diff --git a/arch/parisc/include/asm/thread_info.h b/arch/parisc/include/asm/thread_info.h
index 83ae7dd4d99e..22b4726dee49 100644
--- a/arch/parisc/include/asm/thread_info.h
+++ b/arch/parisc/include/asm/thread_info.h
@@ -74,7 +74,7 @@ struct thread_info {
74#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP) 74#define _TIF_BLOCKSTEP (1 << TIF_BLOCKSTEP)
75 75
76#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \ 76#define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NOTIFY_RESUME | \
77 _TIF_NEED_RESCHED | _TIF_RESTORE_SIGMASK) 77 _TIF_NEED_RESCHED)
78 78
79#endif /* __KERNEL__ */ 79#endif /* __KERNEL__ */
80 80
diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
index 9ac066086f03..4ba2c93770f1 100644
--- a/arch/parisc/include/asm/uaccess.h
+++ b/arch/parisc/include/asm/uaccess.h
@@ -218,15 +218,14 @@ struct exception_data {
218extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long); 218extern unsigned long lcopy_to_user(void __user *, const void *, unsigned long);
219extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long); 219extern unsigned long lcopy_from_user(void *, const void __user *, unsigned long);
220extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long); 220extern unsigned long lcopy_in_user(void __user *, const void __user *, unsigned long);
221extern long lstrncpy_from_user(char *, const char __user *, long); 221extern long strncpy_from_user(char *, const char __user *, long);
222extern unsigned lclear_user(void __user *,unsigned long); 222extern unsigned lclear_user(void __user *,unsigned long);
223extern long lstrnlen_user(const char __user *,long); 223extern long lstrnlen_user(const char __user *,long);
224
225/* 224/*
226 * Complex access routines -- macros 225 * Complex access routines -- macros
227 */ 226 */
227#define user_addr_max() (~0UL)
228 228
229#define strncpy_from_user lstrncpy_from_user
230#define strnlen_user lstrnlen_user 229#define strnlen_user lstrnlen_user
231#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL) 230#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
232#define clear_user lclear_user 231#define clear_user lclear_user
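
Together with the GENERIC_STRNCPY_FROM_USER select in the Kconfig hunk and the generic-y += word-at-a-time.h line in Kbuild, the uaccess.h change above retires parisc's assembly lstrncpy_from_user in favour of the generic copy loop, with user_addr_max() telling it how far a user pointer may legitimately reach. The generic routine leans on a word-at-a-time zero-byte test; here is a self-contained sketch of that classic bit trick (64-bit constants assumed, nothing copied from the parisc code).

/*
 * Standalone sketch of the word-at-a-time zero-byte test that a generic
 * strncpy_from_user()-style copy loop relies on.  Illustrative only.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int has_zero_byte(uint64_t v)
{
	const uint64_t ones = 0x0101010101010101ULL;
	const uint64_t high = 0x8080808080808080ULL;

	/* a byte of v is zero iff it borrows in (v - ones) while ~v keeps 0x80 */
	return ((v - ones) & ~v & high) != 0;
}

int main(void)
{
	uint64_t w;

	memcpy(&w, "abcdefgh", sizeof(w));
	printf("\"abcdefgh\" contains NUL: %d\n", has_zero_byte(w));
	memcpy(&w, "abc\0defg", sizeof(w));
	printf("\"abc\\0defg\" contains NUL: %d\n", has_zero_byte(w));
	return 0;
}
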
diff --git a/arch/parisc/kernel/entry.S b/arch/parisc/kernel/entry.S
index 535034217021..18670a078849 100644
--- a/arch/parisc/kernel/entry.S
+++ b/arch/parisc/kernel/entry.S
@@ -552,7 +552,7 @@
552 * entry (identifying the physical page) and %r23 up with 552 * entry (identifying the physical page) and %r23 up with
553 * the from tlb entry (or nothing if only a to entry---for 553 * the from tlb entry (or nothing if only a to entry---for
554 * clear_user_page_asm) */ 554 * clear_user_page_asm) */
555 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault 555 .macro do_alias spc,tmp,tmp1,va,pte,prot,fault,patype
556 cmpib,COND(<>),n 0,\spc,\fault 556 cmpib,COND(<>),n 0,\spc,\fault
557 ldil L%(TMPALIAS_MAP_START),\tmp 557 ldil L%(TMPALIAS_MAP_START),\tmp
558#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000) 558#if defined(CONFIG_64BIT) && (TMPALIAS_MAP_START >= 0x80000000)
@@ -581,11 +581,15 @@
581 */ 581 */
582 cmpiclr,= 0x01,\tmp,%r0 582 cmpiclr,= 0x01,\tmp,%r0
583 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot 583 ldi (_PAGE_DIRTY|_PAGE_READ|_PAGE_WRITE),\prot
584#ifdef CONFIG_64BIT 584.ifc \patype,20
585 depd,z \prot,8,7,\prot 585 depd,z \prot,8,7,\prot
586#else 586.else
587.ifc \patype,11
587 depw,z \prot,8,7,\prot 588 depw,z \prot,8,7,\prot
588#endif 589.else
590 .error "undefined PA type to do_alias"
591.endif
592.endif
589 /* 593 /*
590 * OK, it is in the temp alias region, check whether "from" or "to". 594 * OK, it is in the temp alias region, check whether "from" or "to".
591 * Check "subtle" note in pacache.S re: r23/r26. 595 * Check "subtle" note in pacache.S re: r23/r26.
@@ -920,7 +924,7 @@ intr_check_sig:
920 /* As above */ 924 /* As above */
921 mfctl %cr30,%r1 925 mfctl %cr30,%r1
922 LDREG TI_FLAGS(%r1),%r19 926 LDREG TI_FLAGS(%r1),%r19
923 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NOTIFY_RESUME), %r20 927 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r20
924 and,COND(<>) %r19, %r20, %r0 928 and,COND(<>) %r19, %r20, %r0
925 b,n intr_restore /* skip past if we've nothing to do */ 929 b,n intr_restore /* skip past if we've nothing to do */
926 930
@@ -1189,7 +1193,7 @@ dtlb_miss_20w:
1189 nop 1193 nop
1190 1194
1191dtlb_check_alias_20w: 1195dtlb_check_alias_20w:
1192 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1196 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1193 1197
1194 idtlbt pte,prot 1198 idtlbt pte,prot
1195 1199
@@ -1213,7 +1217,7 @@ nadtlb_miss_20w:
1213 nop 1217 nop
1214 1218
1215nadtlb_check_alias_20w: 1219nadtlb_check_alias_20w:
1216 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 1220 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1217 1221
1218 idtlbt pte,prot 1222 idtlbt pte,prot
1219 1223
@@ -1245,7 +1249,7 @@ dtlb_miss_11:
1245 nop 1249 nop
1246 1250
1247dtlb_check_alias_11: 1251dtlb_check_alias_11:
1248 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1252 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,11
1249 1253
1250 idtlba pte,(va) 1254 idtlba pte,(va)
1251 idtlbp prot,(va) 1255 idtlbp prot,(va)
@@ -1277,7 +1281,7 @@ nadtlb_miss_11:
1277 nop 1281 nop
1278 1282
1279nadtlb_check_alias_11: 1283nadtlb_check_alias_11:
1280 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 1284 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,11
1281 1285
1282 idtlba pte,(va) 1286 idtlba pte,(va)
1283 idtlbp prot,(va) 1287 idtlbp prot,(va)
@@ -1304,7 +1308,7 @@ dtlb_miss_20:
1304 nop 1308 nop
1305 1309
1306dtlb_check_alias_20: 1310dtlb_check_alias_20:
1307 do_alias spc,t0,t1,va,pte,prot,dtlb_fault 1311 do_alias spc,t0,t1,va,pte,prot,dtlb_fault,20
1308 1312
1309 idtlbt pte,prot 1313 idtlbt pte,prot
1310 1314
@@ -1330,7 +1334,7 @@ nadtlb_miss_20:
1330 nop 1334 nop
1331 1335
1332nadtlb_check_alias_20: 1336nadtlb_check_alias_20:
1333 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate 1337 do_alias spc,t0,t1,va,pte,prot,nadtlb_emulate,20
1334 1338
1335 idtlbt pte,prot 1339 idtlbt pte,prot
1336 1340
@@ -1457,7 +1461,7 @@ naitlb_miss_20w:
1457 nop 1461 nop
1458 1462
1459naitlb_check_alias_20w: 1463naitlb_check_alias_20w:
1460 do_alias spc,t0,t1,va,pte,prot,naitlb_fault 1464 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1461 1465
1462 iitlbt pte,prot 1466 iitlbt pte,prot
1463 1467
@@ -1511,7 +1515,7 @@ naitlb_miss_11:
1511 nop 1515 nop
1512 1516
1513naitlb_check_alias_11: 1517naitlb_check_alias_11:
1514 do_alias spc,t0,t1,va,pte,prot,itlb_fault 1518 do_alias spc,t0,t1,va,pte,prot,itlb_fault,11
1515 1519
1516 iitlba pte,(%sr0, va) 1520 iitlba pte,(%sr0, va)
1517 iitlbp prot,(%sr0, va) 1521 iitlbp prot,(%sr0, va)
@@ -1557,7 +1561,7 @@ naitlb_miss_20:
1557 nop 1561 nop
1558 1562
1559naitlb_check_alias_20: 1563naitlb_check_alias_20:
1560 do_alias spc,t0,t1,va,pte,prot,naitlb_fault 1564 do_alias spc,t0,t1,va,pte,prot,naitlb_fault,20
1561 1565
1562 iitlbt pte,prot 1566 iitlbt pte,prot
1563 1567
@@ -2028,7 +2032,7 @@ syscall_check_resched:
2028 .import do_signal,code 2032 .import do_signal,code
2029syscall_check_sig: 2033syscall_check_sig:
2030 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19 2034 LDREG TI_FLAGS-THREAD_SZ_ALGN-FRAME_SIZE(%r30),%r19
2031 ldi (_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK), %r26 2035 ldi (_TIF_SIGPENDING|_TIF_NOTIFY_RESUME), %r26
2032 and,COND(<>) %r19, %r26, %r0 2036 and,COND(<>) %r19, %r26, %r0
2033 b,n syscall_restore /* skip past if we've nothing to do */ 2037 b,n syscall_restore /* skip past if we've nothing to do */
2034 2038
diff --git a/arch/parisc/kernel/parisc_ksyms.c b/arch/parisc/kernel/parisc_ksyms.c
index a7bb757a5497..ceec85de6290 100644
--- a/arch/parisc/kernel/parisc_ksyms.c
+++ b/arch/parisc/kernel/parisc_ksyms.c
@@ -44,7 +44,6 @@ EXPORT_SYMBOL(__cmpxchg_u64);
44#endif 44#endif
45 45
46#include <asm/uaccess.h> 46#include <asm/uaccess.h>
47EXPORT_SYMBOL(lstrncpy_from_user);
48EXPORT_SYMBOL(lclear_user); 47EXPORT_SYMBOL(lclear_user);
49EXPORT_SYMBOL(lstrnlen_user); 48EXPORT_SYMBOL(lstrnlen_user);
50 49
diff --git a/arch/parisc/kernel/signal.c b/arch/parisc/kernel/signal.c
index 4b9cb0d546d1..594459bde14e 100644
--- a/arch/parisc/kernel/signal.c
+++ b/arch/parisc/kernel/signal.c
@@ -48,9 +48,6 @@
48#define DBG(LEVEL, ...) 48#define DBG(LEVEL, ...)
49#endif 49#endif
50 50
51
52#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
53
54/* gcc will complain if a pointer is cast to an integer of different 51/* gcc will complain if a pointer is cast to an integer of different
55 * size. If you really need to do this (and we do for an ELF32 user 52 * size. If you really need to do this (and we do for an ELF32 user
56 * application in an ELF64 kernel) then you have to do a cast to an 53 * application in an ELF64 kernel) then you have to do a cast to an
@@ -131,7 +128,6 @@ sys_rt_sigreturn(struct pt_regs *regs, int in_syscall)
131 goto give_sigsegv; 128 goto give_sigsegv;
132 } 129 }
133 130
134 sigdelsetmask(&set, ~_BLOCKABLE);
135 set_current_blocked(&set); 131 set_current_blocked(&set);
136 132
137 /* Good thing we saved the old gr[30], eh? */ 133 /* Good thing we saved the old gr[30], eh? */
@@ -443,8 +439,9 @@ give_sigsegv:
443 439
444static long 440static long
445handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 441handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
446 sigset_t *oldset, struct pt_regs *regs, int in_syscall) 442 struct pt_regs *regs, int in_syscall)
447{ 443{
444 sigset_t *oldset = sigmask_to_save();
448 DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n", 445 DBG(1,"handle_signal: sig=%ld, ka=%p, info=%p, oldset=%p, regs=%p\n",
449 sig, ka, info, oldset, regs); 446 sig, ka, info, oldset, regs);
450 447
@@ -452,12 +449,13 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
452 if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall)) 449 if (!setup_rt_frame(sig, ka, info, oldset, regs, in_syscall))
453 return 0; 450 return 0;
454 451
455 block_sigmask(ka, sig); 452 signal_delivered(sig, info, ka, regs,
456
457 tracehook_signal_handler(sig, info, ka, regs,
458 test_thread_flag(TIF_SINGLESTEP) || 453 test_thread_flag(TIF_SINGLESTEP) ||
459 test_thread_flag(TIF_BLOCKSTEP)); 454 test_thread_flag(TIF_BLOCKSTEP));
460 455
456 DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
457 regs->gr[28]);
458
461 return 1; 459 return 1;
462} 460}
463 461
@@ -568,28 +566,17 @@ do_signal(struct pt_regs *regs, long in_syscall)
568 siginfo_t info; 566 siginfo_t info;
569 struct k_sigaction ka; 567 struct k_sigaction ka;
570 int signr; 568 int signr;
571 sigset_t *oldset;
572 569
573 DBG(1,"\ndo_signal: oldset=0x%p, regs=0x%p, sr7 %#lx, in_syscall=%d\n", 570 DBG(1,"\ndo_signal: regs=0x%p, sr7 %#lx, in_syscall=%d\n",
574 oldset, regs, regs->sr[7], in_syscall); 571 regs, regs->sr[7], in_syscall);
575 572
576 /* Everyone else checks to see if they are in kernel mode at 573 /* Everyone else checks to see if they are in kernel mode at
577 this point and exits if that's the case. I'm not sure why 574 this point and exits if that's the case. I'm not sure why
578 we would be called in that case, but for some reason we 575 we would be called in that case, but for some reason we
579 are. */ 576 are. */
580 577
581 if (test_thread_flag(TIF_RESTORE_SIGMASK))
582 oldset = &current->saved_sigmask;
583 else
584 oldset = &current->blocked;
585
586 DBG(1,"do_signal: oldset %08lx / %08lx\n",
587 oldset->sig[0], oldset->sig[1]);
588
589
590 /* May need to force signal if handle_signal failed to deliver */ 578 /* May need to force signal if handle_signal failed to deliver */
591 while (1) { 579 while (1) {
592
593 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 580 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
594 DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]); 581 DBG(3,"do_signal: signr = %d, regs->gr[28] = %ld\n", signr, regs->gr[28]);
595 582
@@ -603,14 +590,8 @@ do_signal(struct pt_regs *regs, long in_syscall)
603 /* Whee! Actually deliver the signal. If the 590 /* Whee! Actually deliver the signal. If the
604 delivery failed, we need to continue to iterate in 591 delivery failed, we need to continue to iterate in
605 this loop so we can deliver the SIGSEGV... */ 592 this loop so we can deliver the SIGSEGV... */
606 if (handle_signal(signr, &info, &ka, oldset, 593 if (handle_signal(signr, &info, &ka, regs, in_syscall))
607 regs, in_syscall)) {
608 DBG(1,KERN_DEBUG "do_signal: Exit (success), regs->gr[28] = %ld\n",
609 regs->gr[28]);
610 if (test_thread_flag(TIF_RESTORE_SIGMASK))
611 clear_thread_flag(TIF_RESTORE_SIGMASK);
612 return; 594 return;
613 }
614 } 595 }
615 /* end of while(1) looping forever if we can't force a signal */ 596 /* end of while(1) looping forever if we can't force a signal */
616 597
@@ -621,24 +602,16 @@ do_signal(struct pt_regs *regs, long in_syscall)
621 DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n", 602 DBG(1,"do_signal: Exit (not delivered), regs->gr[28] = %ld\n",
622 regs->gr[28]); 603 regs->gr[28]);
623 604
624 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 605 restore_saved_sigmask();
625 clear_thread_flag(TIF_RESTORE_SIGMASK);
626 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
627 }
628
629 return;
630} 606}
631 607
632void do_notify_resume(struct pt_regs *regs, long in_syscall) 608void do_notify_resume(struct pt_regs *regs, long in_syscall)
633{ 609{
634 if (test_thread_flag(TIF_SIGPENDING) || 610 if (test_thread_flag(TIF_SIGPENDING))
635 test_thread_flag(TIF_RESTORE_SIGMASK))
636 do_signal(regs, in_syscall); 611 do_signal(regs, in_syscall);
637 612
638 if (test_thread_flag(TIF_NOTIFY_RESUME)) { 613 if (test_thread_flag(TIF_NOTIFY_RESUME)) {
639 clear_thread_flag(TIF_NOTIFY_RESUME); 614 clear_thread_flag(TIF_NOTIFY_RESUME);
640 tracehook_notify_resume(regs); 615 tracehook_notify_resume(regs);
641 if (current->replacement_session_keyring)
642 key_replace_session_keyring();
643 } 616 }
644} 617}
diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c
index e14132430762..fd49aeda9eb8 100644
--- a/arch/parisc/kernel/signal32.c
+++ b/arch/parisc/kernel/signal32.c
@@ -47,8 +47,6 @@
47#define DBG(LEVEL, ...) 47#define DBG(LEVEL, ...)
48#endif 48#endif
49 49
50#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
51
52inline void 50inline void
53sigset_32to64(sigset_t *s64, compat_sigset_t *s32) 51sigset_32to64(sigset_t *s64, compat_sigset_t *s32)
54{ 52{
diff --git a/arch/parisc/kernel/vmlinux.lds.S b/arch/parisc/kernel/vmlinux.lds.S
index fa6f2b8163e0..64a999882e4f 100644
--- a/arch/parisc/kernel/vmlinux.lds.S
+++ b/arch/parisc/kernel/vmlinux.lds.S
@@ -50,8 +50,10 @@ SECTIONS
50 . = KERNEL_BINARY_TEXT_START; 50 . = KERNEL_BINARY_TEXT_START;
51 51
52 _text = .; /* Text and read-only data */ 52 _text = .; /* Text and read-only data */
53 .text ALIGN(16) : { 53 .head ALIGN(16) : {
54 HEAD_TEXT 54 HEAD_TEXT
55 } = 0
56 .text ALIGN(16) : {
55 TEXT_TEXT 57 TEXT_TEXT
56 SCHED_TEXT 58 SCHED_TEXT
57 LOCK_TEXT 59 LOCK_TEXT
@@ -65,7 +67,7 @@ SECTIONS
65 *(.fixup) 67 *(.fixup)
66 *(.lock.text) /* out-of-line lock text */ 68 *(.lock.text) /* out-of-line lock text */
67 *(.gnu.warning) 69 *(.gnu.warning)
68 } = 0 70 }
69 /* End of text section */ 71 /* End of text section */
70 _etext = .; 72 _etext = .;
71 73
diff --git a/arch/parisc/lib/lusercopy.S b/arch/parisc/lib/lusercopy.S
index 1bd23ccec17b..6f2d9355efe2 100644
--- a/arch/parisc/lib/lusercopy.S
+++ b/arch/parisc/lib/lusercopy.S
@@ -61,47 +61,6 @@
61 .endm 61 .endm
62 62
63 /* 63 /*
64 * long lstrncpy_from_user(char *dst, const char *src, long n)
65 *
66 * Returns -EFAULT if exception before terminator,
67 * N if the entire buffer filled,
68 * otherwise strlen (i.e. excludes zero byte)
69 */
70
71ENTRY(lstrncpy_from_user)
72 .proc
73 .callinfo NO_CALLS
74 .entry
75 comib,= 0,%r24,$lsfu_done
76 copy %r24,%r23
77 get_sr
781: ldbs,ma 1(%sr1,%r25),%r1
79$lsfu_loop:
80 stbs,ma %r1,1(%r26)
81 comib,=,n 0,%r1,$lsfu_done
82 addib,<>,n -1,%r24,$lsfu_loop
832: ldbs,ma 1(%sr1,%r25),%r1
84$lsfu_done:
85 sub %r23,%r24,%r28
86$lsfu_exit:
87 bv %r0(%r2)
88 nop
89 .exit
90ENDPROC(lstrncpy_from_user)
91
92 .section .fixup,"ax"
933: fixup_branch $lsfu_exit
94 ldi -EFAULT,%r28
95 .previous
96
97 .section __ex_table,"aw"
98 ASM_ULONG_INSN 1b,3b
99 ASM_ULONG_INSN 2b,3b
100 .previous
101
102 .procend
103
104 /*
105 * unsigned long lclear_user(void *to, unsigned long n) 64 * unsigned long lclear_user(void *to, unsigned long n)
106 * 65 *
107 * Returns 0 for success. 66 * Returns 0 for success.
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index c9aac24b02e2..32b394f3b854 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -100,6 +100,9 @@ static inline void hard_irq_disable(void)
100 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS; 100 get_paca()->irq_happened |= PACA_IRQ_HARD_DIS;
101} 101}
102 102
103/* include/linux/interrupt.h needs hard_irq_disable to be a macro */
104#define hard_irq_disable hard_irq_disable
105
103/* 106/*
104 * This is called by asynchronous interrupts to conditionally 107 * This is called by asynchronous interrupts to conditionally
105 * re-enable hard interrupts when soft-disabled after having 108 * re-enable hard interrupts when soft-disabled after having
diff --git a/arch/powerpc/include/asm/posix_types.h b/arch/powerpc/include/asm/posix_types.h
index f1393252bbda..2958c5b97b2d 100644
--- a/arch/powerpc/include/asm/posix_types.h
+++ b/arch/powerpc/include/asm/posix_types.h
@@ -16,9 +16,6 @@ typedef int __kernel_ssize_t;
16typedef long __kernel_ptrdiff_t; 16typedef long __kernel_ptrdiff_t;
17#define __kernel_size_t __kernel_size_t 17#define __kernel_size_t __kernel_size_t
18 18
19typedef unsigned short __kernel_nlink_t;
20#define __kernel_nlink_t __kernel_nlink_t
21
22typedef short __kernel_ipc_pid_t; 19typedef short __kernel_ipc_pid_t;
23#define __kernel_ipc_pid_t __kernel_ipc_pid_t 20#define __kernel_ipc_pid_t __kernel_ipc_pid_t
24#endif 21#endif
diff --git a/arch/powerpc/include/asm/stat.h b/arch/powerpc/include/asm/stat.h
index e4edc510b530..84880b80cc1c 100644
--- a/arch/powerpc/include/asm/stat.h
+++ b/arch/powerpc/include/asm/stat.h
@@ -30,11 +30,11 @@ struct stat {
30 unsigned long st_dev; 30 unsigned long st_dev;
31 ino_t st_ino; 31 ino_t st_ino;
32#ifdef __powerpc64__ 32#ifdef __powerpc64__
33 nlink_t st_nlink; 33 unsigned long st_nlink;
34 mode_t st_mode; 34 mode_t st_mode;
35#else 35#else
36 mode_t st_mode; 36 mode_t st_mode;
37 nlink_t st_nlink; 37 unsigned short st_nlink;
38#endif 38#endif
39 uid_t st_uid; 39 uid_t st_uid;
40 gid_t st_gid; 40 gid_t st_gid;
diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
index a556ccc16b58..68831e9cf82f 100644
--- a/arch/powerpc/include/asm/thread_info.h
+++ b/arch/powerpc/include/asm/thread_info.h
@@ -140,7 +140,23 @@ static inline void set_restore_sigmask(void)
140{ 140{
141 struct thread_info *ti = current_thread_info(); 141 struct thread_info *ti = current_thread_info();
142 ti->local_flags |= _TLF_RESTORE_SIGMASK; 142 ti->local_flags |= _TLF_RESTORE_SIGMASK;
143 set_bit(TIF_SIGPENDING, &ti->flags); 143 WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
144}
145static inline void clear_restore_sigmask(void)
146{
147 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
148}
149static inline bool test_restore_sigmask(void)
150{
151 return current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK;
152}
153static inline bool test_and_clear_restore_sigmask(void)
154{
155 struct thread_info *ti = current_thread_info();
156 if (!(ti->local_flags & _TLF_RESTORE_SIGMASK))
157 return false;
158 ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
159 return true;
144} 160}
145 161
146static inline bool test_thread_local_flags(unsigned int flags) 162static inline bool test_thread_local_flags(unsigned int flags)
diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
index 0b6d79617d7b..2e3200ca485f 100644
--- a/arch/powerpc/kernel/module_32.c
+++ b/arch/powerpc/kernel/module_32.c
@@ -176,8 +176,8 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
176 176
177static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val) 177static inline int entry_matches(struct ppc_plt_entry *entry, Elf32_Addr val)
178{ 178{
179 if (entry->jump[0] == 0x3d600000 + ((val + 0x8000) >> 16) 179 if (entry->jump[0] == 0x3d800000 + ((val + 0x8000) >> 16)
180 && entry->jump[1] == 0x396b0000 + (val & 0xffff)) 180 && entry->jump[1] == 0x398c0000 + (val & 0xffff))
181 return 1; 181 return 1;
182 return 0; 182 return 0;
183} 183}
@@ -204,10 +204,9 @@ static uint32_t do_plt_call(void *location,
204 entry++; 204 entry++;
205 } 205 }
206 206
207 /* Stolen from Paul Mackerras as well... */ 207 entry->jump[0] = 0x3d800000+((val+0x8000)>>16); /* lis r12,sym@ha */
208 entry->jump[0] = 0x3d600000+((val+0x8000)>>16); /* lis r11,sym@ha */ 208 entry->jump[1] = 0x398c0000 + (val&0xffff); /* addi r12,r12,sym@l*/
209 entry->jump[1] = 0x396b0000 + (val&0xffff); /* addi r11,r11,sym@l*/ 209 entry->jump[2] = 0x7d8903a6; /* mtctr r12 */
210 entry->jump[2] = 0x7d6903a6; /* mtctr r11 */
211 entry->jump[3] = 0x4e800420; /* bctr */ 210 entry->jump[3] = 0x4e800420; /* bctr */
212 211
213 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry); 212 DEBUGP("Initialized plt for 0x%x at %p\n", val, entry);
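
The PLT stub above switches its scratch register from r11 to r12, so every constant changes in step: 0x3d80..../0x398c..../0x7d8903a6 encode lis/addi/mtctr with register field 12 where the old 0x3d60..../0x396b..../0x7d6903a6 encoded 11. A quick standalone decode of the new words (plain D-form/XFX field extraction, independent of module_32.c) confirms the register fields.

/*
 * Standalone decode of the new stub words, just to show each instruction now
 * names r12 where the old constants named r11.  Not kernel code.
 */
#include <stdint.h>
#include <stdio.h>

static unsigned int reg_field(uint32_t insn)
{
	return (insn >> 21) & 0x1f;	/* RT/RS field */
}

int main(void)
{
	const uint32_t stub[] = { 0x3d800000, 0x398c0000, 0x7d8903a6, 0x4e800420 };

	printf("lis   -> r%u\n", reg_field(stub[0]));	/* expect 12 */
	printf("addi  -> r%u\n", reg_field(stub[1]));	/* expect 12 */
	printf("mtctr <- r%u\n", reg_field(stub[2]));	/* expect 12 */
	printf("tail  :  %s\n", stub[3] == 0x4e800420 ? "bctr" : "?");
	return 0;
}
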
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 651c5963662b..5c023c9cf16e 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -51,16 +51,6 @@ void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
51 return (void __user *)newsp; 51 return (void __user *)newsp;
52} 52}
53 53
54
55/*
56 * Restore the user process's signal mask
57 */
58void restore_sigmask(sigset_t *set)
59{
60 sigdelsetmask(set, ~_BLOCKABLE);
61 set_current_blocked(set);
62}
63
64static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka, 54static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
65 int has_handler) 55 int has_handler)
66{ 56{
@@ -114,30 +104,21 @@ static void check_syscall_restart(struct pt_regs *regs, struct k_sigaction *ka,
114 104
115static int do_signal(struct pt_regs *regs) 105static int do_signal(struct pt_regs *regs)
116{ 106{
117 sigset_t *oldset; 107 sigset_t *oldset = sigmask_to_save();
118 siginfo_t info; 108 siginfo_t info;
119 int signr; 109 int signr;
120 struct k_sigaction ka; 110 struct k_sigaction ka;
121 int ret; 111 int ret;
122 int is32 = is_32bit_task(); 112 int is32 = is_32bit_task();
123 113
124 if (current_thread_info()->local_flags & _TLF_RESTORE_SIGMASK)
125 oldset = &current->saved_sigmask;
126 else
127 oldset = &current->blocked;
128
129 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 114 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
130 115
131 /* Is there any syscall restart business here ? */ 116 /* Is there any syscall restart business here ? */
132 check_syscall_restart(regs, &ka, signr > 0); 117 check_syscall_restart(regs, &ka, signr > 0);
133 118
134 if (signr <= 0) { 119 if (signr <= 0) {
135 struct thread_info *ti = current_thread_info();
136 /* No signal to deliver -- put the saved sigmask back */ 120 /* No signal to deliver -- put the saved sigmask back */
137 if (ti->local_flags & _TLF_RESTORE_SIGMASK) { 121 restore_saved_sigmask();
138 ti->local_flags &= ~_TLF_RESTORE_SIGMASK;
139 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
140 }
141 regs->trap = 0; 122 regs->trap = 0;
142 return 0; /* no signals delivered */ 123 return 0; /* no signals delivered */
143 } 124 }
@@ -167,18 +148,7 @@ static int do_signal(struct pt_regs *regs)
167 148
168 regs->trap = 0; 149 regs->trap = 0;
169 if (ret) { 150 if (ret) {
170 block_sigmask(&ka, signr); 151 signal_delivered(signr, &info, &ka, regs,
171
172 /*
173 * A signal was successfully delivered; the saved sigmask is in
174 * its frame, and we can clear the TLF_RESTORE_SIGMASK flag.
175 */
176 current_thread_info()->local_flags &= ~_TLF_RESTORE_SIGMASK;
177
178 /*
179 * Let tracing know that we've done the handler setup.
180 */
181 tracehook_signal_handler(signr, &info, &ka, regs,
182 test_thread_flag(TIF_SINGLESTEP)); 152 test_thread_flag(TIF_SINGLESTEP));
183 } 153 }
184 154
@@ -193,8 +163,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
193 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 163 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
194 clear_thread_flag(TIF_NOTIFY_RESUME); 164 clear_thread_flag(TIF_NOTIFY_RESUME);
195 tracehook_notify_resume(regs); 165 tracehook_notify_resume(regs);
196 if (current->replacement_session_keyring)
197 key_replace_session_keyring();
198 } 166 }
199} 167}
200 168
diff --git a/arch/powerpc/kernel/signal.h b/arch/powerpc/kernel/signal.h
index 8dde973aaaf5..e00acb413934 100644
--- a/arch/powerpc/kernel/signal.h
+++ b/arch/powerpc/kernel/signal.h
@@ -10,13 +10,10 @@
10#ifndef _POWERPC_ARCH_SIGNAL_H 10#ifndef _POWERPC_ARCH_SIGNAL_H
11#define _POWERPC_ARCH_SIGNAL_H 11#define _POWERPC_ARCH_SIGNAL_H
12 12
13#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
14
15extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags); 13extern void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags);
16 14
17extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, 15extern void __user * get_sigframe(struct k_sigaction *ka, struct pt_regs *regs,
18 size_t frame_size, int is_32); 16 size_t frame_size, int is_32);
19extern void restore_sigmask(sigset_t *set);
20 17
21extern int handle_signal32(unsigned long sig, struct k_sigaction *ka, 18extern int handle_signal32(unsigned long sig, struct k_sigaction *ka,
22 siginfo_t *info, sigset_t *oldset, 19 siginfo_t *info, sigset_t *oldset,
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 61f6aff25edc..8b4c049aee20 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -919,7 +919,7 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
919 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp))) 919 if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
920 return -EFAULT; 920 return -EFAULT;
921#endif 921#endif
922 restore_sigmask(&set); 922 set_current_blocked(&set);
923 if (restore_user_regs(regs, mcp, sig)) 923 if (restore_user_regs(regs, mcp, sig))
924 return -EFAULT; 924 return -EFAULT;
925 925
@@ -1273,7 +1273,7 @@ long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
1273 set.sig[0] = sigctx.oldmask; 1273 set.sig[0] = sigctx.oldmask;
1274 set.sig[1] = sigctx._unused[3]; 1274 set.sig[1] = sigctx._unused[3];
1275#endif 1275#endif
1276 restore_sigmask(&set); 1276 set_current_blocked(&set);
1277 1277
1278 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs); 1278 sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
1279 addr = sr; 1279 addr = sr;
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 2692efdb154e..d183f8719a50 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -335,7 +335,7 @@ int sys_swapcontext(struct ucontext __user *old_ctx,
335 335
336 if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set))) 336 if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
337 do_exit(SIGSEGV); 337 do_exit(SIGSEGV);
338 restore_sigmask(&set); 338 set_current_blocked(&set);
339 if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext)) 339 if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
340 do_exit(SIGSEGV); 340 do_exit(SIGSEGV);
341 341
@@ -364,7 +364,7 @@ int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
364 364
365 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) 365 if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
366 goto badframe; 366 goto badframe;
367 restore_sigmask(&set); 367 set_current_blocked(&set);
368 if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext)) 368 if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
369 goto badframe; 369 goto badframe;
370 370
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 99a995c2a3f2..be171ee73bf8 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -475,6 +475,7 @@ void timer_interrupt(struct pt_regs * regs)
475 struct pt_regs *old_regs; 475 struct pt_regs *old_regs;
476 u64 *next_tb = &__get_cpu_var(decrementers_next_tb); 476 u64 *next_tb = &__get_cpu_var(decrementers_next_tb);
477 struct clock_event_device *evt = &__get_cpu_var(decrementers); 477 struct clock_event_device *evt = &__get_cpu_var(decrementers);
478 u64 now;
478 479
479 /* Ensure a positive value is written to the decrementer, or else 480 /* Ensure a positive value is written to the decrementer, or else
480 * some CPUs will continue to take decrementer exceptions. 481 * some CPUs will continue to take decrementer exceptions.
@@ -509,9 +510,16 @@ void timer_interrupt(struct pt_regs * regs)
509 irq_work_run(); 510 irq_work_run();
510 } 511 }
511 512
512 *next_tb = ~(u64)0; 513 now = get_tb_or_rtc();
513 if (evt->event_handler) 514 if (now >= *next_tb) {
514 evt->event_handler(evt); 515 *next_tb = ~(u64)0;
516 if (evt->event_handler)
517 evt->event_handler(evt);
518 } else {
519 now = *next_tb - now;
520 if (now <= DECREMENTER_MAX)
521 set_dec((int)now);
522 }
515 523
516#ifdef CONFIG_PPC64 524#ifdef CONFIG_PPC64
517 /* collect purr register values often, for accurate calculations */ 525 /* collect purr register values often, for accurate calculations */
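
The timer_interrupt() change above stops firing the clock event handler unconditionally: it reads the timebase, runs the handler only when next_tb has actually been reached, and otherwise programs the remaining delta back into the decrementer (capped at DECREMENTER_MAX). The standalone model below mirrors just that branch; the globals and the set_dec() stand-in are illustrative, not powerpc code.

/*
 * Standalone model of the added branch: handle the event only when it is due,
 * otherwise re-arm the (modelled) decrementer with the remaining delta.
 */
#include <stdint.h>
#include <stdio.h>

#define DECREMENTER_MAX 0x7fffffffu

static uint64_t next_tb = 1000;
static unsigned int dec;		/* models the decrementer register */

static void event_handler(void) { puts("tick"); }

static void timer_interrupt_model(uint64_t now)
{
	if (now >= next_tb) {
		next_tb = ~(uint64_t)0;
		event_handler();
	} else {
		uint64_t delta = next_tb - now;
		if (delta <= DECREMENTER_MAX)
			dec = (unsigned int)delta;	/* models set_dec() */
	}
}

int main(void)
{
	timer_interrupt_model(400);	/* early wakeup: just re-arm */
	printf("re-armed dec=%u\n", dec);
	timer_interrupt_model(1200);	/* genuinely due: handler runs */
	return 0;
}
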
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index c6af1d623839..3abe1b86e583 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -268,24 +268,45 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
268 return err; 268 return err;
269} 269}
270 270
271static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap) 271static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
272{ 272{
273 struct kvm *kvm = vcpu->kvm;
273 void *va; 274 void *va;
274 unsigned long nb; 275 unsigned long nb;
276 unsigned long gpa;
275 277
276 vpap->update_pending = 0; 278 /*
277 va = NULL; 279 * We need to pin the page pointed to by vpap->next_gpa,
278 if (vpap->next_gpa) { 280 * but we can't call kvmppc_pin_guest_page under the lock
279 va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb); 281 * as it does get_user_pages() and down_read(). So we
280 if (nb < vpap->len) { 282 * have to drop the lock, pin the page, then get the lock
281 /* 283 * again and check that a new area didn't get registered
282 * If it's now too short, it must be that userspace 284 * in the meantime.
283 * has changed the mappings underlying guest memory, 285 */
284 * so unregister the region. 286 for (;;) {
285 */ 287 gpa = vpap->next_gpa;
288 spin_unlock(&vcpu->arch.vpa_update_lock);
289 va = NULL;
290 nb = 0;
291 if (gpa)
292 va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
293 spin_lock(&vcpu->arch.vpa_update_lock);
294 if (gpa == vpap->next_gpa)
295 break;
296 /* sigh... unpin that one and try again */
297 if (va)
286 kvmppc_unpin_guest_page(kvm, va); 298 kvmppc_unpin_guest_page(kvm, va);
287 va = NULL; 299 }
288 } 300
301 vpap->update_pending = 0;
302 if (va && nb < vpap->len) {
303 /*
304 * If it's now too short, it must be that userspace
305 * has changed the mappings underlying guest memory,
306 * so unregister the region.
307 */
308 kvmppc_unpin_guest_page(kvm, va);
309 va = NULL;
289 } 310 }
290 if (vpap->pinned_addr) 311 if (vpap->pinned_addr)
291 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr); 312 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
@@ -296,20 +317,18 @@ static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
296 317
297static void kvmppc_update_vpas(struct kvm_vcpu *vcpu) 318static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
298{ 319{
299 struct kvm *kvm = vcpu->kvm;
300
301 spin_lock(&vcpu->arch.vpa_update_lock); 320 spin_lock(&vcpu->arch.vpa_update_lock);
302 if (vcpu->arch.vpa.update_pending) { 321 if (vcpu->arch.vpa.update_pending) {
303 kvmppc_update_vpa(kvm, &vcpu->arch.vpa); 322 kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
304 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr); 323 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
305 } 324 }
306 if (vcpu->arch.dtl.update_pending) { 325 if (vcpu->arch.dtl.update_pending) {
307 kvmppc_update_vpa(kvm, &vcpu->arch.dtl); 326 kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
308 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr; 327 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
309 vcpu->arch.dtl_index = 0; 328 vcpu->arch.dtl_index = 0;
310 } 329 }
311 if (vcpu->arch.slb_shadow.update_pending) 330 if (vcpu->arch.slb_shadow.update_pending)
312 kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow); 331 kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
313 spin_unlock(&vcpu->arch.vpa_update_lock); 332 spin_unlock(&vcpu->arch.vpa_update_lock);
314} 333}
315 334
@@ -800,12 +819,39 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
800 struct kvm_vcpu *vcpu, *vcpu0, *vnext; 819 struct kvm_vcpu *vcpu, *vcpu0, *vnext;
801 long ret; 820 long ret;
802 u64 now; 821 u64 now;
803 int ptid, i; 822 int ptid, i, need_vpa_update;
804 823
805 /* don't start if any threads have a signal pending */ 824 /* don't start if any threads have a signal pending */
806 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 825 need_vpa_update = 0;
826 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
807 if (signal_pending(vcpu->arch.run_task)) 827 if (signal_pending(vcpu->arch.run_task))
808 return 0; 828 return 0;
829 need_vpa_update |= vcpu->arch.vpa.update_pending |
830 vcpu->arch.slb_shadow.update_pending |
831 vcpu->arch.dtl.update_pending;
832 }
833
834 /*
835 * Initialize *vc, in particular vc->vcore_state, so we can
836 * drop the vcore lock if necessary.
837 */
838 vc->n_woken = 0;
839 vc->nap_count = 0;
840 vc->entry_exit_count = 0;
841 vc->vcore_state = VCORE_RUNNING;
842 vc->in_guest = 0;
843 vc->napping_threads = 0;
844
845 /*
846 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
847 * which can't be called with any spinlocks held.
848 */
849 if (need_vpa_update) {
850 spin_unlock(&vc->lock);
851 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
852 kvmppc_update_vpas(vcpu);
853 spin_lock(&vc->lock);
854 }
809 855
810 /* 856 /*
811 * Make sure we are running on thread 0, and that 857 * Make sure we are running on thread 0, and that
@@ -838,20 +884,10 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
838 if (vcpu->arch.ceded) 884 if (vcpu->arch.ceded)
839 vcpu->arch.ptid = ptid++; 885 vcpu->arch.ptid = ptid++;
840 886
841 vc->n_woken = 0;
842 vc->nap_count = 0;
843 vc->entry_exit_count = 0;
844 vc->vcore_state = VCORE_RUNNING;
845 vc->stolen_tb += mftb() - vc->preempt_tb; 887 vc->stolen_tb += mftb() - vc->preempt_tb;
846 vc->in_guest = 0;
847 vc->pcpu = smp_processor_id(); 888 vc->pcpu = smp_processor_id();
848 vc->napping_threads = 0;
849 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) { 889 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
850 kvmppc_start_thread(vcpu); 890 kvmppc_start_thread(vcpu);
851 if (vcpu->arch.vpa.update_pending ||
852 vcpu->arch.slb_shadow.update_pending ||
853 vcpu->arch.dtl.update_pending)
854 kvmppc_update_vpas(vcpu);
855 kvmppc_create_dtl_entry(vcpu, vc); 891 kvmppc_create_dtl_entry(vcpu, vc);
856 } 892 }
857 /* Grab any remaining hw threads so they can't go into the kernel */ 893 /* Grab any remaining hw threads so they can't go into the kernel */
diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
index 5b63bd3da4a9..e779642c25e5 100644
--- a/arch/powerpc/mm/mmu_context_nohash.c
+++ b/arch/powerpc/mm/mmu_context_nohash.c
@@ -333,9 +333,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
333 unsigned long action, void *hcpu) 333 unsigned long action, void *hcpu)
334{ 334{
335 unsigned int cpu = (unsigned int)(long)hcpu; 335 unsigned int cpu = (unsigned int)(long)hcpu;
336#ifdef CONFIG_HOTPLUG_CPU 336
337 struct task_struct *p;
338#endif
339 /* We don't touch CPU 0 map, it's allocated at aboot and kept 337 /* We don't touch CPU 0 map, it's allocated at aboot and kept
340 * around forever 338 * around forever
341 */ 339 */
@@ -358,12 +356,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
358 stale_map[cpu] = NULL; 356 stale_map[cpu] = NULL;
359 357
360 /* We also clear the cpu_vm_mask bits of CPUs going away */ 358 /* We also clear the cpu_vm_mask bits of CPUs going away */
361 read_lock(&tasklist_lock); 359 clear_tasks_mm_cpumask(cpu);
362 for_each_process(p) {
363 if (p->mm)
364 cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
365 }
366 read_unlock(&tasklist_lock);
367 break; 360 break;
368#endif /* CONFIG_HOTPLUG_CPU */ 361#endif /* CONFIG_HOTPLUG_CPU */
369 } 362 }
diff --git a/arch/powerpc/platforms/pseries/nvram.c b/arch/powerpc/platforms/pseries/nvram.c
index 36f957f31842..8733a86ad52e 100644
--- a/arch/powerpc/platforms/pseries/nvram.c
+++ b/arch/powerpc/platforms/pseries/nvram.c
@@ -68,9 +68,7 @@ static const char *pseries_nvram_os_partitions[] = {
68}; 68};
69 69
70static void oops_to_nvram(struct kmsg_dumper *dumper, 70static void oops_to_nvram(struct kmsg_dumper *dumper,
71 enum kmsg_dump_reason reason, 71 enum kmsg_dump_reason reason);
72 const char *old_msgs, unsigned long old_len,
73 const char *new_msgs, unsigned long new_len);
74 72
75static struct kmsg_dumper nvram_kmsg_dumper = { 73static struct kmsg_dumper nvram_kmsg_dumper = {
76 .dump = oops_to_nvram 74 .dump = oops_to_nvram
@@ -504,28 +502,6 @@ int __init pSeries_nvram_init(void)
504} 502}
505 503
506/* 504/*
507 * Try to capture the last capture_len bytes of the printk buffer. Return
508 * the amount actually captured.
509 */
510static size_t capture_last_msgs(const char *old_msgs, size_t old_len,
511 const char *new_msgs, size_t new_len,
512 char *captured, size_t capture_len)
513{
514 if (new_len >= capture_len) {
515 memcpy(captured, new_msgs + (new_len - capture_len),
516 capture_len);
517 return capture_len;
518 } else {
519 /* Grab the end of old_msgs. */
520 size_t old_tail_len = min(old_len, capture_len - new_len);
521 memcpy(captured, old_msgs + (old_len - old_tail_len),
522 old_tail_len);
523 memcpy(captured + old_tail_len, new_msgs, new_len);
524 return old_tail_len + new_len;
525 }
526}
527
528/*
529 * Are we using the ibm,rtas-log for oops/panic reports? And if so, 505 * Are we using the ibm,rtas-log for oops/panic reports? And if so,
530 * would logging this oops/panic overwrite an RTAS event that rtas_errd 506 * would logging this oops/panic overwrite an RTAS event that rtas_errd
531 * hasn't had a chance to read and process? Return 1 if so, else 0. 507 * hasn't had a chance to read and process? Return 1 if so, else 0.
@@ -541,27 +517,6 @@ static int clobbering_unread_rtas_event(void)
541 NVRAM_RTAS_READ_TIMEOUT); 517 NVRAM_RTAS_READ_TIMEOUT);
542} 518}
543 519
544/* Squeeze out each line's <n> severity prefix. */
545static size_t elide_severities(char *buf, size_t len)
546{
547 char *in, *out, *buf_end = buf + len;
548 /* Assume a <n> at the very beginning marks the start of a line. */
549 int newline = 1;
550
551 in = out = buf;
552 while (in < buf_end) {
553 if (newline && in+3 <= buf_end &&
554 *in == '<' && isdigit(in[1]) && in[2] == '>') {
555 in += 3;
556 newline = 0;
557 } else {
558 newline = (*in == '\n');
559 *out++ = *in++;
560 }
561 }
562 return out - buf;
563}
564
565/* Derived from logfs_compress() */ 520/* Derived from logfs_compress() */
566static int nvram_compress(const void *in, void *out, size_t inlen, 521static int nvram_compress(const void *in, void *out, size_t inlen,
567 size_t outlen) 522 size_t outlen)
@@ -619,9 +574,7 @@ static int zip_oops(size_t text_len)
619 * partition. If that's too much, go back and capture uncompressed text. 574 * partition. If that's too much, go back and capture uncompressed text.
620 */ 575 */
621static void oops_to_nvram(struct kmsg_dumper *dumper, 576static void oops_to_nvram(struct kmsg_dumper *dumper,
622 enum kmsg_dump_reason reason, 577 enum kmsg_dump_reason reason)
623 const char *old_msgs, unsigned long old_len,
624 const char *new_msgs, unsigned long new_len)
625{ 578{
626 static unsigned int oops_count = 0; 579 static unsigned int oops_count = 0;
627 static bool panicking = false; 580 static bool panicking = false;
@@ -660,14 +613,14 @@ static void oops_to_nvram(struct kmsg_dumper *dumper,
660 return; 613 return;
661 614
662 if (big_oops_buf) { 615 if (big_oops_buf) {
663 text_len = capture_last_msgs(old_msgs, old_len, 616 kmsg_dump_get_buffer(dumper, false,
664 new_msgs, new_len, big_oops_buf, big_oops_buf_sz); 617 big_oops_buf, big_oops_buf_sz, &text_len);
665 text_len = elide_severities(big_oops_buf, text_len);
666 rc = zip_oops(text_len); 618 rc = zip_oops(text_len);
667 } 619 }
668 if (rc != 0) { 620 if (rc != 0) {
669 text_len = capture_last_msgs(old_msgs, old_len, 621 kmsg_dump_rewind(dumper);
670 new_msgs, new_len, oops_data, oops_data_sz); 622 kmsg_dump_get_buffer(dumper, true,
623 oops_data, oops_data_sz, &text_len);
671 err_type = ERR_TYPE_KERNEL_PANIC; 624 err_type = ERR_TYPE_KERNEL_PANIC;
672 *oops_len = (u16) text_len; 625 *oops_len = (u16) text_len;
673 } 626 }
diff --git a/arch/s390/Kconfig b/arch/s390/Kconfig
index b403c533432c..a39b4690c171 100644
--- a/arch/s390/Kconfig
+++ b/arch/s390/Kconfig
@@ -87,6 +87,7 @@ config S390
87 select ARCH_SAVE_PAGE_KEYS if HIBERNATION 87 select ARCH_SAVE_PAGE_KEYS if HIBERNATION
88 select HAVE_MEMBLOCK 88 select HAVE_MEMBLOCK
89 select HAVE_MEMBLOCK_NODE_MAP 89 select HAVE_MEMBLOCK_NODE_MAP
90 select HAVE_CMPXCHG_LOCAL
90 select ARCH_DISCARD_MEMBLOCK 91 select ARCH_DISCARD_MEMBLOCK
91 select ARCH_INLINE_SPIN_TRYLOCK 92 select ARCH_INLINE_SPIN_TRYLOCK
92 select ARCH_INLINE_SPIN_TRYLOCK_BH 93 select ARCH_INLINE_SPIN_TRYLOCK_BH
diff --git a/arch/s390/include/asm/bitops.h b/arch/s390/include/asm/bitops.h
index e5beb490959b..a6ff5a83e227 100644
--- a/arch/s390/include/asm/bitops.h
+++ b/arch/s390/include/asm/bitops.h
@@ -13,8 +13,6 @@
13 * 13 *
14 */ 14 */
15 15
16#ifdef __KERNEL__
17
18#ifndef _LINUX_BITOPS_H 16#ifndef _LINUX_BITOPS_H
19#error only <linux/bitops.h> can be included directly 17#error only <linux/bitops.h> can be included directly
20#endif 18#endif
@@ -63,7 +61,7 @@ extern const char _ni_bitmap[];
63extern const char _zb_findmap[]; 61extern const char _zb_findmap[];
64extern const char _sb_findmap[]; 62extern const char _sb_findmap[];
65 63
66#ifndef __s390x__ 64#ifndef CONFIG_64BIT
67 65
68#define __BITOPS_ALIGN 3 66#define __BITOPS_ALIGN 3
69#define __BITOPS_WORDSIZE 32 67#define __BITOPS_WORDSIZE 32
@@ -83,7 +81,7 @@ extern const char _sb_findmap[];
83 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 81 : "d" (__val), "Q" (*(unsigned long *) __addr) \
84 : "cc"); 82 : "cc");
85 83
86#else /* __s390x__ */ 84#else /* CONFIG_64BIT */
87 85
88#define __BITOPS_ALIGN 7 86#define __BITOPS_ALIGN 7
89#define __BITOPS_WORDSIZE 64 87#define __BITOPS_WORDSIZE 64
@@ -103,7 +101,7 @@ extern const char _sb_findmap[];
103 : "d" (__val), "Q" (*(unsigned long *) __addr) \ 101 : "d" (__val), "Q" (*(unsigned long *) __addr) \
104 : "cc"); 102 : "cc");
105 103
106#endif /* __s390x__ */ 104#endif /* CONFIG_64BIT */
107 105
108#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE) 106#define __BITOPS_WORDS(bits) (((bits)+__BITOPS_WORDSIZE-1)/__BITOPS_WORDSIZE)
109#define __BITOPS_BARRIER() asm volatile("" : : : "memory") 107#define __BITOPS_BARRIER() asm volatile("" : : : "memory")
@@ -412,7 +410,7 @@ static inline unsigned long __ffz_word_loop(const unsigned long *addr,
412 unsigned long bytes = 0; 410 unsigned long bytes = 0;
413 411
414 asm volatile( 412 asm volatile(
415#ifndef __s390x__ 413#ifndef CONFIG_64BIT
416 " ahi %1,-1\n" 414 " ahi %1,-1\n"
417 " sra %1,5\n" 415 " sra %1,5\n"
418 " jz 1f\n" 416 " jz 1f\n"
@@ -449,7 +447,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
449 unsigned long bytes = 0; 447 unsigned long bytes = 0;
450 448
451 asm volatile( 449 asm volatile(
452#ifndef __s390x__ 450#ifndef CONFIG_64BIT
453 " ahi %1,-1\n" 451 " ahi %1,-1\n"
454 " sra %1,5\n" 452 " sra %1,5\n"
455 " jz 1f\n" 453 " jz 1f\n"
@@ -481,7 +479,7 @@ static inline unsigned long __ffs_word_loop(const unsigned long *addr,
481 */ 479 */
482static inline unsigned long __ffz_word(unsigned long nr, unsigned long word) 480static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
483{ 481{
484#ifdef __s390x__ 482#ifdef CONFIG_64BIT
485 if ((word & 0xffffffff) == 0xffffffff) { 483 if ((word & 0xffffffff) == 0xffffffff) {
486 word >>= 32; 484 word >>= 32;
487 nr += 32; 485 nr += 32;
@@ -505,7 +503,7 @@ static inline unsigned long __ffz_word(unsigned long nr, unsigned long word)
505 */ 503 */
506static inline unsigned long __ffs_word(unsigned long nr, unsigned long word) 504static inline unsigned long __ffs_word(unsigned long nr, unsigned long word)
507{ 505{
508#ifdef __s390x__ 506#ifdef CONFIG_64BIT
509 if ((word & 0xffffffff) == 0) { 507 if ((word & 0xffffffff) == 0) {
510 word >>= 32; 508 word >>= 32;
511 nr += 32; 509 nr += 32;
@@ -546,7 +544,7 @@ static inline unsigned long __load_ulong_le(const unsigned long *p,
546 unsigned long word; 544 unsigned long word;
547 545
548 p = (unsigned long *)((unsigned long) p + offset); 546 p = (unsigned long *)((unsigned long) p + offset);
549#ifndef __s390x__ 547#ifndef CONFIG_64BIT
550 asm volatile( 548 asm volatile(
551 " ic %0,%O1(%R1)\n" 549 " ic %0,%O1(%R1)\n"
552 " icm %0,2,%O1+1(%R1)\n" 550 " icm %0,2,%O1+1(%R1)\n"
@@ -834,7 +832,4 @@ static inline int find_next_bit_le(void *vaddr, unsigned long size,
834 832
835#include <asm-generic/bitops/ext2-atomic-setbit.h> 833#include <asm-generic/bitops/ext2-atomic-setbit.h>
836 834
837
838#endif /* __KERNEL__ */
839
840#endif /* _S390_BITOPS_H */ 835#endif /* _S390_BITOPS_H */
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index fc50a3342da3..4c8d4d5b8bd2 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -10,8 +10,6 @@
10#include <linux/spinlock.h> 10#include <linux/spinlock.h>
11#include <asm/types.h> 11#include <asm/types.h>
12 12
13#ifdef __KERNEL__
14
15#define LPM_ANYPATH 0xff 13#define LPM_ANYPATH 0xff
16#define __MAX_CSSID 0 14#define __MAX_CSSID 0
17 15
@@ -291,5 +289,3 @@ int chsc_sstpc(void *page, unsigned int op, u16 ctrl);
291int chsc_sstpi(void *page, void *result, size_t size); 289int chsc_sstpi(void *page, void *result, size_t size);
292 290
293#endif 291#endif
294
295#endif
diff --git a/arch/s390/include/asm/cmpxchg.h b/arch/s390/include/asm/cmpxchg.h
index 81d7908416cf..8d798e962b63 100644
--- a/arch/s390/include/asm/cmpxchg.h
+++ b/arch/s390/include/asm/cmpxchg.h
@@ -29,7 +29,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
29 " cs %0,0,%4\n" 29 " cs %0,0,%4\n"
30 " jl 0b\n" 30 " jl 0b\n"
31 : "=&d" (old), "=Q" (*(int *) addr) 31 : "=&d" (old), "=Q" (*(int *) addr)
32 : "d" (x << shift), "d" (~(255 << shift)), 32 : "d" ((x & 0xff) << shift), "d" (~(0xff << shift)),
33 "Q" (*(int *) addr) : "memory", "cc", "0"); 33 "Q" (*(int *) addr) : "memory", "cc", "0");
34 return old >> shift; 34 return old >> shift;
35 case 2: 35 case 2:
@@ -44,7 +44,7 @@ static inline unsigned long __xchg(unsigned long x, void *ptr, int size)
44 " cs %0,0,%4\n" 44 " cs %0,0,%4\n"
45 " jl 0b\n" 45 " jl 0b\n"
46 : "=&d" (old), "=Q" (*(int *) addr) 46 : "=&d" (old), "=Q" (*(int *) addr)
47 : "d" (x << shift), "d" (~(65535 << shift)), 47 : "d" ((x & 0xffff) << shift), "d" (~(0xffff << shift)),
48 "Q" (*(int *) addr) : "memory", "cc", "0"); 48 "Q" (*(int *) addr) : "memory", "cc", "0");
49 return old >> shift; 49 return old >> shift;
50 case 4: 50 case 4:
@@ -113,9 +113,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
113 " nr %1,%5\n" 113 " nr %1,%5\n"
114 " jnz 0b\n" 114 " jnz 0b\n"
115 "1:" 115 "1:"
116 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) 116 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
117 : "d" (old << shift), "d" (new << shift), 117 : "d" ((old & 0xff) << shift),
118 "d" (~(255 << shift)), "Q" (*(int *) ptr) 118 "d" ((new & 0xff) << shift),
119 "d" (~(0xff << shift))
119 : "memory", "cc"); 120 : "memory", "cc");
120 return prev >> shift; 121 return prev >> shift;
121 case 2: 122 case 2:
@@ -134,9 +135,10 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
134 " nr %1,%5\n" 135 " nr %1,%5\n"
135 " jnz 0b\n" 136 " jnz 0b\n"
136 "1:" 137 "1:"
137 : "=&d" (prev), "=&d" (tmp), "=Q" (*(int *) ptr) 138 : "=&d" (prev), "=&d" (tmp), "+Q" (*(int *) addr)
138 : "d" (old << shift), "d" (new << shift), 139 : "d" ((old & 0xffff) << shift),
139 "d" (~(65535 << shift)), "Q" (*(int *) ptr) 140 "d" ((new & 0xffff) << shift),
141 "d" (~(0xffff << shift))
140 : "memory", "cc"); 142 : "memory", "cc");
141 return prev >> shift; 143 return prev >> shift;
142 case 4: 144 case 4:
@@ -160,9 +162,14 @@ static inline unsigned long __cmpxchg(void *ptr, unsigned long old,
160 return old; 162 return old;
161} 163}
162 164
163#define cmpxchg(ptr, o, n) \ 165#define cmpxchg(ptr, o, n) \
164 ((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \ 166({ \
165 (unsigned long)(n), sizeof(*(ptr)))) 167 __typeof__(*(ptr)) __ret; \
168 __ret = (__typeof__(*(ptr))) \
169 __cmpxchg((ptr), (unsigned long)(o), (unsigned long)(n), \
170 sizeof(*(ptr))); \
171 __ret; \
172})
166 173
167#ifdef CONFIG_64BIT 174#ifdef CONFIG_64BIT
168#define cmpxchg64(ptr, o, n) \ 175#define cmpxchg64(ptr, o, n) \
@@ -181,13 +188,19 @@ static inline unsigned long long __cmpxchg64(void *ptr,
181 " cds %0,%2,%1" 188 " cds %0,%2,%1"
182 : "+&d" (rp_old), "=Q" (ptr) 189 : "+&d" (rp_old), "=Q" (ptr)
183 : "d" (rp_new), "Q" (ptr) 190 : "d" (rp_new), "Q" (ptr)
184 : "cc"); 191 : "memory", "cc");
185 return rp_old.pair; 192 return rp_old.pair;
186} 193}
187#define cmpxchg64(ptr, o, n) \ 194
188 ((__typeof__(*(ptr)))__cmpxchg64((ptr), \ 195#define cmpxchg64(ptr, o, n) \
189 (unsigned long long)(o), \ 196({ \
190 (unsigned long long)(n))) 197 __typeof__(*(ptr)) __ret; \
198 __ret = (__typeof__(*(ptr))) \
199 __cmpxchg64((ptr), \
200 (unsigned long long)(o), \
201 (unsigned long long)(n)); \
202 __ret; \
203})
191#endif /* CONFIG_64BIT */ 204#endif /* CONFIG_64BIT */
192 205
193#include <asm-generic/cmpxchg-local.h> 206#include <asm-generic/cmpxchg-local.h>
@@ -216,8 +229,13 @@ static inline unsigned long __cmpxchg_local(void *ptr,
216 * them available. 229 * them available.
217 */ 230 */
218#define cmpxchg_local(ptr, o, n) \ 231#define cmpxchg_local(ptr, o, n) \
219 ((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \ 232({ \
220 (unsigned long)(n), sizeof(*(ptr)))) 233 __typeof__(*(ptr)) __ret; \
234 __ret = (__typeof__(*(ptr))) \
235 __cmpxchg_local((ptr), (unsigned long)(o), \
236 (unsigned long)(n), sizeof(*(ptr))); \
237 __ret; \
238})
221 239
222#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n)) 240#define cmpxchg64_local(ptr, o, n) cmpxchg64((ptr), (o), (n))
223 241
diff --git a/arch/s390/include/asm/cputime.h b/arch/s390/include/asm/cputime.h
index 24ef186a1c4f..718374de9c7f 100644
--- a/arch/s390/include/asm/cputime.h
+++ b/arch/s390/include/asm/cputime.h
@@ -21,15 +21,15 @@ typedef unsigned long long __nocast cputime64_t;
21 21
22static inline unsigned long __div(unsigned long long n, unsigned long base) 22static inline unsigned long __div(unsigned long long n, unsigned long base)
23{ 23{
24#ifndef __s390x__ 24#ifndef CONFIG_64BIT
25 register_pair rp; 25 register_pair rp;
26 26
27 rp.pair = n >> 1; 27 rp.pair = n >> 1;
28 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1)); 28 asm ("dr %0,%1" : "+d" (rp) : "d" (base >> 1));
29 return rp.subreg.odd; 29 return rp.subreg.odd;
30#else /* __s390x__ */ 30#else /* CONFIG_64BIT */
31 return n / base; 31 return n / base;
32#endif /* __s390x__ */ 32#endif /* CONFIG_64BIT */
33} 33}
34 34
35#define cputime_one_jiffy jiffies_to_cputime(1) 35#define cputime_one_jiffy jiffies_to_cputime(1)
@@ -100,7 +100,7 @@ static inline void cputime_to_timespec(const cputime_t cputime,
100 struct timespec *value) 100 struct timespec *value)
101{ 101{
102 unsigned long long __cputime = (__force unsigned long long) cputime; 102 unsigned long long __cputime = (__force unsigned long long) cputime;
103#ifndef __s390x__ 103#ifndef CONFIG_64BIT
104 register_pair rp; 104 register_pair rp;
105 105
106 rp.pair = __cputime >> 1; 106 rp.pair = __cputime >> 1;
@@ -128,7 +128,7 @@ static inline void cputime_to_timeval(const cputime_t cputime,
128 struct timeval *value) 128 struct timeval *value)
129{ 129{
130 unsigned long long __cputime = (__force unsigned long long) cputime; 130 unsigned long long __cputime = (__force unsigned long long) cputime;
131#ifndef __s390x__ 131#ifndef CONFIG_64BIT
132 register_pair rp; 132 register_pair rp;
133 133
134 rp.pair = __cputime >> 1; 134 rp.pair = __cputime >> 1;
diff --git a/arch/s390/include/asm/ctl_reg.h b/arch/s390/include/asm/ctl_reg.h
index ecde9417d669..debfda33d1f8 100644
--- a/arch/s390/include/asm/ctl_reg.h
+++ b/arch/s390/include/asm/ctl_reg.h
@@ -7,7 +7,7 @@
7#ifndef __ASM_CTL_REG_H 7#ifndef __ASM_CTL_REG_H
8#define __ASM_CTL_REG_H 8#define __ASM_CTL_REG_H
9 9
10#ifdef __s390x__ 10#ifdef CONFIG_64BIT
11 11
12#define __ctl_load(array, low, high) ({ \ 12#define __ctl_load(array, low, high) ({ \
13 typedef struct { char _[sizeof(array)]; } addrtype; \ 13 typedef struct { char _[sizeof(array)]; } addrtype; \
@@ -25,7 +25,7 @@
25 : "i" (low), "i" (high)); \ 25 : "i" (low), "i" (high)); \
26 }) 26 })
27 27
28#else /* __s390x__ */ 28#else /* CONFIG_64BIT */
29 29
30#define __ctl_load(array, low, high) ({ \ 30#define __ctl_load(array, low, high) ({ \
31 typedef struct { char _[sizeof(array)]; } addrtype; \ 31 typedef struct { char _[sizeof(array)]; } addrtype; \
@@ -43,7 +43,7 @@
43 : "i" (low), "i" (high)); \ 43 : "i" (low), "i" (high)); \
44 }) 44 })
45 45
46#endif /* __s390x__ */ 46#endif /* CONFIG_64BIT */
47 47
48#define __ctl_set_bit(cr, bit) ({ \ 48#define __ctl_set_bit(cr, bit) ({ \
49 unsigned long __dummy; \ 49 unsigned long __dummy; \
diff --git a/arch/s390/include/asm/current.h b/arch/s390/include/asm/current.h
index 83cf36cde2da..7a68084ec2f0 100644
--- a/arch/s390/include/asm/current.h
+++ b/arch/s390/include/asm/current.h
@@ -11,13 +11,10 @@
11#ifndef _S390_CURRENT_H 11#ifndef _S390_CURRENT_H
12#define _S390_CURRENT_H 12#define _S390_CURRENT_H
13 13
14#ifdef __KERNEL__
15#include <asm/lowcore.h> 14#include <asm/lowcore.h>
16 15
17struct task_struct; 16struct task_struct;
18 17
19#define current ((struct task_struct *const)S390_lowcore.current_task) 18#define current ((struct task_struct *const)S390_lowcore.current_task)
20 19
21#endif
22
23#endif /* !(_S390_CURRENT_H) */ 20#endif /* !(_S390_CURRENT_H) */
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index c4ee39f7a4d6..06151e6a3098 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -107,11 +107,11 @@
107/* 107/*
108 * These are used to set parameters in the core dumps. 108 * These are used to set parameters in the core dumps.
109 */ 109 */
110#ifndef __s390x__ 110#ifndef CONFIG_64BIT
111#define ELF_CLASS ELFCLASS32 111#define ELF_CLASS ELFCLASS32
112#else /* __s390x__ */ 112#else /* CONFIG_64BIT */
113#define ELF_CLASS ELFCLASS64 113#define ELF_CLASS ELFCLASS64
114#endif /* __s390x__ */ 114#endif /* CONFIG_64BIT */
115#define ELF_DATA ELFDATA2MSB 115#define ELF_DATA ELFDATA2MSB
116#define ELF_ARCH EM_S390 116#define ELF_ARCH EM_S390
117 117
@@ -181,9 +181,9 @@ extern unsigned long elf_hwcap;
181extern char elf_platform[]; 181extern char elf_platform[];
182#define ELF_PLATFORM (elf_platform) 182#define ELF_PLATFORM (elf_platform)
183 183
184#ifndef __s390x__ 184#ifndef CONFIG_64BIT
185#define SET_PERSONALITY(ex) set_personality(PER_LINUX) 185#define SET_PERSONALITY(ex) set_personality(PER_LINUX)
186#else /* __s390x__ */ 186#else /* CONFIG_64BIT */
187#define SET_PERSONALITY(ex) \ 187#define SET_PERSONALITY(ex) \
188do { \ 188do { \
189 if (personality(current->personality) != PER_LINUX32) \ 189 if (personality(current->personality) != PER_LINUX32) \
@@ -194,7 +194,7 @@ do { \
194 else \ 194 else \
195 clear_thread_flag(TIF_31BIT); \ 195 clear_thread_flag(TIF_31BIT); \
196} while (0) 196} while (0)
197#endif /* __s390x__ */ 197#endif /* CONFIG_64BIT */
198 198
199#define STACK_RND_MASK 0x7ffUL 199#define STACK_RND_MASK 0x7ffUL
200 200
diff --git a/arch/s390/include/asm/futex.h b/arch/s390/include/asm/futex.h
index 81cf36b691f1..96bc83ea5c90 100644
--- a/arch/s390/include/asm/futex.h
+++ b/arch/s390/include/asm/futex.h
@@ -1,8 +1,6 @@
1#ifndef _ASM_S390_FUTEX_H 1#ifndef _ASM_S390_FUTEX_H
2#define _ASM_S390_FUTEX_H 2#define _ASM_S390_FUTEX_H
3 3
4#ifdef __KERNEL__
5
6#include <linux/futex.h> 4#include <linux/futex.h>
7#include <linux/uaccess.h> 5#include <linux/uaccess.h>
8#include <asm/errno.h> 6#include <asm/errno.h>
@@ -48,5 +46,4 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
48 return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); 46 return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval);
49} 47}
50 48
51#endif /* __KERNEL__ */
52#endif /* _ASM_S390_FUTEX_H */ 49#endif /* _ASM_S390_FUTEX_H */
diff --git a/arch/s390/include/asm/idals.h b/arch/s390/include/asm/idals.h
index aae276d00383..aef0dde340d1 100644
--- a/arch/s390/include/asm/idals.h
+++ b/arch/s390/include/asm/idals.h
@@ -20,7 +20,7 @@
20#include <asm/cio.h> 20#include <asm/cio.h>
21#include <asm/uaccess.h> 21#include <asm/uaccess.h>
22 22
23#ifdef __s390x__ 23#ifdef CONFIG_64BIT
24#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */ 24#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
25#else 25#else
26#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */ 26#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
@@ -33,7 +33,7 @@
33static inline int 33static inline int
34idal_is_needed(void *vaddr, unsigned int length) 34idal_is_needed(void *vaddr, unsigned int length)
35{ 35{
36#ifdef __s390x__ 36#ifdef CONFIG_64BIT
37 return ((__pa(vaddr) + length - 1) >> 31) != 0; 37 return ((__pa(vaddr) + length - 1) >> 31) != 0;
38#else 38#else
39 return 0; 39 return 0;
@@ -78,7 +78,7 @@ static inline unsigned long *idal_create_words(unsigned long *idaws,
78static inline int 78static inline int
79set_normalized_cda(struct ccw1 * ccw, void *vaddr) 79set_normalized_cda(struct ccw1 * ccw, void *vaddr)
80{ 80{
81#ifdef __s390x__ 81#ifdef CONFIG_64BIT
82 unsigned int nridaws; 82 unsigned int nridaws;
83 unsigned long *idal; 83 unsigned long *idal;
84 84
@@ -105,7 +105,7 @@ set_normalized_cda(struct ccw1 * ccw, void *vaddr)
105static inline void 105static inline void
106clear_normalized_cda(struct ccw1 * ccw) 106clear_normalized_cda(struct ccw1 * ccw)
107{ 107{
108#ifdef __s390x__ 108#ifdef CONFIG_64BIT
109 if (ccw->flags & CCW_FLAG_IDA) { 109 if (ccw->flags & CCW_FLAG_IDA) {
110 kfree((void *)(unsigned long) ccw->cda); 110 kfree((void *)(unsigned long) ccw->cda);
111 ccw->flags &= ~CCW_FLAG_IDA; 111 ccw->flags &= ~CCW_FLAG_IDA;
@@ -182,7 +182,7 @@ idal_buffer_free(struct idal_buffer *ib)
182static inline int 182static inline int
183__idal_buffer_is_needed(struct idal_buffer *ib) 183__idal_buffer_is_needed(struct idal_buffer *ib)
184{ 184{
185#ifdef __s390x__ 185#ifdef CONFIG_64BIT
186 return ib->size > (4096ul << ib->page_order) || 186 return ib->size > (4096ul << ib->page_order) ||
187 idal_is_needed(ib->data[0], ib->size); 187 idal_is_needed(ib->data[0], ib->size);
188#else 188#else
diff --git a/arch/s390/include/asm/io.h b/arch/s390/include/asm/io.h
index 27216d317991..f81a0975cbea 100644
--- a/arch/s390/include/asm/io.h
+++ b/arch/s390/include/asm/io.h
@@ -11,8 +11,6 @@
11#ifndef _S390_IO_H 11#ifndef _S390_IO_H
12#define _S390_IO_H 12#define _S390_IO_H
13 13
14#ifdef __KERNEL__
15
16#include <asm/page.h> 14#include <asm/page.h>
17 15
18#define IO_SPACE_LIMIT 0xffffffff 16#define IO_SPACE_LIMIT 0xffffffff
@@ -46,6 +44,4 @@ void unxlate_dev_mem_ptr(unsigned long phys, void *addr);
46 */ 44 */
47#define xlate_dev_kmem_ptr(p) p 45#define xlate_dev_kmem_ptr(p) p
48 46
49#endif /* __KERNEL__ */
50
51#endif 47#endif
diff --git a/arch/s390/include/asm/irq.h b/arch/s390/include/asm/irq.h
index 5289cacd4861..2b9d41899d21 100644
--- a/arch/s390/include/asm/irq.h
+++ b/arch/s390/include/asm/irq.h
@@ -17,7 +17,8 @@ enum interruption_class {
17 EXTINT_VRT, 17 EXTINT_VRT,
18 EXTINT_SCP, 18 EXTINT_SCP,
19 EXTINT_IUC, 19 EXTINT_IUC,
20 EXTINT_CPM, 20 EXTINT_CMS,
21 EXTINT_CMC,
21 IOINT_CIO, 22 IOINT_CIO,
22 IOINT_QAI, 23 IOINT_QAI,
23 IOINT_DAS, 24 IOINT_DAS,
diff --git a/arch/s390/include/asm/kexec.h b/arch/s390/include/asm/kexec.h
index 3f30dac804ea..f4f38826eebb 100644
--- a/arch/s390/include/asm/kexec.h
+++ b/arch/s390/include/asm/kexec.h
@@ -10,10 +10,8 @@
10#ifndef _S390_KEXEC_H 10#ifndef _S390_KEXEC_H
11#define _S390_KEXEC_H 11#define _S390_KEXEC_H
12 12
13#ifdef __KERNEL__
14#include <asm/page.h>
15#endif
16#include <asm/processor.h> 13#include <asm/processor.h>
14#include <asm/page.h>
17/* 15/*
18 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return. 16 * KEXEC_SOURCE_MEMORY_LIMIT maximum page get_free_page can return.
19 * I.e. Maximum page that is mapped directly into kernel memory, 17 * I.e. Maximum page that is mapped directly into kernel memory,
diff --git a/arch/s390/include/asm/kmap_types.h b/arch/s390/include/asm/kmap_types.h
index 94ec3ee07983..0a88622339ee 100644
--- a/arch/s390/include/asm/kmap_types.h
+++ b/arch/s390/include/asm/kmap_types.h
@@ -1,8 +1,6 @@
1#ifdef __KERNEL__
2#ifndef _ASM_KMAP_TYPES_H 1#ifndef _ASM_KMAP_TYPES_H
3#define _ASM_KMAP_TYPES_H 2#define _ASM_KMAP_TYPES_H
4 3
5#include <asm-generic/kmap_types.h> 4#include <asm-generic/kmap_types.h>
6 5
7#endif 6#endif
8#endif /* __KERNEL__ */
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 5d09e405c54d..69bdf72e95ec 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -49,7 +49,7 @@ static inline int init_new_context(struct task_struct *tsk,
49 49
50#define destroy_context(mm) do { } while (0) 50#define destroy_context(mm) do { } while (0)
51 51
52#ifndef __s390x__ 52#ifndef CONFIG_64BIT
53#define LCTL_OPCODE "lctl" 53#define LCTL_OPCODE "lctl"
54#else 54#else
55#define LCTL_OPCODE "lctlg" 55#define LCTL_OPCODE "lctlg"
diff --git a/arch/s390/include/asm/module.h b/arch/s390/include/asm/module.h
index 1cc1c5af705a..f0b6b26b6e59 100644
--- a/arch/s390/include/asm/module.h
+++ b/arch/s390/include/asm/module.h
@@ -28,7 +28,7 @@ struct mod_arch_specific
28 struct mod_arch_syminfo *syminfo; 28 struct mod_arch_syminfo *syminfo;
29}; 29};
30 30
31#ifdef __s390x__ 31#ifdef CONFIG_64BIT
32#define ElfW(x) Elf64_ ## x 32#define ElfW(x) Elf64_ ## x
33#define ELFW(x) ELF64_ ## x 33#define ELFW(x) ELF64_ ## x
34#else 34#else
diff --git a/arch/s390/include/asm/os_info.h b/arch/s390/include/asm/os_info.h
index d07518af09ea..295f2c4f1c96 100644
--- a/arch/s390/include/asm/os_info.h
+++ b/arch/s390/include/asm/os_info.h
@@ -13,7 +13,6 @@
13 13
14#define OS_INFO_VMCOREINFO 0 14#define OS_INFO_VMCOREINFO 0
15#define OS_INFO_REIPL_BLOCK 1 15#define OS_INFO_REIPL_BLOCK 1
16#define OS_INFO_INIT_FN 2
17 16
18struct os_info_entry { 17struct os_info_entry {
19 u64 addr; 18 u64 addr;
@@ -28,8 +27,8 @@ struct os_info {
28 u16 version_minor; 27 u16 version_minor;
29 u64 crashkernel_addr; 28 u64 crashkernel_addr;
30 u64 crashkernel_size; 29 u64 crashkernel_size;
31 struct os_info_entry entry[3]; 30 struct os_info_entry entry[2];
32 u8 reserved[4004]; 31 u8 reserved[4024];
33} __packed; 32} __packed;
34 33
35void os_info_init(void); 34void os_info_init(void);
diff --git a/arch/s390/include/asm/percpu.h b/arch/s390/include/asm/percpu.h
index 0fbd1899c7b0..6537e72e0853 100644
--- a/arch/s390/include/asm/percpu.h
+++ b/arch/s390/include/asm/percpu.h
@@ -15,7 +15,7 @@
15 * per cpu area, use weak definitions to force the compiler to 15 * per cpu area, use weak definitions to force the compiler to
16 * generate external references. 16 * generate external references.
17 */ 17 */
18#if defined(CONFIG_SMP) && defined(__s390x__) && defined(MODULE) 18#if defined(CONFIG_SMP) && defined(CONFIG_64BIT) && defined(MODULE)
19#define ARCH_NEEDS_WEAK_PER_CPU 19#define ARCH_NEEDS_WEAK_PER_CPU
20#endif 20#endif
21 21
diff --git a/arch/s390/include/asm/pgalloc.h b/arch/s390/include/asm/pgalloc.h
index 78e3041919de..43078c194394 100644
--- a/arch/s390/include/asm/pgalloc.h
+++ b/arch/s390/include/asm/pgalloc.h
@@ -48,7 +48,7 @@ static inline void crst_table_init(unsigned long *crst, unsigned long entry)
48 clear_table(crst, entry, sizeof(unsigned long)*2048); 48 clear_table(crst, entry, sizeof(unsigned long)*2048);
49} 49}
50 50
51#ifndef __s390x__ 51#ifndef CONFIG_64BIT
52 52
53static inline unsigned long pgd_entry_type(struct mm_struct *mm) 53static inline unsigned long pgd_entry_type(struct mm_struct *mm)
54{ 54{
@@ -64,7 +64,7 @@ static inline unsigned long pgd_entry_type(struct mm_struct *mm)
64#define pgd_populate(mm, pgd, pud) BUG() 64#define pgd_populate(mm, pgd, pud) BUG()
65#define pud_populate(mm, pud, pmd) BUG() 65#define pud_populate(mm, pud, pmd) BUG()
66 66
67#else /* __s390x__ */ 67#else /* CONFIG_64BIT */
68 68
69static inline unsigned long pgd_entry_type(struct mm_struct *mm) 69static inline unsigned long pgd_entry_type(struct mm_struct *mm)
70{ 70{
@@ -106,7 +106,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
106 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd); 106 pud_val(*pud) = _REGION3_ENTRY | __pa(pmd);
107} 107}
108 108
109#endif /* __s390x__ */ 109#endif /* CONFIG_64BIT */
110 110
111static inline pgd_t *pgd_alloc(struct mm_struct *mm) 111static inline pgd_t *pgd_alloc(struct mm_struct *mm)
112{ 112{
diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h
index 011358c1b18e..b3227415abda 100644
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -74,15 +74,15 @@ static inline int is_zero_pfn(unsigned long pfn)
74 * table can map 74 * table can map
75 * PGDIR_SHIFT determines what a third-level page table entry can map 75 * PGDIR_SHIFT determines what a third-level page table entry can map
76 */ 76 */
77#ifndef __s390x__ 77#ifndef CONFIG_64BIT
78# define PMD_SHIFT 20 78# define PMD_SHIFT 20
79# define PUD_SHIFT 20 79# define PUD_SHIFT 20
80# define PGDIR_SHIFT 20 80# define PGDIR_SHIFT 20
81#else /* __s390x__ */ 81#else /* CONFIG_64BIT */
82# define PMD_SHIFT 20 82# define PMD_SHIFT 20
83# define PUD_SHIFT 31 83# define PUD_SHIFT 31
84# define PGDIR_SHIFT 42 84# define PGDIR_SHIFT 42
85#endif /* __s390x__ */ 85#endif /* CONFIG_64BIT */
86 86
87#define PMD_SIZE (1UL << PMD_SHIFT) 87#define PMD_SIZE (1UL << PMD_SHIFT)
88#define PMD_MASK (~(PMD_SIZE-1)) 88#define PMD_MASK (~(PMD_SIZE-1))
@@ -98,13 +98,13 @@ static inline int is_zero_pfn(unsigned long pfn)
98 * that leads to 1024 pte per pgd 98 * that leads to 1024 pte per pgd
99 */ 99 */
100#define PTRS_PER_PTE 256 100#define PTRS_PER_PTE 256
101#ifndef __s390x__ 101#ifndef CONFIG_64BIT
102#define PTRS_PER_PMD 1 102#define PTRS_PER_PMD 1
103#define PTRS_PER_PUD 1 103#define PTRS_PER_PUD 1
104#else /* __s390x__ */ 104#else /* CONFIG_64BIT */
105#define PTRS_PER_PMD 2048 105#define PTRS_PER_PMD 2048
106#define PTRS_PER_PUD 2048 106#define PTRS_PER_PUD 2048
107#endif /* __s390x__ */ 107#endif /* CONFIG_64BIT */
108#define PTRS_PER_PGD 2048 108#define PTRS_PER_PGD 2048
109 109
110#define FIRST_USER_ADDRESS 0 110#define FIRST_USER_ADDRESS 0
@@ -276,7 +276,7 @@ extern struct page *vmemmap;
276 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid. 276 * swap pte is 1011 and 0001, 0011, 0101, 0111 are invalid.
277 */ 277 */
278 278
279#ifndef __s390x__ 279#ifndef CONFIG_64BIT
280 280
281/* Bits in the segment table address-space-control-element */ 281/* Bits in the segment table address-space-control-element */
282#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */ 282#define _ASCE_SPACE_SWITCH 0x80000000UL /* space switch event */
@@ -308,7 +308,7 @@ extern struct page *vmemmap;
308#define KVM_UR_BIT 0x00008000UL 308#define KVM_UR_BIT 0x00008000UL
309#define KVM_UC_BIT 0x00004000UL 309#define KVM_UC_BIT 0x00004000UL
310 310
311#else /* __s390x__ */ 311#else /* CONFIG_64BIT */
312 312
313/* Bits in the segment/region table address-space-control-element */ 313/* Bits in the segment/region table address-space-control-element */
314#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */ 314#define _ASCE_ORIGIN ~0xfffUL/* segment table origin */
@@ -363,7 +363,7 @@ extern struct page *vmemmap;
363#define KVM_UR_BIT 0x0000800000000000UL 363#define KVM_UR_BIT 0x0000800000000000UL
364#define KVM_UC_BIT 0x0000400000000000UL 364#define KVM_UC_BIT 0x0000400000000000UL
365 365
366#endif /* __s390x__ */ 366#endif /* CONFIG_64BIT */
367 367
368/* 368/*
369 * A user page table pointer has the space-switch-event bit, the 369 * A user page table pointer has the space-switch-event bit, the
@@ -424,7 +424,7 @@ static inline int mm_has_pgste(struct mm_struct *mm)
424/* 424/*
425 * pgd/pmd/pte query functions 425 * pgd/pmd/pte query functions
426 */ 426 */
427#ifndef __s390x__ 427#ifndef CONFIG_64BIT
428 428
429static inline int pgd_present(pgd_t pgd) { return 1; } 429static inline int pgd_present(pgd_t pgd) { return 1; }
430static inline int pgd_none(pgd_t pgd) { return 0; } 430static inline int pgd_none(pgd_t pgd) { return 0; }
@@ -434,7 +434,7 @@ static inline int pud_present(pud_t pud) { return 1; }
434static inline int pud_none(pud_t pud) { return 0; } 434static inline int pud_none(pud_t pud) { return 0; }
435static inline int pud_bad(pud_t pud) { return 0; } 435static inline int pud_bad(pud_t pud) { return 0; }
436 436
437#else /* __s390x__ */ 437#else /* CONFIG_64BIT */
438 438
439static inline int pgd_present(pgd_t pgd) 439static inline int pgd_present(pgd_t pgd)
440{ 440{
@@ -490,7 +490,7 @@ static inline int pud_bad(pud_t pud)
490 return (pud_val(pud) & mask) != 0; 490 return (pud_val(pud) & mask) != 0;
491} 491}
492 492
493#endif /* __s390x__ */ 493#endif /* CONFIG_64BIT */
494 494
495static inline int pmd_present(pmd_t pmd) 495static inline int pmd_present(pmd_t pmd)
496{ 496{
@@ -741,7 +741,7 @@ static inline int pte_young(pte_t pte)
741 741
742static inline void pgd_clear(pgd_t *pgd) 742static inline void pgd_clear(pgd_t *pgd)
743{ 743{
744#ifdef __s390x__ 744#ifdef CONFIG_64BIT
745 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2) 745 if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R2)
746 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY; 746 pgd_val(*pgd) = _REGION2_ENTRY_EMPTY;
747#endif 747#endif
@@ -749,7 +749,7 @@ static inline void pgd_clear(pgd_t *pgd)
749 749
750static inline void pud_clear(pud_t *pud) 750static inline void pud_clear(pud_t *pud)
751{ 751{
752#ifdef __s390x__ 752#ifdef CONFIG_64BIT
753 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3) 753 if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) == _REGION_ENTRY_TYPE_R3)
754 pud_val(*pud) = _REGION3_ENTRY_EMPTY; 754 pud_val(*pud) = _REGION3_ENTRY_EMPTY;
755#endif 755#endif
@@ -921,7 +921,7 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
921static inline void __ptep_ipte(unsigned long address, pte_t *ptep) 921static inline void __ptep_ipte(unsigned long address, pte_t *ptep)
922{ 922{
923 if (!(pte_val(*ptep) & _PAGE_INVALID)) { 923 if (!(pte_val(*ptep) & _PAGE_INVALID)) {
924#ifndef __s390x__ 924#ifndef CONFIG_64BIT
925 /* pto must point to the start of the segment table */ 925 /* pto must point to the start of the segment table */
926 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00); 926 pte_t *pto = (pte_t *) (((unsigned long) ptep) & 0x7ffffc00);
927#else 927#else
@@ -1116,7 +1116,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1116#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) 1116#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
1117#define pgd_offset_k(address) pgd_offset(&init_mm, address) 1117#define pgd_offset_k(address) pgd_offset(&init_mm, address)
1118 1118
1119#ifndef __s390x__ 1119#ifndef CONFIG_64BIT
1120 1120
1121#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1121#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1122#define pud_deref(pmd) ({ BUG(); 0UL; }) 1122#define pud_deref(pmd) ({ BUG(); 0UL; })
@@ -1125,7 +1125,7 @@ static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
1125#define pud_offset(pgd, address) ((pud_t *) pgd) 1125#define pud_offset(pgd, address) ((pud_t *) pgd)
1126#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address)) 1126#define pmd_offset(pud, address) ((pmd_t *) pud + pmd_index(address))
1127 1127
1128#else /* __s390x__ */ 1128#else /* CONFIG_64BIT */
1129 1129
1130#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN) 1130#define pmd_deref(pmd) (pmd_val(pmd) & _SEGMENT_ENTRY_ORIGIN)
1131#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN) 1131#define pud_deref(pud) (pud_val(pud) & _REGION_ENTRY_ORIGIN)
@@ -1147,7 +1147,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1147 return pmd + pmd_index(address); 1147 return pmd + pmd_index(address);
1148} 1148}
1149 1149
1150#endif /* __s390x__ */ 1150#endif /* CONFIG_64BIT */
1151 1151
1152#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot)) 1152#define pfn_pte(pfn,pgprot) mk_pte_phys(__pa((pfn) << PAGE_SHIFT),(pgprot))
1153#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT) 1153#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
@@ -1196,7 +1196,7 @@ static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
1196 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66 1196 * 0000000000111111111122222222223333333333444444444455 5555 5 55566 66
1197 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23 1197 * 0123456789012345678901234567890123456789012345678901 2345 6 78901 23
1198 */ 1198 */
1199#ifndef __s390x__ 1199#ifndef CONFIG_64BIT
1200#define __SWP_OFFSET_MASK (~0UL >> 12) 1200#define __SWP_OFFSET_MASK (~0UL >> 12)
1201#else 1201#else
1202#define __SWP_OFFSET_MASK (~0UL >> 11) 1202#define __SWP_OFFSET_MASK (~0UL >> 11)
@@ -1217,11 +1217,11 @@ static inline pte_t mk_swap_pte(unsigned long type, unsigned long offset)
1217#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) 1217#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
1218#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) 1218#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
1219 1219
1220#ifndef __s390x__ 1220#ifndef CONFIG_64BIT
1221# define PTE_FILE_MAX_BITS 26 1221# define PTE_FILE_MAX_BITS 26
1222#else /* __s390x__ */ 1222#else /* CONFIG_64BIT */
1223# define PTE_FILE_MAX_BITS 59 1223# define PTE_FILE_MAX_BITS 59
1224#endif /* __s390x__ */ 1224#endif /* CONFIG_64BIT */
1225 1225
1226#define pte_to_pgoff(__pte) \ 1226#define pte_to_pgoff(__pte) \
1227 ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f)) 1227 ((((__pte).pte >> 12) << 7) + (((__pte).pte >> 1) & 0x7f))
diff --git a/arch/s390/include/asm/posix_types.h b/arch/s390/include/asm/posix_types.h
index edf8527ff08d..7be104c0f192 100644
--- a/arch/s390/include/asm/posix_types.h
+++ b/arch/s390/include/asm/posix_types.h
@@ -24,7 +24,6 @@ typedef unsigned short __kernel_old_dev_t;
24 24
25typedef unsigned long __kernel_ino_t; 25typedef unsigned long __kernel_ino_t;
26typedef unsigned short __kernel_mode_t; 26typedef unsigned short __kernel_mode_t;
27typedef unsigned short __kernel_nlink_t;
28typedef unsigned short __kernel_ipc_pid_t; 27typedef unsigned short __kernel_ipc_pid_t;
29typedef unsigned short __kernel_uid_t; 28typedef unsigned short __kernel_uid_t;
30typedef unsigned short __kernel_gid_t; 29typedef unsigned short __kernel_gid_t;
@@ -35,7 +34,6 @@ typedef int __kernel_ptrdiff_t;
35 34
36typedef unsigned int __kernel_ino_t; 35typedef unsigned int __kernel_ino_t;
37typedef unsigned int __kernel_mode_t; 36typedef unsigned int __kernel_mode_t;
38typedef unsigned int __kernel_nlink_t;
39typedef int __kernel_ipc_pid_t; 37typedef int __kernel_ipc_pid_t;
40typedef unsigned int __kernel_uid_t; 38typedef unsigned int __kernel_uid_t;
41typedef unsigned int __kernel_gid_t; 39typedef unsigned int __kernel_gid_t;
@@ -47,7 +45,6 @@ typedef unsigned long __kernel_sigset_t; /* at least 32 bits */
47 45
48#define __kernel_ino_t __kernel_ino_t 46#define __kernel_ino_t __kernel_ino_t
49#define __kernel_mode_t __kernel_mode_t 47#define __kernel_mode_t __kernel_mode_t
50#define __kernel_nlink_t __kernel_nlink_t
51#define __kernel_ipc_pid_t __kernel_ipc_pid_t 48#define __kernel_ipc_pid_t __kernel_ipc_pid_t
52#define __kernel_uid_t __kernel_uid_t 49#define __kernel_uid_t __kernel_uid_t
53#define __kernel_gid_t __kernel_gid_t 50#define __kernel_gid_t __kernel_gid_t
diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
index 6cbf31311673..20d0585cf905 100644
--- a/arch/s390/include/asm/processor.h
+++ b/arch/s390/include/asm/processor.h
@@ -20,7 +20,6 @@
20#include <asm/ptrace.h> 20#include <asm/ptrace.h>
21#include <asm/setup.h> 21#include <asm/setup.h>
22 22
23#ifdef __KERNEL__
24/* 23/*
25 * Default implementation of macro that returns current 24 * Default implementation of macro that returns current
26 * instruction pointer ("program counter"). 25 * instruction pointer ("program counter").
@@ -33,39 +32,33 @@ static inline void get_cpu_id(struct cpuid *ptr)
33} 32}
34 33
35extern void s390_adjust_jiffies(void); 34extern void s390_adjust_jiffies(void);
36extern int get_cpu_capability(unsigned int *);
37extern const struct seq_operations cpuinfo_op; 35extern const struct seq_operations cpuinfo_op;
38extern int sysctl_ieee_emulation_warnings; 36extern int sysctl_ieee_emulation_warnings;
39 37
40/* 38/*
41 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit. 39 * User space process size: 2GB for 31 bit, 4TB or 8PT for 64 bit.
42 */ 40 */
43#ifndef __s390x__ 41#ifndef CONFIG_64BIT
44 42
45#define TASK_SIZE (1UL << 31) 43#define TASK_SIZE (1UL << 31)
46#define TASK_UNMAPPED_BASE (1UL << 30) 44#define TASK_UNMAPPED_BASE (1UL << 30)
47 45
48#else /* __s390x__ */ 46#else /* CONFIG_64BIT */
49 47
50#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit) 48#define TASK_SIZE_OF(tsk) ((tsk)->mm->context.asce_limit)
51#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \ 49#define TASK_UNMAPPED_BASE (test_thread_flag(TIF_31BIT) ? \
52 (1UL << 30) : (1UL << 41)) 50 (1UL << 30) : (1UL << 41))
53#define TASK_SIZE TASK_SIZE_OF(current) 51#define TASK_SIZE TASK_SIZE_OF(current)
54 52
55#endif /* __s390x__ */ 53#endif /* CONFIG_64BIT */
56 54
57#ifdef __KERNEL__ 55#ifndef CONFIG_64BIT
58
59#ifndef __s390x__
60#define STACK_TOP (1UL << 31) 56#define STACK_TOP (1UL << 31)
61#define STACK_TOP_MAX (1UL << 31) 57#define STACK_TOP_MAX (1UL << 31)
62#else /* __s390x__ */ 58#else /* CONFIG_64BIT */
63#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42)) 59#define STACK_TOP (1UL << (test_thread_flag(TIF_31BIT) ? 31:42))
64#define STACK_TOP_MAX (1UL << 42) 60#define STACK_TOP_MAX (1UL << 42)
65#endif /* __s390x__ */ 61#endif /* CONFIG_64BIT */
66
67
68#endif
69 62
70#define HAVE_ARCH_PICK_MMAP_LAYOUT 63#define HAVE_ARCH_PICK_MMAP_LAYOUT
71 64
@@ -182,7 +175,7 @@ static inline void psw_set_key(unsigned int key)
182 */ 175 */
183static inline void __load_psw(psw_t psw) 176static inline void __load_psw(psw_t psw)
184{ 177{
185#ifndef __s390x__ 178#ifndef CONFIG_64BIT
186 asm volatile("lpsw %0" : : "Q" (psw) : "cc"); 179 asm volatile("lpsw %0" : : "Q" (psw) : "cc");
187#else 180#else
188 asm volatile("lpswe %0" : : "Q" (psw) : "cc"); 181 asm volatile("lpswe %0" : : "Q" (psw) : "cc");
@@ -200,7 +193,7 @@ static inline void __load_psw_mask (unsigned long mask)
200 193
201 psw.mask = mask; 194 psw.mask = mask;
202 195
203#ifndef __s390x__ 196#ifndef CONFIG_64BIT
204 asm volatile( 197 asm volatile(
205 " basr %0,0\n" 198 " basr %0,0\n"
206 "0: ahi %0,1f-0b\n" 199 "0: ahi %0,1f-0b\n"
@@ -208,14 +201,14 @@ static inline void __load_psw_mask (unsigned long mask)
208 " lpsw %1\n" 201 " lpsw %1\n"
209 "1:" 202 "1:"
210 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 203 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
211#else /* __s390x__ */ 204#else /* CONFIG_64BIT */
212 asm volatile( 205 asm volatile(
213 " larl %0,1f\n" 206 " larl %0,1f\n"
214 " stg %0,%O1+8(%R1)\n" 207 " stg %0,%O1+8(%R1)\n"
215 " lpswe %1\n" 208 " lpswe %1\n"
216 "1:" 209 "1:"
217 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc"); 210 : "=&d" (addr), "=Q" (psw) : "Q" (psw) : "memory", "cc");
218#endif /* __s390x__ */ 211#endif /* CONFIG_64BIT */
219} 212}
220 213
221/* 214/*
@@ -223,7 +216,7 @@ static inline void __load_psw_mask (unsigned long mask)
223 */ 216 */
224static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc) 217static inline unsigned long __rewind_psw(psw_t psw, unsigned long ilc)
225{ 218{
226#ifndef __s390x__ 219#ifndef CONFIG_64BIT
227 if (psw.addr & PSW_ADDR_AMODE) 220 if (psw.addr & PSW_ADDR_AMODE)
228 /* 31 bit mode */ 221 /* 31 bit mode */
229 return (psw.addr - ilc) | PSW_ADDR_AMODE; 222 return (psw.addr - ilc) | PSW_ADDR_AMODE;
@@ -253,7 +246,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
253 * Store status and then load disabled wait psw, 246 * Store status and then load disabled wait psw,
254 * the processor is dead afterwards 247 * the processor is dead afterwards
255 */ 248 */
256#ifndef __s390x__ 249#ifndef CONFIG_64BIT
257 asm volatile( 250 asm volatile(
258 " stctl 0,0,0(%2)\n" 251 " stctl 0,0,0(%2)\n"
259 " ni 0(%2),0xef\n" /* switch off protection */ 252 " ni 0(%2),0xef\n" /* switch off protection */
@@ -272,7 +265,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
272 " lpsw 0(%1)" 265 " lpsw 0(%1)"
273 : "=m" (ctl_buf) 266 : "=m" (ctl_buf)
274 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc"); 267 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc");
275#else /* __s390x__ */ 268#else /* CONFIG_64BIT */
276 asm volatile( 269 asm volatile(
277 " stctg 0,0,0(%2)\n" 270 " stctg 0,0,0(%2)\n"
278 " ni 4(%2),0xef\n" /* switch off protection */ 271 " ni 4(%2),0xef\n" /* switch off protection */
@@ -305,7 +298,7 @@ static inline void __noreturn disabled_wait(unsigned long code)
305 " lpswe 0(%1)" 298 " lpswe 0(%1)"
306 : "=m" (ctl_buf) 299 : "=m" (ctl_buf)
307 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1"); 300 : "a" (&dw_psw), "a" (&ctl_buf), "m" (dw_psw) : "cc", "0", "1");
308#endif /* __s390x__ */ 301#endif /* CONFIG_64BIT */
309 while (1); 302 while (1);
310} 303}
311 304
@@ -338,12 +331,10 @@ extern void (*s390_base_ext_handler_fn)(void);
338 331
339#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL 332#define ARCH_LOW_ADDRESS_LIMIT 0x7fffffffUL
340 333
341#endif
342
343/* 334/*
344 * Helper macro for exception table entries 335 * Helper macro for exception table entries
345 */ 336 */
346#ifndef __s390x__ 337#ifndef CONFIG_64BIT
347#define EX_TABLE(_fault,_target) \ 338#define EX_TABLE(_fault,_target) \
348 ".section __ex_table,\"a\"\n" \ 339 ".section __ex_table,\"a\"\n" \
349 " .align 4\n" \ 340 " .align 4\n" \
diff --git a/arch/s390/include/asm/rwsem.h b/arch/s390/include/asm/rwsem.h
index d0eb4653cebd..1ceee10264c3 100644
--- a/arch/s390/include/asm/rwsem.h
+++ b/arch/s390/include/asm/rwsem.h
@@ -41,19 +41,17 @@
41#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead" 41#error "please don't include asm/rwsem.h directly, use linux/rwsem.h instead"
42#endif 42#endif
43 43
44#ifdef __KERNEL__ 44#ifndef CONFIG_64BIT
45
46#ifndef __s390x__
47#define RWSEM_UNLOCKED_VALUE 0x00000000 45#define RWSEM_UNLOCKED_VALUE 0x00000000
48#define RWSEM_ACTIVE_BIAS 0x00000001 46#define RWSEM_ACTIVE_BIAS 0x00000001
49#define RWSEM_ACTIVE_MASK 0x0000ffff 47#define RWSEM_ACTIVE_MASK 0x0000ffff
50#define RWSEM_WAITING_BIAS (-0x00010000) 48#define RWSEM_WAITING_BIAS (-0x00010000)
51#else /* __s390x__ */ 49#else /* CONFIG_64BIT */
52#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L 50#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
53#define RWSEM_ACTIVE_BIAS 0x0000000000000001L 51#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
54#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL 52#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
55#define RWSEM_WAITING_BIAS (-0x0000000100000000L) 53#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
56#endif /* __s390x__ */ 54#endif /* CONFIG_64BIT */
57#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS 55#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
58#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS) 56#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
59 57
@@ -65,19 +63,19 @@ static inline void __down_read(struct rw_semaphore *sem)
65 signed long old, new; 63 signed long old, new;
66 64
67 asm volatile( 65 asm volatile(
68#ifndef __s390x__ 66#ifndef CONFIG_64BIT
69 " l %0,%2\n" 67 " l %0,%2\n"
70 "0: lr %1,%0\n" 68 "0: lr %1,%0\n"
71 " ahi %1,%4\n" 69 " ahi %1,%4\n"
72 " cs %0,%1,%2\n" 70 " cs %0,%1,%2\n"
73 " jl 0b" 71 " jl 0b"
74#else /* __s390x__ */ 72#else /* CONFIG_64BIT */
75 " lg %0,%2\n" 73 " lg %0,%2\n"
76 "0: lgr %1,%0\n" 74 "0: lgr %1,%0\n"
77 " aghi %1,%4\n" 75 " aghi %1,%4\n"
78 " csg %0,%1,%2\n" 76 " csg %0,%1,%2\n"
79 " jl 0b" 77 " jl 0b"
80#endif /* __s390x__ */ 78#endif /* CONFIG_64BIT */
81 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 79 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
82 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 80 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
83 : "cc", "memory"); 81 : "cc", "memory");
@@ -93,7 +91,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
93 signed long old, new; 91 signed long old, new;
94 92
95 asm volatile( 93 asm volatile(
96#ifndef __s390x__ 94#ifndef CONFIG_64BIT
97 " l %0,%2\n" 95 " l %0,%2\n"
98 "0: ltr %1,%0\n" 96 "0: ltr %1,%0\n"
99 " jm 1f\n" 97 " jm 1f\n"
@@ -101,7 +99,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
101 " cs %0,%1,%2\n" 99 " cs %0,%1,%2\n"
102 " jl 0b\n" 100 " jl 0b\n"
103 "1:" 101 "1:"
104#else /* __s390x__ */ 102#else /* CONFIG_64BIT */
105 " lg %0,%2\n" 103 " lg %0,%2\n"
106 "0: ltgr %1,%0\n" 104 "0: ltgr %1,%0\n"
107 " jm 1f\n" 105 " jm 1f\n"
@@ -109,7 +107,7 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
109 " csg %0,%1,%2\n" 107 " csg %0,%1,%2\n"
110 " jl 0b\n" 108 " jl 0b\n"
111 "1:" 109 "1:"
112#endif /* __s390x__ */ 110#endif /* CONFIG_64BIT */
113 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 111 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
114 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS) 112 : "Q" (sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
115 : "cc", "memory"); 113 : "cc", "memory");
@@ -125,19 +123,19 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
125 123
126 tmp = RWSEM_ACTIVE_WRITE_BIAS; 124 tmp = RWSEM_ACTIVE_WRITE_BIAS;
127 asm volatile( 125 asm volatile(
128#ifndef __s390x__ 126#ifndef CONFIG_64BIT
129 " l %0,%2\n" 127 " l %0,%2\n"
130 "0: lr %1,%0\n" 128 "0: lr %1,%0\n"
131 " a %1,%4\n" 129 " a %1,%4\n"
132 " cs %0,%1,%2\n" 130 " cs %0,%1,%2\n"
133 " jl 0b" 131 " jl 0b"
134#else /* __s390x__ */ 132#else /* CONFIG_64BIT */
135 " lg %0,%2\n" 133 " lg %0,%2\n"
136 "0: lgr %1,%0\n" 134 "0: lgr %1,%0\n"
137 " ag %1,%4\n" 135 " ag %1,%4\n"
138 " csg %0,%1,%2\n" 136 " csg %0,%1,%2\n"
139 " jl 0b" 137 " jl 0b"
140#endif /* __s390x__ */ 138#endif /* CONFIG_64BIT */
141 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 139 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
142 : "Q" (sem->count), "m" (tmp) 140 : "Q" (sem->count), "m" (tmp)
143 : "cc", "memory"); 141 : "cc", "memory");
@@ -158,19 +156,19 @@ static inline int __down_write_trylock(struct rw_semaphore *sem)
158 signed long old; 156 signed long old;
159 157
160 asm volatile( 158 asm volatile(
161#ifndef __s390x__ 159#ifndef CONFIG_64BIT
162 " l %0,%1\n" 160 " l %0,%1\n"
163 "0: ltr %0,%0\n" 161 "0: ltr %0,%0\n"
164 " jnz 1f\n" 162 " jnz 1f\n"
165 " cs %0,%3,%1\n" 163 " cs %0,%3,%1\n"
166 " jl 0b\n" 164 " jl 0b\n"
167#else /* __s390x__ */ 165#else /* CONFIG_64BIT */
168 " lg %0,%1\n" 166 " lg %0,%1\n"
169 "0: ltgr %0,%0\n" 167 "0: ltgr %0,%0\n"
170 " jnz 1f\n" 168 " jnz 1f\n"
171 " csg %0,%3,%1\n" 169 " csg %0,%3,%1\n"
172 " jl 0b\n" 170 " jl 0b\n"
173#endif /* __s390x__ */ 171#endif /* CONFIG_64BIT */
174 "1:" 172 "1:"
175 : "=&d" (old), "=Q" (sem->count) 173 : "=&d" (old), "=Q" (sem->count)
176 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS) 174 : "Q" (sem->count), "d" (RWSEM_ACTIVE_WRITE_BIAS)
@@ -186,19 +184,19 @@ static inline void __up_read(struct rw_semaphore *sem)
186 signed long old, new; 184 signed long old, new;
187 185
188 asm volatile( 186 asm volatile(
189#ifndef __s390x__ 187#ifndef CONFIG_64BIT
190 " l %0,%2\n" 188 " l %0,%2\n"
191 "0: lr %1,%0\n" 189 "0: lr %1,%0\n"
192 " ahi %1,%4\n" 190 " ahi %1,%4\n"
193 " cs %0,%1,%2\n" 191 " cs %0,%1,%2\n"
194 " jl 0b" 192 " jl 0b"
195#else /* __s390x__ */ 193#else /* CONFIG_64BIT */
196 " lg %0,%2\n" 194 " lg %0,%2\n"
197 "0: lgr %1,%0\n" 195 "0: lgr %1,%0\n"
198 " aghi %1,%4\n" 196 " aghi %1,%4\n"
199 " csg %0,%1,%2\n" 197 " csg %0,%1,%2\n"
200 " jl 0b" 198 " jl 0b"
201#endif /* __s390x__ */ 199#endif /* CONFIG_64BIT */
202 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 200 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
203 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS) 201 : "Q" (sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
204 : "cc", "memory"); 202 : "cc", "memory");
@@ -216,19 +214,19 @@ static inline void __up_write(struct rw_semaphore *sem)
216 214
217 tmp = -RWSEM_ACTIVE_WRITE_BIAS; 215 tmp = -RWSEM_ACTIVE_WRITE_BIAS;
218 asm volatile( 216 asm volatile(
219#ifndef __s390x__ 217#ifndef CONFIG_64BIT
220 " l %0,%2\n" 218 " l %0,%2\n"
221 "0: lr %1,%0\n" 219 "0: lr %1,%0\n"
222 " a %1,%4\n" 220 " a %1,%4\n"
223 " cs %0,%1,%2\n" 221 " cs %0,%1,%2\n"
224 " jl 0b" 222 " jl 0b"
225#else /* __s390x__ */ 223#else /* CONFIG_64BIT */
226 " lg %0,%2\n" 224 " lg %0,%2\n"
227 "0: lgr %1,%0\n" 225 "0: lgr %1,%0\n"
228 " ag %1,%4\n" 226 " ag %1,%4\n"
229 " csg %0,%1,%2\n" 227 " csg %0,%1,%2\n"
230 " jl 0b" 228 " jl 0b"
231#endif /* __s390x__ */ 229#endif /* CONFIG_64BIT */
232 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 230 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
233 : "Q" (sem->count), "m" (tmp) 231 : "Q" (sem->count), "m" (tmp)
234 : "cc", "memory"); 232 : "cc", "memory");
@@ -246,19 +244,19 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
246 244
247 tmp = -RWSEM_WAITING_BIAS; 245 tmp = -RWSEM_WAITING_BIAS;
248 asm volatile( 246 asm volatile(
249#ifndef __s390x__ 247#ifndef CONFIG_64BIT
250 " l %0,%2\n" 248 " l %0,%2\n"
251 "0: lr %1,%0\n" 249 "0: lr %1,%0\n"
252 " a %1,%4\n" 250 " a %1,%4\n"
253 " cs %0,%1,%2\n" 251 " cs %0,%1,%2\n"
254 " jl 0b" 252 " jl 0b"
255#else /* __s390x__ */ 253#else /* CONFIG_64BIT */
256 " lg %0,%2\n" 254 " lg %0,%2\n"
257 "0: lgr %1,%0\n" 255 "0: lgr %1,%0\n"
258 " ag %1,%4\n" 256 " ag %1,%4\n"
259 " csg %0,%1,%2\n" 257 " csg %0,%1,%2\n"
260 " jl 0b" 258 " jl 0b"
261#endif /* __s390x__ */ 259#endif /* CONFIG_64BIT */
262 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 260 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
263 : "Q" (sem->count), "m" (tmp) 261 : "Q" (sem->count), "m" (tmp)
264 : "cc", "memory"); 262 : "cc", "memory");
@@ -274,19 +272,19 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
274 signed long old, new; 272 signed long old, new;
275 273
276 asm volatile( 274 asm volatile(
277#ifndef __s390x__ 275#ifndef CONFIG_64BIT
278 " l %0,%2\n" 276 " l %0,%2\n"
279 "0: lr %1,%0\n" 277 "0: lr %1,%0\n"
280 " ar %1,%4\n" 278 " ar %1,%4\n"
281 " cs %0,%1,%2\n" 279 " cs %0,%1,%2\n"
282 " jl 0b" 280 " jl 0b"
283#else /* __s390x__ */ 281#else /* CONFIG_64BIT */
284 " lg %0,%2\n" 282 " lg %0,%2\n"
285 "0: lgr %1,%0\n" 283 "0: lgr %1,%0\n"
286 " agr %1,%4\n" 284 " agr %1,%4\n"
287 " csg %0,%1,%2\n" 285 " csg %0,%1,%2\n"
288 " jl 0b" 286 " jl 0b"
289#endif /* __s390x__ */ 287#endif /* CONFIG_64BIT */
290 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 288 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
291 : "Q" (sem->count), "d" (delta) 289 : "Q" (sem->count), "d" (delta)
292 : "cc", "memory"); 290 : "cc", "memory");
@@ -300,24 +298,23 @@ static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
300 signed long old, new; 298 signed long old, new;
301 299
302 asm volatile( 300 asm volatile(
303#ifndef __s390x__ 301#ifndef CONFIG_64BIT
304 " l %0,%2\n" 302 " l %0,%2\n"
305 "0: lr %1,%0\n" 303 "0: lr %1,%0\n"
306 " ar %1,%4\n" 304 " ar %1,%4\n"
307 " cs %0,%1,%2\n" 305 " cs %0,%1,%2\n"
308 " jl 0b" 306 " jl 0b"
309#else /* __s390x__ */ 307#else /* CONFIG_64BIT */
310 " lg %0,%2\n" 308 " lg %0,%2\n"
311 "0: lgr %1,%0\n" 309 "0: lgr %1,%0\n"
312 " agr %1,%4\n" 310 " agr %1,%4\n"
313 " csg %0,%1,%2\n" 311 " csg %0,%1,%2\n"
314 " jl 0b" 312 " jl 0b"
315#endif /* __s390x__ */ 313#endif /* CONFIG_64BIT */
316 : "=&d" (old), "=&d" (new), "=Q" (sem->count) 314 : "=&d" (old), "=&d" (new), "=Q" (sem->count)
317 : "Q" (sem->count), "d" (delta) 315 : "Q" (sem->count), "d" (delta)
318 : "cc", "memory"); 316 : "cc", "memory");
319 return new; 317 return new;
320} 318}
321 319
322#endif /* __KERNEL__ */
323#endif /* _S390_RWSEM_H */ 320#endif /* _S390_RWSEM_H */
diff --git a/arch/s390/include/asm/setup.h b/arch/s390/include/asm/setup.h
index 7244e1f64126..40eb2ff88e9e 100644
--- a/arch/s390/include/asm/setup.h
+++ b/arch/s390/include/asm/setup.h
@@ -22,19 +22,19 @@
22#include <asm/lowcore.h> 22#include <asm/lowcore.h>
23#include <asm/types.h> 23#include <asm/types.h>
24 24
25#ifndef __s390x__ 25#ifndef CONFIG_64BIT
26#define IPL_DEVICE (*(unsigned long *) (0x10404)) 26#define IPL_DEVICE (*(unsigned long *) (0x10404))
27#define INITRD_START (*(unsigned long *) (0x1040C)) 27#define INITRD_START (*(unsigned long *) (0x1040C))
28#define INITRD_SIZE (*(unsigned long *) (0x10414)) 28#define INITRD_SIZE (*(unsigned long *) (0x10414))
29#define OLDMEM_BASE (*(unsigned long *) (0x1041C)) 29#define OLDMEM_BASE (*(unsigned long *) (0x1041C))
30#define OLDMEM_SIZE (*(unsigned long *) (0x10424)) 30#define OLDMEM_SIZE (*(unsigned long *) (0x10424))
31#else /* __s390x__ */ 31#else /* CONFIG_64BIT */
32#define IPL_DEVICE (*(unsigned long *) (0x10400)) 32#define IPL_DEVICE (*(unsigned long *) (0x10400))
33#define INITRD_START (*(unsigned long *) (0x10408)) 33#define INITRD_START (*(unsigned long *) (0x10408))
34#define INITRD_SIZE (*(unsigned long *) (0x10410)) 34#define INITRD_SIZE (*(unsigned long *) (0x10410))
35#define OLDMEM_BASE (*(unsigned long *) (0x10418)) 35#define OLDMEM_BASE (*(unsigned long *) (0x10418))
36#define OLDMEM_SIZE (*(unsigned long *) (0x10420)) 36#define OLDMEM_SIZE (*(unsigned long *) (0x10420))
37#endif /* __s390x__ */ 37#endif /* CONFIG_64BIT */
38#define COMMAND_LINE ((char *) (0x10480)) 38#define COMMAND_LINE ((char *) (0x10480))
39 39
40#define CHUNK_READ_WRITE 0 40#define CHUNK_READ_WRITE 0
@@ -89,7 +89,7 @@ extern unsigned int user_mode;
89 89
90#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C) 90#define MACHINE_HAS_DIAG9C (S390_lowcore.machine_flags & MACHINE_FLAG_DIAG9C)
91 91
92#ifndef __s390x__ 92#ifndef CONFIG_64BIT
93#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE) 93#define MACHINE_HAS_IEEE (S390_lowcore.machine_flags & MACHINE_FLAG_IEEE)
94#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP) 94#define MACHINE_HAS_CSP (S390_lowcore.machine_flags & MACHINE_FLAG_CSP)
95#define MACHINE_HAS_IDTE (0) 95#define MACHINE_HAS_IDTE (0)
@@ -100,7 +100,7 @@ extern unsigned int user_mode;
100#define MACHINE_HAS_PFMF (0) 100#define MACHINE_HAS_PFMF (0)
101#define MACHINE_HAS_SPP (0) 101#define MACHINE_HAS_SPP (0)
102#define MACHINE_HAS_TOPOLOGY (0) 102#define MACHINE_HAS_TOPOLOGY (0)
103#else /* __s390x__ */ 103#else /* CONFIG_64BIT */
104#define MACHINE_HAS_IEEE (1) 104#define MACHINE_HAS_IEEE (1)
105#define MACHINE_HAS_CSP (1) 105#define MACHINE_HAS_CSP (1)
106#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE) 106#define MACHINE_HAS_IDTE (S390_lowcore.machine_flags & MACHINE_FLAG_IDTE)
@@ -111,7 +111,7 @@ extern unsigned int user_mode;
111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF) 111#define MACHINE_HAS_PFMF (S390_lowcore.machine_flags & MACHINE_FLAG_PFMF)
112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP) 112#define MACHINE_HAS_SPP (S390_lowcore.machine_flags & MACHINE_FLAG_SPP)
113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY) 113#define MACHINE_HAS_TOPOLOGY (S390_lowcore.machine_flags & MACHINE_FLAG_TOPOLOGY)
114#endif /* __s390x__ */ 114#endif /* CONFIG_64BIT */
115 115
116#define ZFCPDUMP_HSA_SIZE (32UL<<20) 116#define ZFCPDUMP_HSA_SIZE (32UL<<20)
117#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20) 117#define ZFCPDUMP_HSA_SIZE_MAX (64UL<<20)
@@ -153,19 +153,19 @@ extern void (*_machine_power_off)(void);
153 153
154#else /* __ASSEMBLY__ */ 154#else /* __ASSEMBLY__ */
155 155
156#ifndef __s390x__ 156#ifndef CONFIG_64BIT
157#define IPL_DEVICE 0x10404 157#define IPL_DEVICE 0x10404
158#define INITRD_START 0x1040C 158#define INITRD_START 0x1040C
159#define INITRD_SIZE 0x10414 159#define INITRD_SIZE 0x10414
160#define OLDMEM_BASE 0x1041C 160#define OLDMEM_BASE 0x1041C
161#define OLDMEM_SIZE 0x10424 161#define OLDMEM_SIZE 0x10424
162#else /* __s390x__ */ 162#else /* CONFIG_64BIT */
163#define IPL_DEVICE 0x10400 163#define IPL_DEVICE 0x10400
164#define INITRD_START 0x10408 164#define INITRD_START 0x10408
165#define INITRD_SIZE 0x10410 165#define INITRD_SIZE 0x10410
166#define OLDMEM_BASE 0x10418 166#define OLDMEM_BASE 0x10418
167#define OLDMEM_SIZE 0x10420 167#define OLDMEM_SIZE 0x10420
168#endif /* __s390x__ */ 168#endif /* CONFIG_64BIT */
169#define COMMAND_LINE 0x10480 169#define COMMAND_LINE 0x10480
170 170
171#endif /* __ASSEMBLY__ */ 171#endif /* __ASSEMBLY__ */
diff --git a/arch/s390/include/asm/sfp-util.h b/arch/s390/include/asm/sfp-util.h
index ca3f8814e361..5959bfb3b693 100644
--- a/arch/s390/include/asm/sfp-util.h
+++ b/arch/s390/include/asm/sfp-util.h
@@ -51,7 +51,7 @@
51 wl = __wl; \ 51 wl = __wl; \
52}) 52})
53 53
54#ifdef __s390x__ 54#ifdef CONFIG_64BIT
55#define udiv_qrnnd(q, r, n1, n0, d) \ 55#define udiv_qrnnd(q, r, n1, n0, d) \
56 do { unsigned long __n; \ 56 do { unsigned long __n; \
57 unsigned int __r, __d; \ 57 unsigned int __r, __d; \
diff --git a/arch/s390/include/asm/string.h b/arch/s390/include/asm/string.h
index cd0241db5a46..8cc160c9e1cb 100644
--- a/arch/s390/include/asm/string.h
+++ b/arch/s390/include/asm/string.h
@@ -9,8 +9,6 @@
9#ifndef _S390_STRING_H_ 9#ifndef _S390_STRING_H_
10#define _S390_STRING_H_ 10#define _S390_STRING_H_
11 11
12#ifdef __KERNEL__
13
14#ifndef _LINUX_TYPES_H 12#ifndef _LINUX_TYPES_H
15#include <linux/types.h> 13#include <linux/types.h>
16#endif 14#endif
@@ -152,6 +150,4 @@ size_t strlen(const char *s);
152size_t strnlen(const char * s, size_t n); 150size_t strnlen(const char * s, size_t n);
153#endif /* !IN_ARCH_STRING_C */ 151#endif /* !IN_ARCH_STRING_C */
154 152
155#endif /* __KERNEL__ */
156
157#endif /* __S390_STRING_H_ */ 153#endif /* __S390_STRING_H_ */
diff --git a/arch/s390/include/asm/thread_info.h b/arch/s390/include/asm/thread_info.h
index 003b04edcff6..4e40b25cd060 100644
--- a/arch/s390/include/asm/thread_info.h
+++ b/arch/s390/include/asm/thread_info.h
@@ -9,15 +9,13 @@
9#ifndef _ASM_THREAD_INFO_H 9#ifndef _ASM_THREAD_INFO_H
10#define _ASM_THREAD_INFO_H 10#define _ASM_THREAD_INFO_H
11 11
12#ifdef __KERNEL__
13
14/* 12/*
15 * Size of kernel stack for each process 13 * Size of kernel stack for each process
16 */ 14 */
17#ifndef __s390x__ 15#ifndef CONFIG_64BIT
18#define THREAD_ORDER 1 16#define THREAD_ORDER 1
19#define ASYNC_ORDER 1 17#define ASYNC_ORDER 1
20#else /* __s390x__ */ 18#else /* CONFIG_64BIT */
21#ifndef __SMALL_STACK 19#ifndef __SMALL_STACK
22#define THREAD_ORDER 2 20#define THREAD_ORDER 2
23#define ASYNC_ORDER 2 21#define ASYNC_ORDER 2
@@ -25,7 +23,7 @@
25#define THREAD_ORDER 1 23#define THREAD_ORDER 1
26#define ASYNC_ORDER 1 24#define ASYNC_ORDER 1
27#endif 25#endif
28#endif /* __s390x__ */ 26#endif /* CONFIG_64BIT */
29 27
30#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER) 28#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
31#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER) 29#define ASYNC_SIZE (PAGE_SIZE << ASYNC_ORDER)
@@ -123,8 +121,6 @@ static inline struct thread_info *current_thread_info(void)
123#define is_32bit_task() (1) 121#define is_32bit_task() (1)
124#endif 122#endif
125 123
126#endif /* __KERNEL__ */
127
128#define PREEMPT_ACTIVE 0x4000000 124#define PREEMPT_ACTIVE 0x4000000
129 125
130#endif /* _ASM_THREAD_INFO_H */ 126#endif /* _ASM_THREAD_INFO_H */
diff --git a/arch/s390/include/asm/timer.h b/arch/s390/include/asm/timer.h
index e63069ba39e3..15d647901e5c 100644
--- a/arch/s390/include/asm/timer.h
+++ b/arch/s390/include/asm/timer.h
@@ -10,8 +10,6 @@
10#ifndef _ASM_S390_TIMER_H 10#ifndef _ASM_S390_TIMER_H
11#define _ASM_S390_TIMER_H 11#define _ASM_S390_TIMER_H
12 12
13#ifdef __KERNEL__
14
15#include <linux/timer.h> 13#include <linux/timer.h>
16 14
17#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL) 15#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
@@ -50,6 +48,4 @@ extern void vtime_init(void);
50extern void vtime_stop_cpu(void); 48extern void vtime_stop_cpu(void);
51extern void vtime_start_leave(void); 49extern void vtime_start_leave(void);
52 50
53#endif /* __KERNEL__ */
54
55#endif /* _ASM_S390_TIMER_H */ 51#endif /* _ASM_S390_TIMER_H */
diff --git a/arch/s390/include/asm/tlb.h b/arch/s390/include/asm/tlb.h
index 775a5eea8f9e..06e5acbc84bd 100644
--- a/arch/s390/include/asm/tlb.h
+++ b/arch/s390/include/asm/tlb.h
@@ -106,7 +106,7 @@ static inline void pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
106static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd, 106static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
107 unsigned long address) 107 unsigned long address)
108{ 108{
109#ifdef __s390x__ 109#ifdef CONFIG_64BIT
110 if (tlb->mm->context.asce_limit <= (1UL << 31)) 110 if (tlb->mm->context.asce_limit <= (1UL << 31))
111 return; 111 return;
112 if (!tlb->fullmm) 112 if (!tlb->fullmm)
@@ -125,7 +125,7 @@ static inline void pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
125static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud, 125static inline void pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
126 unsigned long address) 126 unsigned long address)
127{ 127{
128#ifdef __s390x__ 128#ifdef CONFIG_64BIT
129 if (tlb->mm->context.asce_limit <= (1UL << 42)) 129 if (tlb->mm->context.asce_limit <= (1UL << 42))
130 return; 130 return;
131 if (!tlb->fullmm) 131 if (!tlb->fullmm)
diff --git a/arch/s390/include/asm/tlbflush.h b/arch/s390/include/asm/tlbflush.h
index 1d8648cf2fea..9fde315f3a7c 100644
--- a/arch/s390/include/asm/tlbflush.h
+++ b/arch/s390/include/asm/tlbflush.h
@@ -27,12 +27,12 @@ static inline void __tlb_flush_global(void)
27 register unsigned long reg4 asm("4"); 27 register unsigned long reg4 asm("4");
28 long dummy; 28 long dummy;
29 29
30#ifndef __s390x__ 30#ifndef CONFIG_64BIT
31 if (!MACHINE_HAS_CSP) { 31 if (!MACHINE_HAS_CSP) {
32 smp_ptlb_all(); 32 smp_ptlb_all();
33 return; 33 return;
34 } 34 }
35#endif /* __s390x__ */ 35#endif /* CONFIG_64BIT */
36 36
37 dummy = 0; 37 dummy = 0;
38 reg2 = reg3 = 0; 38 reg2 = reg3 = 0;
diff --git a/arch/s390/include/asm/types.h b/arch/s390/include/asm/types.h
index 05ebbcdbbf6b..6c8c35f8df14 100644
--- a/arch/s390/include/asm/types.h
+++ b/arch/s390/include/asm/types.h
@@ -28,7 +28,7 @@ typedef __signed__ long saddr_t;
28 28
29#ifndef __ASSEMBLY__ 29#ifndef __ASSEMBLY__
30 30
31#ifndef __s390x__ 31#ifndef CONFIG_64BIT
32typedef union { 32typedef union {
33 unsigned long long pair; 33 unsigned long long pair;
34 struct { 34 struct {
@@ -37,7 +37,7 @@ typedef union {
37 } subreg; 37 } subreg;
38} register_pair; 38} register_pair;
39 39
40#endif /* ! __s390x__ */ 40#endif /* ! CONFIG_64BIT */
41#endif /* __ASSEMBLY__ */ 41#endif /* __ASSEMBLY__ */
42#endif /* __KERNEL__ */ 42#endif /* __KERNEL__ */
43#endif /* _S390_TYPES_H */ 43#endif /* _S390_TYPES_H */
diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
index 8f2cada4f7c9..1f3a79bcd262 100644
--- a/arch/s390/include/asm/uaccess.h
+++ b/arch/s390/include/asm/uaccess.h
@@ -50,10 +50,15 @@
50 50
51#define segment_eq(a,b) ((a).ar4 == (b).ar4) 51#define segment_eq(a,b) ((a).ar4 == (b).ar4)
52 52
53#define __access_ok(addr, size) \ 53static inline int __range_ok(unsigned long addr, unsigned long size)
54({ \ 54{
55 __chk_user_ptr(addr); \ 55 return 1;
56 1; \ 56}
57
58#define __access_ok(addr, size) \
59({ \
60 __chk_user_ptr(addr); \
61 __range_ok((unsigned long)(addr), (size)); \
57}) 62})
58 63
59#define access_ok(type, addr, size) __access_ok(addr, size) 64#define access_ok(type, addr, size) __access_ok(addr, size)
@@ -377,7 +382,7 @@ clear_user(void __user *to, unsigned long n)
377} 382}
378 383
379extern int memcpy_real(void *, void *, size_t); 384extern int memcpy_real(void *, void *, size_t);
380extern void copy_to_absolute_zero(void *dest, void *src, size_t count); 385extern void memcpy_absolute(void *, void *, size_t);
381extern int copy_to_user_real(void __user *dest, void *src, size_t count); 386extern int copy_to_user_real(void __user *dest, void *src, size_t count);
382extern int copy_from_user_real(void *dest, void __user *src, size_t count); 387extern int copy_from_user_real(void *dest, void __user *src, size_t count);
383 388
diff --git a/arch/s390/include/asm/vdso.h b/arch/s390/include/asm/vdso.h
index c4a11cfad3c8..a73eb2e1e918 100644
--- a/arch/s390/include/asm/vdso.h
+++ b/arch/s390/include/asm/vdso.h
@@ -1,8 +1,6 @@
1#ifndef __S390_VDSO_H__ 1#ifndef __S390_VDSO_H__
2#define __S390_VDSO_H__ 2#define __S390_VDSO_H__
3 3
4#ifdef __KERNEL__
5
6/* Default link addresses for the vDSOs */ 4/* Default link addresses for the vDSOs */
7#define VDSO32_LBASE 0 5#define VDSO32_LBASE 0
8#define VDSO64_LBASE 0 6#define VDSO64_LBASE 0
@@ -45,7 +43,4 @@ void vdso_free_per_cpu(struct _lowcore *lowcore);
45#endif 43#endif
46 44
47#endif /* __ASSEMBLY__ */ 45#endif /* __ASSEMBLY__ */
48
49#endif /* __KERNEL__ */
50
51#endif /* __S390_VDSO_H__ */ 46#endif /* __S390_VDSO_H__ */
diff --git a/arch/s390/kernel/base.S b/arch/s390/kernel/base.S
index 3aa4d00aaf50..c880ff72db44 100644
--- a/arch/s390/kernel/base.S
+++ b/arch/s390/kernel/base.S
@@ -88,6 +88,9 @@ ENTRY(diag308_reset)
88 stctg %c0,%c15,0(%r4) 88 stctg %c0,%c15,0(%r4)
89 larl %r4,.Lfpctl # Floating point control register 89 larl %r4,.Lfpctl # Floating point control register
90 stfpc 0(%r4) 90 stfpc 0(%r4)
91 larl %r4,.Lcontinue_psw # Save PSW flags
92 epsw %r2,%r3
93 stm %r2,%r3,0(%r4)
91 larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0 94 larl %r4,.Lrestart_psw # Setup restart PSW at absolute 0
92 lghi %r3,0 95 lghi %r3,0
93 lg %r4,0(%r4) # Save PSW 96 lg %r4,0(%r4) # Save PSW
@@ -103,11 +106,20 @@ ENTRY(diag308_reset)
103 lctlg %c0,%c15,0(%r4) 106 lctlg %c0,%c15,0(%r4)
104 larl %r4,.Lfpctl # Restore floating point ctl register 107 larl %r4,.Lfpctl # Restore floating point ctl register
105 lfpc 0(%r4) 108 lfpc 0(%r4)
109 larl %r4,.Lcontinue_psw # Restore PSW flags
110 lpswe 0(%r4)
111.Lcontinue:
106 br %r14 112 br %r14
107.align 16 113.align 16
108.Lrestart_psw: 114.Lrestart_psw:
109 .long 0x00080000,0x80000000 + .Lrestart_part2 115 .long 0x00080000,0x80000000 + .Lrestart_part2
110 116
117 .section .data..nosave,"aw",@progbits
118.align 8
119.Lcontinue_psw:
120 .quad 0,.Lcontinue
121 .previous
122
111 .section .bss 123 .section .bss
112.align 8 124.align 8
113.Lctlregs: 125.Lctlregs:
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 377c096ca4a7..3c0c19830c37 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -32,8 +32,6 @@
32#include "compat_ptrace.h" 32#include "compat_ptrace.h"
33#include "entry.h" 33#include "entry.h"
34 34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36
37typedef struct 35typedef struct
38{ 36{
39 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32]; 37 __u8 callee_used_stack[__SIGNAL_FRAMESIZE32];
@@ -364,7 +362,6 @@ asmlinkage long sys32_sigreturn(void)
364 goto badframe; 362 goto badframe;
365 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32)) 363 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE32))
366 goto badframe; 364 goto badframe;
367 sigdelsetmask(&set, ~_BLOCKABLE);
368 set_current_blocked(&set); 365 set_current_blocked(&set);
369 if (restore_sigregs32(regs, &frame->sregs)) 366 if (restore_sigregs32(regs, &frame->sregs))
370 goto badframe; 367 goto badframe;
@@ -390,7 +387,6 @@ asmlinkage long sys32_rt_sigreturn(void)
390 goto badframe; 387 goto badframe;
391 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 388 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
392 goto badframe; 389 goto badframe;
393 sigdelsetmask(&set, ~_BLOCKABLE);
394 set_current_blocked(&set); 390 set_current_blocked(&set);
395 if (restore_sigregs32(regs, &frame->uc.uc_mcontext)) 391 if (restore_sigregs32(regs, &frame->uc.uc_mcontext))
396 goto badframe; 392 goto badframe;
@@ -572,7 +568,7 @@ give_sigsegv:
572 * OK, we're invoking a handler 568 * OK, we're invoking a handler
573 */ 569 */
574 570
575int handle_signal32(unsigned long sig, struct k_sigaction *ka, 571void handle_signal32(unsigned long sig, struct k_sigaction *ka,
576 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 572 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs)
577{ 573{
578 int ret; 574 int ret;
@@ -583,8 +579,8 @@ int handle_signal32(unsigned long sig, struct k_sigaction *ka,
583 else 579 else
584 ret = setup_frame32(sig, ka, oldset, regs); 580 ret = setup_frame32(sig, ka, oldset, regs);
585 if (ret) 581 if (ret)
586 return ret; 582 return;
587 block_sigmask(ka, sig); 583 signal_delivered(sig, info, ka, regs,
588 return 0; 584 test_thread_flag(TIF_SINGLE_STEP));
589} 585}
590 586
diff --git a/arch/s390/kernel/early.c b/arch/s390/kernel/early.c
index d84181f1f5e8..6684fff17558 100644
--- a/arch/s390/kernel/early.c
+++ b/arch/s390/kernel/early.c
@@ -237,7 +237,7 @@ static noinline __init void detect_machine_type(void)
237 S390_lowcore.machine_flags |= MACHINE_FLAG_VM; 237 S390_lowcore.machine_flags |= MACHINE_FLAG_VM;
238} 238}
239 239
240static __init void early_pgm_check_handler(void) 240static void early_pgm_check_handler(void)
241{ 241{
242 unsigned long addr; 242 unsigned long addr;
243 const struct exception_table_entry *fixup; 243 const struct exception_table_entry *fixup;
diff --git a/arch/s390/kernel/entry.h b/arch/s390/kernel/entry.h
index 6cdddac93a2e..f66a229ab0b3 100644
--- a/arch/s390/kernel/entry.h
+++ b/arch/s390/kernel/entry.h
@@ -31,7 +31,7 @@ void do_per_trap(struct pt_regs *regs);
31void syscall_trace(struct pt_regs *regs, int entryexit); 31void syscall_trace(struct pt_regs *regs, int entryexit);
32void kernel_stack_overflow(struct pt_regs * regs); 32void kernel_stack_overflow(struct pt_regs * regs);
33void do_signal(struct pt_regs *regs); 33void do_signal(struct pt_regs *regs);
34int handle_signal32(unsigned long sig, struct k_sigaction *ka, 34void handle_signal32(unsigned long sig, struct k_sigaction *ka,
35 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs); 35 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs);
36void do_notify_resume(struct pt_regs *regs); 36void do_notify_resume(struct pt_regs *regs);
37 37
diff --git a/arch/s390/kernel/head_kdump.S b/arch/s390/kernel/head_kdump.S
index e1ac3893e972..796c976b5fdc 100644
--- a/arch/s390/kernel/head_kdump.S
+++ b/arch/s390/kernel/head_kdump.S
@@ -85,11 +85,6 @@ startup_kdump_relocated:
85 basr %r13,0 85 basr %r13,0
860: 860:
87 mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW 87 mvc 0(8,%r0),.Lrestart_psw-0b(%r13) # Setup restart PSW
88 mvc 464(16,%r0),.Lpgm_psw-0b(%r13) # Setup pgm check PSW
89 lhi %r1,1 # Start new kernel
90 diag %r1,%r1,0x308 # with diag 308
91
92.Lno_diag308: # No diag 308
93 sam31 # Switch to 31 bit addr mode 88 sam31 # Switch to 31 bit addr mode
94 sr %r1,%r1 # Erase register r1 89 sr %r1,%r1 # Erase register r1
95 sr %r2,%r2 # Erase register r2 90 sr %r2,%r2 # Erase register r2
@@ -98,8 +93,6 @@ startup_kdump_relocated:
98.align 8 93.align 8
99.Lrestart_psw: 94.Lrestart_psw:
100 .long 0x00080000,0x80000000 + startup 95 .long 0x00080000,0x80000000 + startup
101.Lpgm_psw:
102 .quad 0x0000000180000000,0x0000000000000000 + .Lno_diag308
103#else 96#else
104.align 2 97.align 2
105.Lep_startup_kdump: 98.Lep_startup_kdump:
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index 8342e65a140d..2f6cfd460cb6 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -1528,12 +1528,15 @@ static struct shutdown_action __refdata dump_action = {
1528 1528
1529static void dump_reipl_run(struct shutdown_trigger *trigger) 1529static void dump_reipl_run(struct shutdown_trigger *trigger)
1530{ 1530{
1531 u32 csum; 1531 struct {
1532 1532 void *addr;
1533 csum = csum_partial(reipl_block_actual, reipl_block_actual->hdr.len, 0); 1533 __u32 csum;
1534 copy_to_absolute_zero(&S390_lowcore.ipib_checksum, &csum, sizeof(csum)); 1534 } __packed ipib;
1535 copy_to_absolute_zero(&S390_lowcore.ipib, &reipl_block_actual, 1535
1536 sizeof(reipl_block_actual)); 1536 ipib.csum = csum_partial(reipl_block_actual,
1537 reipl_block_actual->hdr.len, 0);
1538 ipib.addr = reipl_block_actual;
1539 memcpy_absolute(&S390_lowcore.ipib, &ipib, sizeof(ipib));
1537 dump_run(trigger); 1540 dump_run(trigger);
1538} 1541}
1539 1542
@@ -1750,6 +1753,7 @@ static struct kobj_attribute on_restart_attr =
1750 1753
1751static void __do_restart(void *ignore) 1754static void __do_restart(void *ignore)
1752{ 1755{
1756 __arch_local_irq_stosm(0x04); /* enable DAT */
1753 smp_send_stop(); 1757 smp_send_stop();
1754#ifdef CONFIG_CRASH_DUMP 1758#ifdef CONFIG_CRASH_DUMP
1755 crash_kexec(NULL); 1759 crash_kexec(NULL);
diff --git a/arch/s390/kernel/irq.c b/arch/s390/kernel/irq.c
index 8a22c27219dd..b4f4a7133fa1 100644
--- a/arch/s390/kernel/irq.c
+++ b/arch/s390/kernel/irq.c
@@ -42,7 +42,8 @@ static const struct irq_class intrclass_names[] = {
42 {.name = "VRT", .desc = "[EXT] Virtio" }, 42 {.name = "VRT", .desc = "[EXT] Virtio" },
43 {.name = "SCP", .desc = "[EXT] Service Call" }, 43 {.name = "SCP", .desc = "[EXT] Service Call" },
44 {.name = "IUC", .desc = "[EXT] IUCV" }, 44 {.name = "IUC", .desc = "[EXT] IUCV" },
45 {.name = "CPM", .desc = "[EXT] CPU Measurement" }, 45 {.name = "CMS", .desc = "[EXT] CPU-Measurement: Sampling" },
46 {.name = "CMC", .desc = "[EXT] CPU-Measurement: Counter" },
46 {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" }, 47 {.name = "CIO", .desc = "[I/O] Common I/O Layer Interrupt" },
47 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" }, 48 {.name = "QAI", .desc = "[I/O] QDIO Adapter Interrupt" },
48 {.name = "DAS", .desc = "[I/O] DASD" }, 49 {.name = "DAS", .desc = "[I/O] DASD" },
diff --git a/arch/s390/kernel/machine_kexec.c b/arch/s390/kernel/machine_kexec.c
index bdad47d54478..cdacf8f91b2d 100644
--- a/arch/s390/kernel/machine_kexec.c
+++ b/arch/s390/kernel/machine_kexec.c
@@ -24,6 +24,7 @@
24#include <asm/ipl.h> 24#include <asm/ipl.h>
25#include <asm/diag.h> 25#include <asm/diag.h>
26#include <asm/asm-offsets.h> 26#include <asm/asm-offsets.h>
27#include <asm/os_info.h>
27 28
28typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long); 29typedef void (*relocate_kernel_t)(kimage_entry_t *, unsigned long);
29 30
@@ -79,8 +80,8 @@ static void __do_machine_kdump(void *image)
79#ifdef CONFIG_CRASH_DUMP 80#ifdef CONFIG_CRASH_DUMP
80 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start; 81 int (*start_kdump)(int) = (void *)((struct kimage *) image)->start;
81 82
82 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
83 setup_regs(); 83 setup_regs();
84 __load_psw_mask(PSW_MASK_BASE | PSW_DEFAULT_KEY | PSW_MASK_EA | PSW_MASK_BA);
84 start_kdump(1); 85 start_kdump(1);
85#endif 86#endif
86} 87}
@@ -114,8 +115,13 @@ static void crash_map_pages(int enable)
114 size % KEXEC_CRASH_MEM_ALIGN); 115 size % KEXEC_CRASH_MEM_ALIGN);
115 if (enable) 116 if (enable)
116 vmem_add_mapping(crashk_res.start, size); 117 vmem_add_mapping(crashk_res.start, size);
117 else 118 else {
118 vmem_remove_mapping(crashk_res.start, size); 119 vmem_remove_mapping(crashk_res.start, size);
120 if (size)
121 os_info_crashkernel_add(crashk_res.start, size);
122 else
123 os_info_crashkernel_add(0, 0);
124 }
119} 125}
120 126
121/* 127/*
@@ -208,6 +214,7 @@ static void __machine_kexec(void *data)
208{ 214{
209 struct kimage *image = data; 215 struct kimage *image = data;
210 216
217 __arch_local_irq_stosm(0x04); /* enable DAT */
211 pfault_fini(); 218 pfault_fini();
212 tracing_off(); 219 tracing_off();
213 debug_locks_off(); 220 debug_locks_off();
diff --git a/arch/s390/kernel/os_info.c b/arch/s390/kernel/os_info.c
index e8d6c214d498..95fa5ac6c4ce 100644
--- a/arch/s390/kernel/os_info.c
+++ b/arch/s390/kernel/os_info.c
@@ -60,7 +60,7 @@ void __init os_info_init(void)
60 os_info.version_minor = OS_INFO_VERSION_MINOR; 60 os_info.version_minor = OS_INFO_VERSION_MINOR;
61 os_info.magic = OS_INFO_MAGIC; 61 os_info.magic = OS_INFO_MAGIC;
62 os_info.csum = os_info_csum(&os_info); 62 os_info.csum = os_info_csum(&os_info);
63 copy_to_absolute_zero(&S390_lowcore.os_info, &ptr, sizeof(ptr)); 63 memcpy_absolute(&S390_lowcore.os_info, &ptr, sizeof(ptr));
64} 64}
65 65
66#ifdef CONFIG_CRASH_DUMP 66#ifdef CONFIG_CRASH_DUMP
@@ -138,7 +138,6 @@ static void os_info_old_init(void)
138 goto fail_free; 138 goto fail_free;
139 os_info_old_alloc(OS_INFO_VMCOREINFO, 1); 139 os_info_old_alloc(OS_INFO_VMCOREINFO, 1);
140 os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1); 140 os_info_old_alloc(OS_INFO_REIPL_BLOCK, 1);
141 os_info_old_alloc(OS_INFO_INIT_FN, PAGE_SIZE);
142 pr_info("crashkernel: addr=0x%lx size=%lu\n", 141 pr_info("crashkernel: addr=0x%lx size=%lu\n",
143 (unsigned long) os_info_old->crashkernel_addr, 142 (unsigned long) os_info_old->crashkernel_addr,
144 (unsigned long) os_info_old->crashkernel_size); 143 (unsigned long) os_info_old->crashkernel_size);
diff --git a/arch/s390/kernel/perf_cpum_cf.c b/arch/s390/kernel/perf_cpum_cf.c
index cb019f429e88..9871b1971ed7 100644
--- a/arch/s390/kernel/perf_cpum_cf.c
+++ b/arch/s390/kernel/perf_cpum_cf.c
@@ -225,7 +225,7 @@ static void cpumf_measurement_alert(struct ext_code ext_code,
225 if (!(alert & CPU_MF_INT_CF_MASK)) 225 if (!(alert & CPU_MF_INT_CF_MASK))
226 return; 226 return;
227 227
228 kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++; 228 kstat_cpu(smp_processor_id()).irqs[EXTINT_CMC]++;
229 cpuhw = &__get_cpu_var(cpu_hw_events); 229 cpuhw = &__get_cpu_var(cpu_hw_events);
230 230
231 /* Measurement alerts are shared and might happen when the PMU 231 /* Measurement alerts are shared and might happen when the PMU
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 06264ae8ccd9..489d1d8d96b0 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -428,10 +428,12 @@ static void __init setup_lowcore(void)
428 lc->restart_fn = (unsigned long) do_restart; 428 lc->restart_fn = (unsigned long) do_restart;
429 lc->restart_data = 0; 429 lc->restart_data = 0;
430 lc->restart_source = -1UL; 430 lc->restart_source = -1UL;
431 memcpy(&S390_lowcore.restart_stack, &lc->restart_stack, 431
432 4*sizeof(unsigned long)); 432 /* Setup absolute zero lowcore */
433 copy_to_absolute_zero(&S390_lowcore.restart_psw, 433 memcpy_absolute(&S390_lowcore.restart_stack, &lc->restart_stack,
434 &lc->restart_psw, sizeof(psw_t)); 434 4 * sizeof(unsigned long));
435 memcpy_absolute(&S390_lowcore.restart_psw, &lc->restart_psw,
436 sizeof(lc->restart_psw));
435 437
436 set_prefix((u32)(unsigned long) lc); 438 set_prefix((u32)(unsigned long) lc);
437 lowcore_ptr[0] = lc; 439 lowcore_ptr[0] = lc;
@@ -598,7 +600,7 @@ static void __init setup_vmcoreinfo(void)
598#ifdef CONFIG_KEXEC 600#ifdef CONFIG_KEXEC
599 unsigned long ptr = paddr_vmcoreinfo_note(); 601 unsigned long ptr = paddr_vmcoreinfo_note();
600 602
601 copy_to_absolute_zero(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr)); 603 memcpy_absolute(&S390_lowcore.vmcore_info, &ptr, sizeof(ptr));
602#endif 604#endif
603} 605}
604 606
diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c
index f626232e216c..ac565b44aabb 100644
--- a/arch/s390/kernel/signal.c
+++ b/arch/s390/kernel/signal.c
@@ -33,9 +33,6 @@
33#include <asm/switch_to.h> 33#include <asm/switch_to.h>
34#include "entry.h" 34#include "entry.h"
35 35
36#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
37
38
39typedef struct 36typedef struct
40{ 37{
41 __u8 callee_used_stack[__SIGNAL_FRAMESIZE]; 38 __u8 callee_used_stack[__SIGNAL_FRAMESIZE];
@@ -169,7 +166,6 @@ SYSCALL_DEFINE0(sigreturn)
169 goto badframe; 166 goto badframe;
170 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE)) 167 if (__copy_from_user(&set.sig, &frame->sc.oldmask, _SIGMASK_COPY_SIZE))
171 goto badframe; 168 goto badframe;
172 sigdelsetmask(&set, ~_BLOCKABLE);
173 set_current_blocked(&set); 169 set_current_blocked(&set);
174 if (restore_sigregs(regs, &frame->sregs)) 170 if (restore_sigregs(regs, &frame->sregs))
175 goto badframe; 171 goto badframe;
@@ -189,7 +185,6 @@ SYSCALL_DEFINE0(rt_sigreturn)
189 goto badframe; 185 goto badframe;
190 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set))) 186 if (__copy_from_user(&set.sig, &frame->uc.uc_sigmask, sizeof(set)))
191 goto badframe; 187 goto badframe;
192 sigdelsetmask(&set, ~_BLOCKABLE);
193 set_current_blocked(&set); 188 set_current_blocked(&set);
194 if (restore_sigregs(regs, &frame->uc.uc_mcontext)) 189 if (restore_sigregs(regs, &frame->uc.uc_mcontext))
195 goto badframe; 190 goto badframe;
@@ -367,7 +362,7 @@ give_sigsegv:
367 return -EFAULT; 362 return -EFAULT;
368} 363}
369 364
370static int handle_signal(unsigned long sig, struct k_sigaction *ka, 365static void handle_signal(unsigned long sig, struct k_sigaction *ka,
371 siginfo_t *info, sigset_t *oldset, 366 siginfo_t *info, sigset_t *oldset,
372 struct pt_regs *regs) 367 struct pt_regs *regs)
373{ 368{
@@ -379,9 +374,9 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
379 else 374 else
380 ret = setup_frame(sig, ka, oldset, regs); 375 ret = setup_frame(sig, ka, oldset, regs);
381 if (ret) 376 if (ret)
382 return ret; 377 return;
383 block_sigmask(ka, sig); 378 signal_delivered(sig, info, ka, regs,
384 return 0; 379 test_thread_flag(TIF_SINGLE_STEP));
385} 380}
386 381
387/* 382/*
@@ -398,12 +393,7 @@ void do_signal(struct pt_regs *regs)
398 siginfo_t info; 393 siginfo_t info;
399 int signr; 394 int signr;
400 struct k_sigaction ka; 395 struct k_sigaction ka;
401 sigset_t *oldset; 396 sigset_t *oldset = sigmask_to_save();
402
403 if (test_thread_flag(TIF_RESTORE_SIGMASK))
404 oldset = &current->saved_sigmask;
405 else
406 oldset = &current->blocked;
407 397
408 /* 398 /*
409 * Get signal to deliver. When running under ptrace, at this point 399 * Get signal to deliver. When running under ptrace, at this point
@@ -441,24 +431,10 @@ void do_signal(struct pt_regs *regs)
441 /* No longer in a system call */ 431 /* No longer in a system call */
442 clear_thread_flag(TIF_SYSCALL); 432 clear_thread_flag(TIF_SYSCALL);
443 433
444 if ((is_compat_task() ? 434 if (is_compat_task())
445 handle_signal32(signr, &ka, &info, oldset, regs) : 435 handle_signal32(signr, &ka, &info, oldset, regs);
446 handle_signal(signr, &ka, &info, oldset, regs)) == 0) { 436 else
447 /* 437 handle_signal(signr, &ka, &info, oldset, regs);
448 * A signal was successfully delivered; the saved
449 * sigmask will have been stored in the signal frame,
450 * and will be restored by sigreturn, so we can simply
451 * clear the TIF_RESTORE_SIGMASK flag.
452 */
453 if (test_thread_flag(TIF_RESTORE_SIGMASK))
454 clear_thread_flag(TIF_RESTORE_SIGMASK);
455
456 /*
457 * Let tracing know that we've done the handler setup.
458 */
459 tracehook_signal_handler(signr, &info, &ka, regs,
460 test_thread_flag(TIF_SINGLE_STEP));
461 }
462 return; 438 return;
463 } 439 }
464 440
@@ -484,16 +460,11 @@ void do_signal(struct pt_regs *regs)
484 /* 460 /*
485 * If there's no signal to deliver, we just put the saved sigmask back. 461 * If there's no signal to deliver, we just put the saved sigmask back.
486 */ 462 */
487 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 463 restore_saved_sigmask();
488 clear_thread_flag(TIF_RESTORE_SIGMASK);
489 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
490 }
491} 464}
492 465
493void do_notify_resume(struct pt_regs *regs) 466void do_notify_resume(struct pt_regs *regs)
494{ 467{
495 clear_thread_flag(TIF_NOTIFY_RESUME); 468 clear_thread_flag(TIF_NOTIFY_RESUME);
496 tracehook_notify_resume(regs); 469 tracehook_notify_resume(regs);
497 if (current->replacement_session_keyring)
498 key_replace_session_keyring();
499} 470}
diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
index 647ba9425893..15cca26ccb6c 100644
--- a/arch/s390/kernel/smp.c
+++ b/arch/s390/kernel/smp.c
@@ -297,26 +297,27 @@ static void pcpu_start_fn(struct pcpu *pcpu, void (*func)(void *), void *data)
297static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *), 297static void pcpu_delegate(struct pcpu *pcpu, void (*func)(void *),
298 void *data, unsigned long stack) 298 void *data, unsigned long stack)
299{ 299{
300 struct _lowcore *lc = pcpu->lowcore; 300 struct _lowcore *lc = lowcore_ptr[pcpu - pcpu_devices];
301 unsigned short this_cpu; 301 struct {
302 unsigned long stack;
303 void *func;
304 void *data;
305 unsigned long source;
306 } restart = { stack, func, data, stap() };
302 307
303 __load_psw_mask(psw_kernel_bits); 308 __load_psw_mask(psw_kernel_bits);
304 this_cpu = stap(); 309 if (pcpu->address == restart.source)
305 if (pcpu->address == this_cpu)
306 func(data); /* should not return */ 310 func(data); /* should not return */
307 /* Stop target cpu (if func returns this stops the current cpu). */ 311 /* Stop target cpu (if func returns this stops the current cpu). */
308 pcpu_sigp_retry(pcpu, sigp_stop, 0); 312 pcpu_sigp_retry(pcpu, sigp_stop, 0);
309 /* Restart func on the target cpu and stop the current cpu. */ 313 /* Restart func on the target cpu and stop the current cpu. */
310 lc->restart_stack = stack; 314 memcpy_absolute(&lc->restart_stack, &restart, sizeof(restart));
311 lc->restart_fn = (unsigned long) func;
312 lc->restart_data = (unsigned long) data;
313 lc->restart_source = (unsigned long) this_cpu;
314 asm volatile( 315 asm volatile(
315 "0: sigp 0,%0,6 # sigp restart to target cpu\n" 316 "0: sigp 0,%0,6 # sigp restart to target cpu\n"
316 " brc 2,0b # busy, try again\n" 317 " brc 2,0b # busy, try again\n"
317 "1: sigp 0,%1,5 # sigp stop to current cpu\n" 318 "1: sigp 0,%1,5 # sigp stop to current cpu\n"
318 " brc 2,1b # busy, try again\n" 319 " brc 2,1b # busy, try again\n"
319 : : "d" (pcpu->address), "d" (this_cpu) : "0", "1", "cc"); 320 : : "d" (pcpu->address), "d" (restart.source) : "0", "1", "cc");
320 for (;;) ; 321 for (;;) ;
321} 322}
322 323
@@ -800,17 +801,6 @@ void __noreturn cpu_die(void)
800 801
801#endif /* CONFIG_HOTPLUG_CPU */ 802#endif /* CONFIG_HOTPLUG_CPU */
802 803
803static void smp_call_os_info_init_fn(void)
804{
805 int (*init_fn)(void);
806 unsigned long size;
807
808 init_fn = os_info_old_entry(OS_INFO_INIT_FN, &size);
809 if (!init_fn)
810 return;
811 init_fn();
812}
813
814void __init smp_prepare_cpus(unsigned int max_cpus) 804void __init smp_prepare_cpus(unsigned int max_cpus)
815{ 805{
816 /* request the 0x1201 emergency signal external interrupt */ 806 /* request the 0x1201 emergency signal external interrupt */
@@ -819,7 +809,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
819 /* request the 0x1202 external call external interrupt */ 809 /* request the 0x1202 external call external interrupt */
820 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0) 810 if (register_external_interrupt(0x1202, do_ext_call_interrupt) != 0)
821 panic("Couldn't request external interrupt 0x1202"); 811 panic("Couldn't request external interrupt 0x1202");
822 smp_call_os_info_init_fn();
823 smp_detect_cpus(); 812 smp_detect_cpus();
824} 813}
825 814
@@ -943,19 +932,6 @@ static struct attribute_group cpu_common_attr_group = {
943 .attrs = cpu_common_attrs, 932 .attrs = cpu_common_attrs,
944}; 933};
945 934
946static ssize_t show_capability(struct device *dev,
947 struct device_attribute *attr, char *buf)
948{
949 unsigned int capability;
950 int rc;
951
952 rc = get_cpu_capability(&capability);
953 if (rc)
954 return rc;
955 return sprintf(buf, "%u\n", capability);
956}
957static DEVICE_ATTR(capability, 0444, show_capability, NULL);
958
959static ssize_t show_idle_count(struct device *dev, 935static ssize_t show_idle_count(struct device *dev,
960 struct device_attribute *attr, char *buf) 936 struct device_attribute *attr, char *buf)
961{ 937{
@@ -993,7 +969,6 @@ static ssize_t show_idle_time(struct device *dev,
993static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL); 969static DEVICE_ATTR(idle_time_us, 0444, show_idle_time, NULL);
994 970
995static struct attribute *cpu_online_attrs[] = { 971static struct attribute *cpu_online_attrs[] = {
996 &dev_attr_capability.attr,
997 &dev_attr_idle_count.attr, 972 &dev_attr_idle_count.attr,
998 &dev_attr_idle_time_us.attr, 973 &dev_attr_idle_time_us.attr,
999 NULL, 974 NULL,
diff --git a/arch/s390/kernel/sysinfo.c b/arch/s390/kernel/sysinfo.c
index 2a94b774695c..fa0eb238dac7 100644
--- a/arch/s390/kernel/sysinfo.c
+++ b/arch/s390/kernel/sysinfo.c
@@ -393,27 +393,6 @@ static __init int create_proc_service_level(void)
393subsys_initcall(create_proc_service_level); 393subsys_initcall(create_proc_service_level);
394 394
395/* 395/*
396 * Bogomips calculation based on cpu capability.
397 */
398int get_cpu_capability(unsigned int *capability)
399{
400 struct sysinfo_1_2_2 *info;
401 int rc;
402
403 info = (void *) get_zeroed_page(GFP_KERNEL);
404 if (!info)
405 return -ENOMEM;
406 rc = stsi(info, 1, 2, 2);
407 if (rc == -ENOSYS)
408 goto out;
409 rc = 0;
410 *capability = info->capability;
411out:
412 free_page((unsigned long) info);
413 return rc;
414}
415
416/*
417 * CPU capability might have changed. Therefore recalculate loops_per_jiffy. 396 * CPU capability might have changed. Therefore recalculate loops_per_jiffy.
418 */ 397 */
419void s390_adjust_jiffies(void) 398void s390_adjust_jiffies(void)
diff --git a/arch/s390/lib/uaccess_mvcos.c b/arch/s390/lib/uaccess_mvcos.c
index 60455f104ea3..58a75a8ae90c 100644
--- a/arch/s390/lib/uaccess_mvcos.c
+++ b/arch/s390/lib/uaccess_mvcos.c
@@ -14,7 +14,7 @@
14#include <asm/futex.h> 14#include <asm/futex.h>
15#include "uaccess.h" 15#include "uaccess.h"
16 16
17#ifndef __s390x__ 17#ifndef CONFIG_64BIT
18#define AHI "ahi" 18#define AHI "ahi"
19#define ALR "alr" 19#define ALR "alr"
20#define CLR "clr" 20#define CLR "clr"
diff --git a/arch/s390/lib/uaccess_std.c b/arch/s390/lib/uaccess_std.c
index bb1a7eed42ce..57e94298539b 100644
--- a/arch/s390/lib/uaccess_std.c
+++ b/arch/s390/lib/uaccess_std.c
@@ -15,7 +15,7 @@
15#include <asm/futex.h> 15#include <asm/futex.h>
16#include "uaccess.h" 16#include "uaccess.h"
17 17
18#ifndef __s390x__ 18#ifndef CONFIG_64BIT
19#define AHI "ahi" 19#define AHI "ahi"
20#define ALR "alr" 20#define ALR "alr"
21#define CLR "clr" 21#define CLR "clr"
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index 795a0a9bb2eb..921fa541dc04 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -101,19 +101,27 @@ int memcpy_real(void *dest, void *src, size_t count)
101} 101}
102 102
103/* 103/*
104 * Copy memory to absolute zero 104 * Copy memory in absolute mode (kernel to kernel)
105 */ 105 */
106void copy_to_absolute_zero(void *dest, void *src, size_t count) 106void memcpy_absolute(void *dest, void *src, size_t count)
107{ 107{
108 unsigned long cr0; 108 unsigned long cr0, flags, prefix;
109 109
110 BUG_ON((unsigned long) dest + count >= sizeof(struct _lowcore)); 110 flags = arch_local_irq_save();
111 preempt_disable();
112 __ctl_store(cr0, 0, 0); 111 __ctl_store(cr0, 0, 0);
113 __ctl_clear_bit(0, 28); /* disable lowcore protection */ 112 __ctl_clear_bit(0, 28); /* disable lowcore protection */
114 memcpy_real(dest + store_prefix(), src, count); 113 prefix = store_prefix();
114 if (prefix) {
115 local_mcck_disable();
116 set_prefix(0);
117 memcpy(dest, src, count);
118 set_prefix(prefix);
119 local_mcck_enable();
120 } else {
121 memcpy(dest, src, count);
122 }
115 __ctl_load(cr0, 0, 0); 123 __ctl_load(cr0, 0, 0);
116 preempt_enable(); 124 arch_local_irq_restore(flags);
117} 125}
118 126
119/* 127/*
@@ -188,20 +196,6 @@ static int is_swapped(unsigned long addr)
188} 196}
189 197
190/* 198/*
191 * Return swapped prefix or zero page address
192 */
193static unsigned long get_swapped(unsigned long addr)
194{
195 unsigned long prefix = store_prefix();
196
197 if (addr < sizeof(struct _lowcore))
198 return addr + prefix;
199 if (addr >= prefix && addr < prefix + sizeof(struct _lowcore))
200 return addr - prefix;
201 return addr;
202}
203
204/*
205 * Convert a physical pointer for /dev/mem access 199 * Convert a physical pointer for /dev/mem access
206 * 200 *
207 * For swapped prefix pages a new buffer is returned that contains a copy of 201 * For swapped prefix pages a new buffer is returned that contains a copy of
@@ -218,7 +212,7 @@ void *xlate_dev_mem_ptr(unsigned long addr)
218 size = PAGE_SIZE - (addr & ~PAGE_MASK); 212 size = PAGE_SIZE - (addr & ~PAGE_MASK);
219 bounce = (void *) __get_free_page(GFP_ATOMIC); 213 bounce = (void *) __get_free_page(GFP_ATOMIC);
220 if (bounce) 214 if (bounce)
221 memcpy_real(bounce, (void *) get_swapped(addr), size); 215 memcpy_absolute(bounce, (void *) addr, size);
222 } 216 }
223 preempt_enable(); 217 preempt_enable();
224 put_online_cpus(); 218 put_online_cpus();
diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c
index 4799383e2df9..71ae20df674e 100644
--- a/arch/s390/mm/vmem.c
+++ b/arch/s390/mm/vmem.c
@@ -109,7 +109,7 @@ static int vmem_add_mem(unsigned long start, unsigned long size, int ro)
109 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0)); 109 pte = mk_pte_phys(address, __pgprot(ro ? _PAGE_RO : 0));
110 pm_dir = pmd_offset(pu_dir, address); 110 pm_dir = pmd_offset(pu_dir, address);
111 111
112#ifdef __s390x__ 112#ifdef CONFIG_64BIT
113 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) && 113 if (MACHINE_HAS_HPAGE && !(address & ~HPAGE_MASK) &&
114 (address + HPAGE_SIZE <= start + size) && 114 (address + HPAGE_SIZE <= start + size) &&
115 (address >= HPAGE_SIZE)) { 115 (address >= HPAGE_SIZE)) {
diff --git a/arch/s390/oprofile/hwsampler.c b/arch/s390/oprofile/hwsampler.c
index c6646de07bf4..a4a89fa980d6 100644
--- a/arch/s390/oprofile/hwsampler.c
+++ b/arch/s390/oprofile/hwsampler.c
@@ -235,7 +235,7 @@ static void hws_ext_handler(struct ext_code ext_code,
235 if (!(param32 & CPU_MF_INT_SF_MASK)) 235 if (!(param32 & CPU_MF_INT_SF_MASK))
236 return; 236 return;
237 237
238 kstat_cpu(smp_processor_id()).irqs[EXTINT_CPM]++; 238 kstat_cpu(smp_processor_id()).irqs[EXTINT_CMS]++;
239 atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32); 239 atomic_xchg(&cb->ext_params, atomic_read(&cb->ext_params) | param32);
240 240
241 if (hws_wq) 241 if (hws_wq)
diff --git a/arch/score/kernel/signal.c b/arch/score/kernel/signal.c
index d4a49011c48a..e382c52ca0d9 100644
--- a/arch/score/kernel/signal.c
+++ b/arch/score/kernel/signal.c
@@ -34,8 +34,6 @@
34#include <asm/syscalls.h> 34#include <asm/syscalls.h>
35#include <asm/ucontext.h> 35#include <asm/ucontext.h>
36 36
37#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
38
39struct rt_sigframe { 37struct rt_sigframe {
40 u32 rs_ass[4]; /* argument save space */ 38 u32 rs_ass[4]; /* argument save space */
41 u32 rs_code[2]; /* signal trampoline */ 39 u32 rs_code[2]; /* signal trampoline */
@@ -162,7 +160,6 @@ score_rt_sigreturn(struct pt_regs *regs)
162 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) 160 if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
163 goto badframe; 161 goto badframe;
164 162
165 sigdelsetmask(&set, ~_BLOCKABLE);
166 set_current_blocked(&set); 163 set_current_blocked(&set);
167 164
168 sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); 165 sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
@@ -241,11 +238,9 @@ give_sigsegv:
241 return -EFAULT; 238 return -EFAULT;
242} 239}
243 240
244static int handle_signal(unsigned long sig, siginfo_t *info, 241static void handle_signal(unsigned long sig, siginfo_t *info,
245 struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) 242 struct k_sigaction *ka, struct pt_regs *regs)
246{ 243{
247 int ret;
248
249 if (regs->is_syscall) { 244 if (regs->is_syscall) {
250 switch (regs->regs[4]) { 245 switch (regs->regs[4]) {
251 case ERESTART_RESTARTBLOCK: 246 case ERESTART_RESTARTBLOCK:
@@ -269,18 +264,15 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
269 /* 264 /*
270 * Set up the stack frame 265 * Set up the stack frame
271 */ 266 */
272 ret = setup_rt_frame(ka, regs, sig, oldset, info); 267 if (setup_rt_frame(ka, regs, sig, sigmask_to_save(), info) < 0)
273 268 return;
274 if (ret == 0)
275 block_sigmask(ka, sig);
276 269
277 return ret; 270 signal_delivered(sig, info, ka, regs, 0);
278} 271}
279 272
280static void do_signal(struct pt_regs *regs) 273static void do_signal(struct pt_regs *regs)
281{ 274{
282 struct k_sigaction ka; 275 struct k_sigaction ka;
283 sigset_t *oldset;
284 siginfo_t info; 276 siginfo_t info;
285 int signr; 277 int signr;
286 278
@@ -292,25 +284,10 @@ static void do_signal(struct pt_regs *regs)
292 if (!user_mode(regs)) 284 if (!user_mode(regs))
293 return; 285 return;
294 286
295 if (test_thread_flag(TIF_RESTORE_SIGMASK))
296 oldset = &current->saved_sigmask;
297 else
298 oldset = &current->blocked;
299
300 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 287 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
301 if (signr > 0) { 288 if (signr > 0) {
302 /* Actually deliver the signal. */ 289 /* Actually deliver the signal. */
303 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 290 handle_signal(signr, &info, &ka, regs);
304 /*
305 * A signal was successfully delivered; the saved
306 * sigmask will have been stored in the signal frame,
307 * and will be restored by sigreturn, so we can simply
308 * clear the TIF_RESTORE_SIGMASK flag.
309 */
310 if (test_thread_flag(TIF_RESTORE_SIGMASK))
311 clear_thread_flag(TIF_RESTORE_SIGMASK);
312 }
313
314 return; 291 return;
315 } 292 }
316 293
@@ -337,10 +314,7 @@ static void do_signal(struct pt_regs *regs)
337 * If there's no signal to deliver, we just put the saved sigmask 314 * If there's no signal to deliver, we just put the saved sigmask
338 * back 315 * back
339 */ 316 */
340 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 317 restore_saved_sigmask();
341 clear_thread_flag(TIF_RESTORE_SIGMASK);
342 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
343 }
344} 318}
345 319
346/* 320/*
@@ -356,7 +330,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
356 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 330 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
357 clear_thread_flag(TIF_NOTIFY_RESUME); 331 clear_thread_flag(TIF_NOTIFY_RESUME);
358 tracehook_notify_resume(regs); 332 tracehook_notify_resume(regs);
359 if (current->replacement_session_keyring)
360 key_replace_session_keyring();
361 } 333 }
362} 334}
diff --git a/arch/sh/Kconfig b/arch/sh/Kconfig
index 99bcd0ee838d..31d9db7913e4 100644
--- a/arch/sh/Kconfig
+++ b/arch/sh/Kconfig
@@ -32,6 +32,8 @@ config SUPERH
32 select GENERIC_SMP_IDLE_THREAD 32 select GENERIC_SMP_IDLE_THREAD
33 select GENERIC_CLOCKEVENTS 33 select GENERIC_CLOCKEVENTS
34 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST 34 select GENERIC_CMOS_UPDATE if SH_SH03 || SH_DREAMCAST
35 select GENERIC_STRNCPY_FROM_USER
36 select GENERIC_STRNLEN_USER
35 help 37 help
36 The SuperH is a RISC processor targeted for use in embedded systems 38 The SuperH is a RISC processor targeted for use in embedded systems
37 and consumer electronics; it was also used in the Sega Dreamcast 39 and consumer electronics; it was also used in the Sega Dreamcast
diff --git a/arch/sh/Makefile b/arch/sh/Makefile
index 46edf070da1c..aed701c7b11b 100644
--- a/arch/sh/Makefile
+++ b/arch/sh/Makefile
@@ -9,6 +9,12 @@
9# License. See the file "COPYING" in the main directory of this archive 9# License. See the file "COPYING" in the main directory of this archive
10# for more details. 10# for more details.
11# 11#
12ifneq ($(SUBARCH),$(ARCH))
13 ifeq ($(CROSS_COMPILE),)
14 CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
15 endif
16endif
17
12isa-y := any 18isa-y := any
13isa-$(CONFIG_SH_DSP) := sh 19isa-$(CONFIG_SH_DSP) := sh
14isa-$(CONFIG_CPU_SH2) := sh2 20isa-$(CONFIG_CPU_SH2) := sh2
@@ -106,19 +112,13 @@ LDFLAGS_vmlinux += --defsym phys_stext=_stext-$(CONFIG_PAGE_OFFSET) \
106KBUILD_DEFCONFIG := cayman_defconfig 112KBUILD_DEFCONFIG := cayman_defconfig
107endif 113endif
108 114
109ifneq ($(SUBARCH),$(ARCH))
110 ifeq ($(CROSS_COMPILE),)
111 CROSS_COMPILE := $(call cc-cross-prefix, $(UTS_MACHINE)-linux- $(UTS_MACHINE)-linux-gnu- $(UTS_MACHINE)-unknown-linux-gnu-)
112 endif
113endif
114
115ifdef CONFIG_CPU_LITTLE_ENDIAN 115ifdef CONFIG_CPU_LITTLE_ENDIAN
116ld-bfd := elf32-$(UTS_MACHINE)-linux 116ld-bfd := elf32-$(UTS_MACHINE)-linux
117LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64' --oformat $(ld-bfd) 117LDFLAGS_vmlinux += --defsym jiffies=jiffies_64 --oformat $(ld-bfd)
118LDFLAGS += -EL 118LDFLAGS += -EL
119else 119else
120ld-bfd := elf32-$(UTS_MACHINE)big-linux 120ld-bfd := elf32-$(UTS_MACHINE)big-linux
121LDFLAGS_vmlinux += --defsym 'jiffies=jiffies_64+4' --oformat $(ld-bfd) 121LDFLAGS_vmlinux += --defsym jiffies=jiffies_64+4 --oformat $(ld-bfd)
122LDFLAGS += -EB 122LDFLAGS += -EB
123endif 123endif
124 124
diff --git a/arch/sh/boards/mach-migor/setup.c b/arch/sh/boards/mach-migor/setup.c
index 34cd0c5ff2e1..a8a1ca741c85 100644
--- a/arch/sh/boards/mach-migor/setup.c
+++ b/arch/sh/boards/mach-migor/setup.c
@@ -188,7 +188,6 @@ static struct platform_nand_data migor_nand_flash_data = {
188 .partitions = migor_nand_flash_partitions, 188 .partitions = migor_nand_flash_partitions,
189 .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions), 189 .nr_partitions = ARRAY_SIZE(migor_nand_flash_partitions),
190 .chip_delay = 20, 190 .chip_delay = 20,
191 .part_probe_types = (const char *[]) { "cmdlinepart", NULL },
192 }, 191 },
193 .ctrl = { 192 .ctrl = {
194 .dev_ready = migor_nand_flash_ready, 193 .dev_ready = migor_nand_flash_ready,
diff --git a/arch/sh/include/asm/Kbuild b/arch/sh/include/asm/Kbuild
index 7beb42322f60..7b673ddcd555 100644
--- a/arch/sh/include/asm/Kbuild
+++ b/arch/sh/include/asm/Kbuild
@@ -1,5 +1,39 @@
1include include/asm-generic/Kbuild.asm 1include include/asm-generic/Kbuild.asm
2 2
3generic-y += bitsperlong.h
4generic-y += cputime.h
5generic-y += current.h
6generic-y += delay.h
7generic-y += div64.h
8generic-y += emergency-restart.h
9generic-y += errno.h
10generic-y += fcntl.h
11generic-y += ioctl.h
12generic-y += ipcbuf.h
13generic-y += irq_regs.h
14generic-y += kvm_para.h
15generic-y += local.h
16generic-y += local64.h
17generic-y += param.h
18generic-y += parport.h
19generic-y += percpu.h
20generic-y += poll.h
21generic-y += mman.h
22generic-y += msgbuf.h
23generic-y += resource.h
24generic-y += scatterlist.h
25generic-y += sembuf.h
26generic-y += serial.h
27generic-y += shmbuf.h
28generic-y += siginfo.h
29generic-y += sizes.h
30generic-y += socket.h
31generic-y += statfs.h
32generic-y += termbits.h
33generic-y += termios.h
34generic-y += ucontext.h
35generic-y += xor.h
36
3header-y += cachectl.h 37header-y += cachectl.h
4header-y += cpu-features.h 38header-y += cpu-features.h
5header-y += hw_breakpoint.h 39header-y += hw_breakpoint.h
diff --git a/arch/sh/include/asm/bitsperlong.h b/arch/sh/include/asm/bitsperlong.h
deleted file mode 100644
index 6dc0bb0c13b2..000000000000
--- a/arch/sh/include/asm/bitsperlong.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/bitsperlong.h>
diff --git a/arch/sh/include/asm/cputime.h b/arch/sh/include/asm/cputime.h
deleted file mode 100644
index 6ca395d1393e..000000000000
--- a/arch/sh/include/asm/cputime.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __SH_CPUTIME_H
2#define __SH_CPUTIME_H
3
4#include <asm-generic/cputime.h>
5
6#endif /* __SH_CPUTIME_H */
diff --git a/arch/sh/include/asm/current.h b/arch/sh/include/asm/current.h
deleted file mode 100644
index 4c51401b5537..000000000000
--- a/arch/sh/include/asm/current.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/current.h>
diff --git a/arch/sh/include/asm/delay.h b/arch/sh/include/asm/delay.h
deleted file mode 100644
index 9670e127b7b2..000000000000
--- a/arch/sh/include/asm/delay.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/delay.h>
diff --git a/arch/sh/include/asm/div64.h b/arch/sh/include/asm/div64.h
deleted file mode 100644
index 6cd978cefb28..000000000000
--- a/arch/sh/include/asm/div64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/div64.h>
diff --git a/arch/sh/include/asm/emergency-restart.h b/arch/sh/include/asm/emergency-restart.h
deleted file mode 100644
index 108d8c48e42e..000000000000
--- a/arch/sh/include/asm/emergency-restart.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef _ASM_EMERGENCY_RESTART_H
2#define _ASM_EMERGENCY_RESTART_H
3
4#include <asm-generic/emergency-restart.h>
5
6#endif /* _ASM_EMERGENCY_RESTART_H */
diff --git a/arch/sh/include/asm/errno.h b/arch/sh/include/asm/errno.h
deleted file mode 100644
index 51cf6f9cebb8..000000000000
--- a/arch/sh/include/asm/errno.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_ERRNO_H
2#define __ASM_SH_ERRNO_H
3
4#include <asm-generic/errno.h>
5
6#endif /* __ASM_SH_ERRNO_H */
diff --git a/arch/sh/include/asm/fcntl.h b/arch/sh/include/asm/fcntl.h
deleted file mode 100644
index 46ab12db5739..000000000000
--- a/arch/sh/include/asm/fcntl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/fcntl.h>
diff --git a/arch/sh/include/asm/ioctl.h b/arch/sh/include/asm/ioctl.h
deleted file mode 100644
index b279fe06dfe5..000000000000
--- a/arch/sh/include/asm/ioctl.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ioctl.h>
diff --git a/arch/sh/include/asm/ipcbuf.h b/arch/sh/include/asm/ipcbuf.h
deleted file mode 100644
index 84c7e51cb6d0..000000000000
--- a/arch/sh/include/asm/ipcbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ipcbuf.h>
diff --git a/arch/sh/include/asm/irq_regs.h b/arch/sh/include/asm/irq_regs.h
deleted file mode 100644
index 3dd9c0b70270..000000000000
--- a/arch/sh/include/asm/irq_regs.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/irq_regs.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
deleted file mode 100644
index 14fab8f0b957..000000000000
--- a/arch/sh/include/asm/kvm_para.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/local.h b/arch/sh/include/asm/local.h
deleted file mode 100644
index 9ed9b9cb459a..000000000000
--- a/arch/sh/include/asm/local.h
+++ /dev/null
@@ -1,7 +0,0 @@
1#ifndef __ASM_SH_LOCAL_H
2#define __ASM_SH_LOCAL_H
3
4#include <asm-generic/local.h>
5
6#endif /* __ASM_SH_LOCAL_H */
7
diff --git a/arch/sh/include/asm/local64.h b/arch/sh/include/asm/local64.h
deleted file mode 100644
index 36c93b5cc239..000000000000
--- a/arch/sh/include/asm/local64.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/local64.h>
diff --git a/arch/sh/include/asm/mman.h b/arch/sh/include/asm/mman.h
deleted file mode 100644
index 8eebf89f5ab1..000000000000
--- a/arch/sh/include/asm/mman.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/mman.h>
diff --git a/arch/sh/include/asm/msgbuf.h b/arch/sh/include/asm/msgbuf.h
deleted file mode 100644
index 809134c644a6..000000000000
--- a/arch/sh/include/asm/msgbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/msgbuf.h>
diff --git a/arch/sh/include/asm/param.h b/arch/sh/include/asm/param.h
deleted file mode 100644
index 965d45427975..000000000000
--- a/arch/sh/include/asm/param.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/param.h>
diff --git a/arch/sh/include/asm/parport.h b/arch/sh/include/asm/parport.h
deleted file mode 100644
index cf252af64590..000000000000
--- a/arch/sh/include/asm/parport.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/parport.h>
diff --git a/arch/sh/include/asm/percpu.h b/arch/sh/include/asm/percpu.h
deleted file mode 100644
index 4db4b39a4399..000000000000
--- a/arch/sh/include/asm/percpu.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ARCH_SH_PERCPU
2#define __ARCH_SH_PERCPU
3
4#include <asm-generic/percpu.h>
5
6#endif /* __ARCH_SH_PERCPU */
diff --git a/arch/sh/include/asm/poll.h b/arch/sh/include/asm/poll.h
deleted file mode 100644
index c98509d3149e..000000000000
--- a/arch/sh/include/asm/poll.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/poll.h>
diff --git a/arch/sh/include/asm/posix_types_32.h b/arch/sh/include/asm/posix_types_32.h
index abda58467ece..ba0bdc423b07 100644
--- a/arch/sh/include/asm/posix_types_32.h
+++ b/arch/sh/include/asm/posix_types_32.h
@@ -3,8 +3,6 @@
3 3
4typedef unsigned short __kernel_mode_t; 4typedef unsigned short __kernel_mode_t;
5#define __kernel_mode_t __kernel_mode_t 5#define __kernel_mode_t __kernel_mode_t
6typedef unsigned short __kernel_nlink_t;
7#define __kernel_nlink_t __kernel_nlink_t
8typedef unsigned short __kernel_ipc_pid_t; 6typedef unsigned short __kernel_ipc_pid_t;
9#define __kernel_ipc_pid_t __kernel_ipc_pid_t 7#define __kernel_ipc_pid_t __kernel_ipc_pid_t
10typedef unsigned short __kernel_uid_t; 8typedef unsigned short __kernel_uid_t;
diff --git a/arch/sh/include/asm/posix_types_64.h b/arch/sh/include/asm/posix_types_64.h
index fcda07b4a616..244f7e950e17 100644
--- a/arch/sh/include/asm/posix_types_64.h
+++ b/arch/sh/include/asm/posix_types_64.h
@@ -3,8 +3,6 @@
3 3
4typedef unsigned short __kernel_mode_t; 4typedef unsigned short __kernel_mode_t;
5#define __kernel_mode_t __kernel_mode_t 5#define __kernel_mode_t __kernel_mode_t
6typedef unsigned short __kernel_nlink_t;
7#define __kernel_nlink_t __kernel_nlink_t
8typedef unsigned short __kernel_ipc_pid_t; 6typedef unsigned short __kernel_ipc_pid_t;
9#define __kernel_ipc_pid_t __kernel_ipc_pid_t 7#define __kernel_ipc_pid_t __kernel_ipc_pid_t
10typedef unsigned short __kernel_uid_t; 8typedef unsigned short __kernel_uid_t;
diff --git a/arch/sh/include/asm/resource.h b/arch/sh/include/asm/resource.h
deleted file mode 100644
index 9c2499a86ec0..000000000000
--- a/arch/sh/include/asm/resource.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_RESOURCE_H
2#define __ASM_SH_RESOURCE_H
3
4#include <asm-generic/resource.h>
5
6#endif /* __ASM_SH_RESOURCE_H */
diff --git a/arch/sh/include/asm/scatterlist.h b/arch/sh/include/asm/scatterlist.h
deleted file mode 100644
index 98dfc3510f10..000000000000
--- a/arch/sh/include/asm/scatterlist.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_SCATTERLIST_H
2#define __ASM_SH_SCATTERLIST_H
3
4#include <asm-generic/scatterlist.h>
5
6#endif /* __ASM_SH_SCATTERLIST_H */
diff --git a/arch/sh/include/asm/sembuf.h b/arch/sh/include/asm/sembuf.h
deleted file mode 100644
index 7673b83cfef7..000000000000
--- a/arch/sh/include/asm/sembuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sembuf.h>
diff --git a/arch/sh/include/asm/serial.h b/arch/sh/include/asm/serial.h
deleted file mode 100644
index a0cb0caff152..000000000000
--- a/arch/sh/include/asm/serial.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/serial.h>
diff --git a/arch/sh/include/asm/shmbuf.h b/arch/sh/include/asm/shmbuf.h
deleted file mode 100644
index 83c05fc2de38..000000000000
--- a/arch/sh/include/asm/shmbuf.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/shmbuf.h>
diff --git a/arch/sh/include/asm/siginfo.h b/arch/sh/include/asm/siginfo.h
deleted file mode 100644
index 813040ed68a9..000000000000
--- a/arch/sh/include/asm/siginfo.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_SIGINFO_H
2#define __ASM_SH_SIGINFO_H
3
4#include <asm-generic/siginfo.h>
5
6#endif /* __ASM_SH_SIGINFO_H */
diff --git a/arch/sh/include/asm/sizes.h b/arch/sh/include/asm/sizes.h
deleted file mode 100644
index dd248c2e1085..000000000000
--- a/arch/sh/include/asm/sizes.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/sizes.h>
diff --git a/arch/sh/include/asm/socket.h b/arch/sh/include/asm/socket.h
deleted file mode 100644
index 6b71384b9d8b..000000000000
--- a/arch/sh/include/asm/socket.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/socket.h>
diff --git a/arch/sh/include/asm/statfs.h b/arch/sh/include/asm/statfs.h
deleted file mode 100644
index 9202a023328f..000000000000
--- a/arch/sh/include/asm/statfs.h
+++ /dev/null
@@ -1,6 +0,0 @@
1#ifndef __ASM_SH_STATFS_H
2#define __ASM_SH_STATFS_H
3
4#include <asm-generic/statfs.h>
5
6#endif /* __ASM_SH_STATFS_H */
diff --git a/arch/sh/include/asm/termbits.h b/arch/sh/include/asm/termbits.h
deleted file mode 100644
index 3935b106de79..000000000000
--- a/arch/sh/include/asm/termbits.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termbits.h>
diff --git a/arch/sh/include/asm/termios.h b/arch/sh/include/asm/termios.h
deleted file mode 100644
index 280d78a9d966..000000000000
--- a/arch/sh/include/asm/termios.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/termios.h>
diff --git a/arch/sh/include/asm/thread_info.h b/arch/sh/include/asm/thread_info.h
index 0c04ffc4f12c..bc13b57cdc83 100644
--- a/arch/sh/include/asm/thread_info.h
+++ b/arch/sh/include/asm/thread_info.h
@@ -169,7 +169,7 @@ static inline void set_restore_sigmask(void)
169{ 169{
170 struct thread_info *ti = current_thread_info(); 170 struct thread_info *ti = current_thread_info();
171 ti->status |= TS_RESTORE_SIGMASK; 171 ti->status |= TS_RESTORE_SIGMASK;
172 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 172 WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
173} 173}
174 174
175#define TI_FLAG_FAULT_CODE_SHIFT 24 175#define TI_FLAG_FAULT_CODE_SHIFT 24
@@ -189,6 +189,23 @@ static inline unsigned int get_thread_fault_code(void)
189 struct thread_info *ti = current_thread_info(); 189 struct thread_info *ti = current_thread_info();
190 return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT; 190 return ti->flags >> TI_FLAG_FAULT_CODE_SHIFT;
191} 191}
192
193static inline void clear_restore_sigmask(void)
194{
195 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
196}
197static inline bool test_restore_sigmask(void)
198{
199 return current_thread_info()->status & TS_RESTORE_SIGMASK;
200}
201static inline bool test_and_clear_restore_sigmask(void)
202{
203 struct thread_info *ti = current_thread_info();
204 if (!(ti->status & TS_RESTORE_SIGMASK))
205 return false;
206 ti->status &= ~TS_RESTORE_SIGMASK;
207 return true;
208}
192#endif /* !__ASSEMBLY__ */ 209#endif /* !__ASSEMBLY__ */
193 210
194#endif /* __KERNEL__ */ 211#endif /* __KERNEL__ */
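The clear_restore_sigmask()/test_restore_sigmask()/test_and_clear_restore_sigmask() helpers added above are what the reworked core signal code expects every architecture with TS_RESTORE_SIGMASK to provide. A minimal sketch of the intended calling pattern in an arch's do_signal() follows; setup_some_frame() is a hypothetical stand-in for setup_frame()/setup_rt_frame(), and the surrounding details are elided:

/* Skeleton only, assuming the 3.5-era signal core (sigmask_to_save(),
 * signal_delivered(), restore_saved_sigmask()); setup_some_frame() is a
 * hypothetical placeholder for the arch's frame setup.
 */
#include <linux/sched.h>
#include <linux/signal.h>

static int setup_some_frame(int sig, struct k_sigaction *ka,
                            sigset_t *oldset, struct pt_regs *regs);

static void example_do_signal(struct pt_regs *regs)
{
        siginfo_t info;
        struct k_sigaction ka;
        int signr;

        signr = get_signal_to_deliver(&info, &ka, regs, NULL);
        if (signr > 0) {
                /* sigmask_to_save() returns &current->saved_sigmask when
                 * TS_RESTORE_SIGMASK is set, &current->blocked otherwise. */
                if (setup_some_frame(signr, &ka, sigmask_to_save(), regs) == 0)
                        signal_delivered(signr, &info, &ka, regs, 0);
                return;
        }

        /* No handler ran: put a saved sigmask back (this also clears
         * TS_RESTORE_SIGMASK). */
        restore_saved_sigmask();
}

The signal_32.c and signal_64.c hunks later in this patch convert sh to exactly this shape.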
diff --git a/arch/sh/include/asm/uaccess.h b/arch/sh/include/asm/uaccess.h
index 050f221fa898..8698a80ed00c 100644
--- a/arch/sh/include/asm/uaccess.h
+++ b/arch/sh/include/asm/uaccess.h
@@ -25,6 +25,8 @@
25 (__chk_user_ptr(addr), \ 25 (__chk_user_ptr(addr), \
26 __access_ok((unsigned long __force)(addr), (size))) 26 __access_ok((unsigned long __force)(addr), (size)))
27 27
28#define user_addr_max() (current_thread_info()->addr_limit.seg)
29
28/* 30/*
29 * Uh, these should become the main single-value transfer routines ... 31 * Uh, these should become the main single-value transfer routines ...
30 * They automatically use the right size if we just have the right 32 * They automatically use the right size if we just have the right
@@ -100,6 +102,11 @@ struct __large_struct { unsigned long buf[100]; };
100# include "uaccess_64.h" 102# include "uaccess_64.h"
101#endif 103#endif
102 104
105extern long strncpy_from_user(char *dest, const char __user *src, long count);
106
107extern __must_check long strlen_user(const char __user *str);
108extern __must_check long strnlen_user(const char __user *str, long n);
109
103/* Generic arbitrary sized copy. */ 110/* Generic arbitrary sized copy. */
104/* Return the number of bytes NOT copied */ 111/* Return the number of bytes NOT copied */
105__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n); 112__kernel_size_t __copy_user(void *to, const void *from, __kernel_size_t n);
@@ -137,37 +144,6 @@ __kernel_size_t __clear_user(void *addr, __kernel_size_t size);
137 __cl_size; \ 144 __cl_size; \
138}) 145})
139 146
140/**
141 * strncpy_from_user: - Copy a NUL terminated string from userspace.
142 * @dst: Destination address, in kernel space. This buffer must be at
143 * least @count bytes long.
144 * @src: Source address, in user space.
145 * @count: Maximum number of bytes to copy, including the trailing NUL.
146 *
147 * Copies a NUL-terminated string from userspace to kernel space.
148 *
149 * On success, returns the length of the string (not including the trailing
150 * NUL).
151 *
152 * If access to userspace fails, returns -EFAULT (some data may have been
153 * copied).
154 *
155 * If @count is smaller than the length of the string, copies @count bytes
156 * and returns @count.
157 */
158#define strncpy_from_user(dest,src,count) \
159({ \
160 unsigned long __sfu_src = (unsigned long)(src); \
161 int __sfu_count = (int)(count); \
162 long __sfu_res = -EFAULT; \
163 \
164 if (__access_ok(__sfu_src, __sfu_count)) \
165 __sfu_res = __strncpy_from_user((unsigned long)(dest), \
166 __sfu_src, __sfu_count); \
167 \
168 __sfu_res; \
169})
170
171static inline unsigned long 147static inline unsigned long
172copy_from_user(void *to, const void __user *from, unsigned long n) 148copy_from_user(void *to, const void __user *from, unsigned long n)
173{ 149{
@@ -192,43 +168,6 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
192 return __copy_size; 168 return __copy_size;
193} 169}
194 170
195/**
196 * strnlen_user: - Get the size of a string in user space.
197 * @s: The string to measure.
198 * @n: The maximum valid length
199 *
200 * Context: User context only. This function may sleep.
201 *
202 * Get the size of a NUL-terminated string in user space.
203 *
204 * Returns the size of the string INCLUDING the terminating NUL.
205 * On exception, returns 0.
206 * If the string is too long, returns a value greater than @n.
207 */
208static inline long strnlen_user(const char __user *s, long n)
209{
210 if (!__addr_ok(s))
211 return 0;
212 else
213 return __strnlen_user(s, n);
214}
215
216/**
217 * strlen_user: - Get the size of a string in user space.
218 * @str: The string to measure.
219 *
220 * Context: User context only. This function may sleep.
221 *
222 * Get the size of a NUL-terminated string in user space.
223 *
224 * Returns the size of the string INCLUDING the terminating NUL.
225 * On exception, returns 0.
226 *
227 * If there is a limit on the length of a valid string, you may wish to
228 * consider using strnlen_user() instead.
229 */
230#define strlen_user(str) strnlen_user(str, ~0UL >> 1)
231
232/* 171/*
233 * The exception table consists of pairs of addresses: the first is the 172 * The exception table consists of pairs of addresses: the first is the
234 * address of an instruction that is allowed to fault, and the second is 173 * address of an instruction that is allowed to fault, and the second is
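The new user_addr_max() definition above exists because the generic strncpy_from_user()/strnlen_user() clamp how far they may walk into the user address space before starting their word-at-a-time scan. The following is only a rough sketch of that clamping, not the exact lib/strncpy_from_user.c code, and the copy loop itself is elided:

/* Sketch under the assumption that user_addr_max() returns the current
 * addr_limit; the real generic code also handles alignment and the
 * word-at-a-time fast path.
 */
#include <linux/errno.h>
#include <linux/uaccess.h>

static long strncpy_from_user_sketch(char *dst, const char __user *src,
                                     long count)
{
        unsigned long max_addr = user_addr_max();
        unsigned long src_addr = (unsigned long)src;

        if (unlikely(src_addr >= max_addr))
                return -EFAULT;
        if (max_addr - src_addr < (unsigned long)count)
                count = max_addr - src_addr;    /* never cross the limit */

        /* ... word-at-a-time copy of at most 'count' bytes elided ... */
        return count;
}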
diff --git a/arch/sh/include/asm/uaccess_32.h b/arch/sh/include/asm/uaccess_32.h
index ae0d24f6653f..c0de7ee35ab7 100644
--- a/arch/sh/include/asm/uaccess_32.h
+++ b/arch/sh/include/asm/uaccess_32.h
@@ -170,79 +170,4 @@ __asm__ __volatile__( \
170 170
171extern void __put_user_unknown(void); 171extern void __put_user_unknown(void);
172 172
173static inline int
174__strncpy_from_user(unsigned long __dest, unsigned long __user __src, int __count)
175{
176 __kernel_size_t res;
177 unsigned long __dummy, _d, _s, _c;
178
179 __asm__ __volatile__(
180 "9:\n"
181 "mov.b @%2+, %1\n\t"
182 "cmp/eq #0, %1\n\t"
183 "bt/s 2f\n"
184 "1:\n"
185 "mov.b %1, @%3\n\t"
186 "dt %4\n\t"
187 "bf/s 9b\n\t"
188 " add #1, %3\n\t"
189 "2:\n\t"
190 "sub %4, %0\n"
191 "3:\n"
192 ".section .fixup,\"ax\"\n"
193 "4:\n\t"
194 "mov.l 5f, %1\n\t"
195 "jmp @%1\n\t"
196 " mov %9, %0\n\t"
197 ".balign 4\n"
198 "5: .long 3b\n"
199 ".previous\n"
200 ".section __ex_table,\"a\"\n"
201 " .balign 4\n"
202 " .long 9b,4b\n"
203 ".previous"
204 : "=r" (res), "=&z" (__dummy), "=r" (_s), "=r" (_d), "=r"(_c)
205 : "0" (__count), "2" (__src), "3" (__dest), "4" (__count),
206 "i" (-EFAULT)
207 : "memory", "t");
208
209 return res;
210}
211
212/*
213 * Return the size of a string (including the ending 0 even when we have
214 * exceeded the maximum string length).
215 */
216static inline long __strnlen_user(const char __user *__s, long __n)
217{
218 unsigned long res;
219 unsigned long __dummy;
220
221 __asm__ __volatile__(
222 "1:\t"
223 "mov.b @(%0,%3), %1\n\t"
224 "cmp/eq %4, %0\n\t"
225 "bt/s 2f\n\t"
226 " add #1, %0\n\t"
227 "tst %1, %1\n\t"
228 "bf 1b\n\t"
229 "2:\n"
230 ".section .fixup,\"ax\"\n"
231 "3:\n\t"
232 "mov.l 4f, %1\n\t"
233 "jmp @%1\n\t"
234 " mov #0, %0\n"
235 ".balign 4\n"
236 "4: .long 2b\n"
237 ".previous\n"
238 ".section __ex_table,\"a\"\n"
239 " .balign 4\n"
240 " .long 1b,3b\n"
241 ".previous"
242 : "=z" (res), "=&r" (__dummy)
243 : "0" (0), "r" (__s), "r" (__n)
244 : "t");
245 return res;
246}
247
248#endif /* __ASM_SH_UACCESS_32_H */ 173#endif /* __ASM_SH_UACCESS_32_H */
diff --git a/arch/sh/include/asm/uaccess_64.h b/arch/sh/include/asm/uaccess_64.h
index 56fd20b8cdcc..2e07e0f40c6a 100644
--- a/arch/sh/include/asm/uaccess_64.h
+++ b/arch/sh/include/asm/uaccess_64.h
@@ -84,8 +84,4 @@ extern long __put_user_asm_l(void *, long);
84extern long __put_user_asm_q(void *, long); 84extern long __put_user_asm_q(void *, long);
85extern void __put_user_unknown(void); 85extern void __put_user_unknown(void);
86 86
87extern long __strnlen_user(const char *__s, long __n);
88extern int __strncpy_from_user(unsigned long __dest,
89 unsigned long __user __src, int __count);
90
91#endif /* __ASM_SH_UACCESS_64_H */ 87#endif /* __ASM_SH_UACCESS_64_H */
diff --git a/arch/sh/include/asm/ucontext.h b/arch/sh/include/asm/ucontext.h
deleted file mode 100644
index 9bc07b9f30fb..000000000000
--- a/arch/sh/include/asm/ucontext.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/ucontext.h>
diff --git a/arch/sh/include/asm/word-at-a-time.h b/arch/sh/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..6e38953ff7fd
--- /dev/null
+++ b/arch/sh/include/asm/word-at-a-time.h
@@ -0,0 +1,53 @@
1#ifndef __ASM_SH_WORD_AT_A_TIME_H
2#define __ASM_SH_WORD_AT_A_TIME_H
3
4#ifdef CONFIG_CPU_BIG_ENDIAN
5# include <asm-generic/word-at-a-time.h>
6#else
7/*
8 * Little-endian version cribbed from x86.
9 */
10struct word_at_a_time {
11 const unsigned long one_bits, high_bits;
12};
13
14#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
15
16/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
17static inline long count_masked_bytes(long mask)
18{
19 /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
20 long a = (0x0ff0001+mask) >> 23;
21 /* Fix the 1 for 00 case */
22 return a & mask;
23}
24
25/* Return nonzero if it has a zero */
26static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
27{
28 unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
29 *bits = mask;
30 return mask;
31}
32
33static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
34{
35 return bits;
36}
37
38static inline unsigned long create_zero_mask(unsigned long bits)
39{
40 bits = (bits - 1) & ~bits;
41 return bits >> 7;
42}
43
44/* The mask we created is directly usable as a bytemask */
45#define zero_bytemask(mask) (mask)
46
47static inline unsigned long find_zero(unsigned long mask)
48{
49 return count_masked_bytes(mask);
50}
51#endif
52
53#endif
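The little-endian half of the new header is the standard word-at-a-time zero-byte search. The following standalone, userspace-style demonstration (not kernel code) assumes a little-endian host and writes the 32-bit constants out instead of deriving them with REPEAT_BYTE(); it prints the index of the first NUL in a word:

/* Demonstration of has_zero()/create_zero_mask()/count_masked_bytes()
 * on one 32-bit little-endian word. Compile as ordinary C.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        const char s[8] = "abc";        /* NUL-padded buffer */
        uint32_t w, bits, mask;

        memcpy(&w, s, sizeof(w));       /* load one 32-bit word */

        /* has_zero(): a byte is zero iff subtracting 1 borrows out of it
         * while that byte's own high bit was clear. */
        bits = (w - 0x01010101u) & ~w & 0x80808080u;
        if (bits) {
                mask = (bits - 1) & ~bits;      /* create_zero_mask() */
                mask >>= 7;
                /* count_masked_bytes(): index of the first zero byte */
                printf("first NUL at byte %u\n",
                       ((0x0ff0001u + mask) >> 23) & mask);
        }
        return 0;
}

For "abc" this prints "first NUL at byte 3", matching what find_zero() would return to the generic string-copy code.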
diff --git a/arch/sh/include/asm/xor.h b/arch/sh/include/asm/xor.h
deleted file mode 100644
index c82eb12a5b18..000000000000
--- a/arch/sh/include/asm/xor.h
+++ /dev/null
@@ -1 +0,0 @@
1#include <asm-generic/xor.h>
diff --git a/arch/sh/include/cpu-sh2a/cpu/ubc.h b/arch/sh/include/cpu-sh2a/cpu/ubc.h
deleted file mode 100644
index 1192e1c761a7..000000000000
--- a/arch/sh/include/cpu-sh2a/cpu/ubc.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/*
2 * SH-2A UBC definitions
3 *
4 * Copyright (C) 2008 Kieran Bingham
5 *
6 * This file is subject to the terms and conditions of the GNU General Public
7 * License. See the file "COPYING" in the main directory of this archive
8 * for more details.
9 */
10
11#ifndef __ASM_CPU_SH2A_UBC_H
12#define __ASM_CPU_SH2A_UBC_H
13
14#define UBC_BARA 0xfffc0400
15#define UBC_BAMRA 0xfffc0404
16#define UBC_BBRA 0xfffc04a0 /* 16 bit access */
17#define UBC_BDRA 0xfffc0408
18#define UBC_BDMRA 0xfffc040c
19
20#define UBC_BARB 0xfffc0410
21#define UBC_BAMRB 0xfffc0414
22#define UBC_BBRB 0xfffc04b0 /* 16 bit access */
23#define UBC_BDRB 0xfffc0418
24#define UBC_BDMRB 0xfffc041c
25
26#define UBC_BRCR 0xfffc04c0
27
28#endif /* __ASM_CPU_SH2A_UBC_H */
diff --git a/arch/sh/kernel/cpu/sh5/entry.S b/arch/sh/kernel/cpu/sh5/entry.S
index ff1f0e6e9bec..b7cf6a547f11 100644
--- a/arch/sh/kernel/cpu/sh5/entry.S
+++ b/arch/sh/kernel/cpu/sh5/entry.S
@@ -1569,86 +1569,6 @@ ___clear_user_exit:
1569#endif /* CONFIG_MMU */ 1569#endif /* CONFIG_MMU */
1570 1570
1571/* 1571/*
1572 * int __strncpy_from_user(unsigned long __dest, unsigned long __src,
1573 * int __count)
1574 *
1575 * Inputs:
1576 * (r2) target address
1577 * (r3) source address
1578 * (r4) maximum size in bytes
1579 *
1580 * Ouputs:
1581 * (*r2) copied data
1582 * (r2) -EFAULT (in case of faulting)
1583 * copied data (otherwise)
1584 */
1585 .global __strncpy_from_user
1586__strncpy_from_user:
1587 pta ___strncpy_from_user1, tr0
1588 pta ___strncpy_from_user_done, tr1
1589 or r4, ZERO, r5 /* r5 = original count */
1590 beq/u r4, r63, tr1 /* early exit if r4==0 */
1591 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1592 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1593
1594___strncpy_from_user1:
1595 ld.b r3, 0, r7 /* Fault address: only in reading */
1596 st.b r2, 0, r7
1597 addi r2, 1, r2
1598 addi r3, 1, r3
1599 beq/u ZERO, r7, tr1
1600 addi r4, -1, r4 /* return real number of copied bytes */
1601 bne/l ZERO, r4, tr0
1602
1603___strncpy_from_user_done:
1604 sub r5, r4, r6 /* If done, return copied */
1605
1606___strncpy_from_user_exit:
1607 or r6, ZERO, r2
1608 ptabs LINK, tr0
1609 blink tr0, ZERO
1610
1611/*
1612 * extern long __strnlen_user(const char *__s, long __n)
1613 *
1614 * Inputs:
1615 * (r2) source address
1616 * (r3) source size in bytes
1617 *
1618 * Ouputs:
1619 * (r2) -EFAULT (in case of faulting)
1620 * string length (otherwise)
1621 */
1622 .global __strnlen_user
1623__strnlen_user:
1624 pta ___strnlen_user_set_reply, tr0
1625 pta ___strnlen_user1, tr1
1626 or ZERO, ZERO, r5 /* r5 = counter */
1627 movi -(EFAULT), r6 /* r6 = reply, no real fixup */
1628 or ZERO, ZERO, r7 /* r7 = data, clear top byte of data */
1629 beq r3, ZERO, tr0
1630
1631___strnlen_user1:
1632 ldx.b r2, r5, r7 /* Fault address: only in reading */
1633 addi r3, -1, r3 /* No real fixup */
1634 addi r5, 1, r5
1635 beq r3, ZERO, tr0
1636 bne r7, ZERO, tr1
1637! The line below used to be active. This meant led to a junk byte lying between each pair
1638! of entries in the argv & envp structures in memory. Whilst the program saw the right data
1639! via the argv and envp arguments to main, it meant the 'flat' representation visible through
1640! /proc/$pid/cmdline was corrupt, causing trouble with ps, for example.
1641! addi r5, 1, r5 /* Include '\0' */
1642
1643___strnlen_user_set_reply:
1644 or r5, ZERO, r6 /* If done, return counter */
1645
1646___strnlen_user_exit:
1647 or r6, ZERO, r2
1648 ptabs LINK, tr0
1649 blink tr0, ZERO
1650
1651/*
1652 * extern long __get_user_asm_?(void *val, long addr) 1572 * extern long __get_user_asm_?(void *val, long addr)
1653 * 1573 *
1654 * Inputs: 1574 * Inputs:
@@ -1982,8 +1902,6 @@ asm_uaccess_start:
1982 .long ___copy_user2, ___copy_user_exit 1902 .long ___copy_user2, ___copy_user_exit
1983 .long ___clear_user1, ___clear_user_exit 1903 .long ___clear_user1, ___clear_user_exit
1984#endif 1904#endif
1985 .long ___strncpy_from_user1, ___strncpy_from_user_exit
1986 .long ___strnlen_user1, ___strnlen_user_exit
1987 .long ___get_user_asm_b1, ___get_user_asm_b_exit 1905 .long ___get_user_asm_b1, ___get_user_asm_b_exit
1988 .long ___get_user_asm_w1, ___get_user_asm_w_exit 1906 .long ___get_user_asm_w1, ___get_user_asm_w_exit
1989 .long ___get_user_asm_l1, ___get_user_asm_l_exit 1907 .long ___get_user_asm_l1, ___get_user_asm_l_exit
diff --git a/arch/sh/kernel/process.c b/arch/sh/kernel/process.c
index 9b7a459a4613..055d91b70305 100644
--- a/arch/sh/kernel/process.c
+++ b/arch/sh/kernel/process.c
@@ -4,6 +4,7 @@
4#include <linux/sched.h> 4#include <linux/sched.h>
5#include <linux/export.h> 5#include <linux/export.h>
6#include <linux/stackprotector.h> 6#include <linux/stackprotector.h>
7#include <asm/fpu.h>
7 8
8struct kmem_cache *task_xstate_cachep = NULL; 9struct kmem_cache *task_xstate_cachep = NULL;
9unsigned int xstate_size; 10unsigned int xstate_size;
diff --git a/arch/sh/kernel/process_64.c b/arch/sh/kernel/process_64.c
index 4264583eabac..602545b12a86 100644
--- a/arch/sh/kernel/process_64.c
+++ b/arch/sh/kernel/process_64.c
@@ -33,6 +33,7 @@
33#include <asm/switch_to.h> 33#include <asm/switch_to.h>
34 34
35struct task_struct *last_task_used_math = NULL; 35struct task_struct *last_task_used_math = NULL;
36struct pt_regs fake_swapper_regs = { 0, };
36 37
37void show_regs(struct pt_regs *regs) 38void show_regs(struct pt_regs *regs)
38{ 39{
diff --git a/arch/sh/kernel/sh_ksyms_64.c b/arch/sh/kernel/sh_ksyms_64.c
index 45afa5c51f67..26a0774f5272 100644
--- a/arch/sh/kernel/sh_ksyms_64.c
+++ b/arch/sh/kernel/sh_ksyms_64.c
@@ -32,8 +32,6 @@ EXPORT_SYMBOL(__get_user_asm_b);
32EXPORT_SYMBOL(__get_user_asm_w); 32EXPORT_SYMBOL(__get_user_asm_w);
33EXPORT_SYMBOL(__get_user_asm_l); 33EXPORT_SYMBOL(__get_user_asm_l);
34EXPORT_SYMBOL(__get_user_asm_q); 34EXPORT_SYMBOL(__get_user_asm_q);
35EXPORT_SYMBOL(__strnlen_user);
36EXPORT_SYMBOL(__strncpy_from_user);
37EXPORT_SYMBOL(__clear_user); 35EXPORT_SYMBOL(__clear_user);
38EXPORT_SYMBOL(copy_page); 36EXPORT_SYMBOL(copy_page);
39EXPORT_SYMBOL(__copy_user); 37EXPORT_SYMBOL(__copy_user);
diff --git a/arch/sh/kernel/signal_32.c b/arch/sh/kernel/signal_32.c
index cb4172c8af7d..d6b7b6154f87 100644
--- a/arch/sh/kernel/signal_32.c
+++ b/arch/sh/kernel/signal_32.c
@@ -32,8 +32,6 @@
32#include <asm/syscalls.h> 32#include <asm/syscalls.h>
33#include <asm/fpu.h> 33#include <asm/fpu.h>
34 34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36
37struct fdpic_func_descriptor { 35struct fdpic_func_descriptor {
38 unsigned long text; 36 unsigned long text;
39 unsigned long GOT; 37 unsigned long GOT;
@@ -226,7 +224,6 @@ asmlinkage int sys_sigreturn(unsigned long r4, unsigned long r5,
226 sizeof(frame->extramask)))) 224 sizeof(frame->extramask))))
227 goto badframe; 225 goto badframe;
228 226
229 sigdelsetmask(&set, ~_BLOCKABLE);
230 set_current_blocked(&set); 227 set_current_blocked(&set);
231 228
232 if (restore_sigcontext(regs, &frame->sc, &r0)) 229 if (restore_sigcontext(regs, &frame->sc, &r0))
@@ -256,7 +253,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r4, unsigned long r5,
256 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 253 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
257 goto badframe; 254 goto badframe;
258 255
259 sigdelsetmask(&set, ~_BLOCKABLE);
260 set_current_blocked(&set); 256 set_current_blocked(&set);
261 257
262 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0)) 258 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &r0))
@@ -522,10 +518,11 @@ handle_syscall_restart(unsigned long save_r0, struct pt_regs *regs,
522/* 518/*
523 * OK, we're invoking a handler 519 * OK, we're invoking a handler
524 */ 520 */
525static int 521static void
526handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info, 522handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
527 sigset_t *oldset, struct pt_regs *regs, unsigned int save_r0) 523 struct pt_regs *regs, unsigned int save_r0)
528{ 524{
525 sigset_t *oldset = sigmask_to_save();
529 int ret; 526 int ret;
530 527
531 /* Set up the stack frame */ 528 /* Set up the stack frame */
@@ -534,10 +531,10 @@ handle_signal(unsigned long sig, struct k_sigaction *ka, siginfo_t *info,
534 else 531 else
535 ret = setup_frame(sig, ka, oldset, regs); 532 ret = setup_frame(sig, ka, oldset, regs);
536 533
537 if (ret == 0) 534 if (ret)
538 block_sigmask(ka, sig); 535 return;
539 536 signal_delivered(sig, info, ka, regs,
540 return ret; 537 test_thread_flag(TIF_SINGLESTEP));
541} 538}
542 539
543/* 540/*
@@ -554,7 +551,6 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
554 siginfo_t info; 551 siginfo_t info;
555 int signr; 552 int signr;
556 struct k_sigaction ka; 553 struct k_sigaction ka;
557 sigset_t *oldset;
558 554
559 /* 555 /*
560 * We want the common case to go fast, which 556 * We want the common case to go fast, which
@@ -565,30 +561,12 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
565 if (!user_mode(regs)) 561 if (!user_mode(regs))
566 return; 562 return;
567 563
568 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
569 oldset = &current->saved_sigmask;
570 else
571 oldset = &current->blocked;
572
573 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 564 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
574 if (signr > 0) { 565 if (signr > 0) {
575 handle_syscall_restart(save_r0, regs, &ka.sa); 566 handle_syscall_restart(save_r0, regs, &ka.sa);
576 567
577 /* Whee! Actually deliver the signal. */ 568 /* Whee! Actually deliver the signal. */
578 if (handle_signal(signr, &ka, &info, oldset, 569 handle_signal(signr, &ka, &info, regs, save_r0);
579 regs, save_r0) == 0) {
580 /*
581 * A signal was successfully delivered; the saved
582 * sigmask will have been stored in the signal frame,
583 * and will be restored by sigreturn, so we can simply
584 * clear the TS_RESTORE_SIGMASK flag
585 */
586 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
587
588 tracehook_signal_handler(signr, &info, &ka, regs,
589 test_thread_flag(TIF_SINGLESTEP));
590 }
591
592 return; 570 return;
593 } 571 }
594 572
@@ -610,10 +588,7 @@ static void do_signal(struct pt_regs *regs, unsigned int save_r0)
610 * If there's no signal to deliver, we just put the saved sigmask 588 * If there's no signal to deliver, we just put the saved sigmask
611 * back. 589 * back.
612 */ 590 */
613 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 591 restore_saved_sigmask();
614 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
615 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
616 }
617} 592}
618 593
619asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0, 594asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
@@ -626,7 +601,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned int save_r0,
626 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 601 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
627 clear_thread_flag(TIF_NOTIFY_RESUME); 602 clear_thread_flag(TIF_NOTIFY_RESUME);
628 tracehook_notify_resume(regs); 603 tracehook_notify_resume(regs);
629 if (current->replacement_session_keyring)
630 key_replace_session_keyring();
631 } 604 }
632} 605}
diff --git a/arch/sh/kernel/signal_64.c b/arch/sh/kernel/signal_64.c
index b589a354c069..6b5b3dfe886b 100644
--- a/arch/sh/kernel/signal_64.c
+++ b/arch/sh/kernel/signal_64.c
@@ -41,11 +41,9 @@
41 41
42#define DEBUG_SIG 0 42#define DEBUG_SIG 0
43 43
44#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) 44static void
45
46static int
47handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 45handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
48 sigset_t *oldset, struct pt_regs * regs); 46 struct pt_regs * regs);
49 47
50static inline void 48static inline void
51handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa) 49handle_syscall_restart(struct pt_regs *regs, struct sigaction *sa)
@@ -88,7 +86,6 @@ static void do_signal(struct pt_regs *regs)
88 siginfo_t info; 86 siginfo_t info;
89 int signr; 87 int signr;
90 struct k_sigaction ka; 88 struct k_sigaction ka;
91 sigset_t *oldset;
92 89
93 /* 90 /*
94 * We want the common case to go fast, which 91 * We want the common case to go fast, which
@@ -99,28 +96,13 @@ static void do_signal(struct pt_regs *regs)
99 if (!user_mode(regs)) 96 if (!user_mode(regs))
100 return; 97 return;
101 98
102 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
103 oldset = &current->saved_sigmask;
104 else
105 oldset = &current->blocked;
106
107 signr = get_signal_to_deliver(&info, &ka, regs, 0); 99 signr = get_signal_to_deliver(&info, &ka, regs, 0);
108 if (signr > 0) { 100 if (signr > 0) {
109 handle_syscall_restart(regs, &ka.sa); 101 handle_syscall_restart(regs, &ka.sa);
110 102
111 /* Whee! Actually deliver the signal. */ 103 /* Whee! Actually deliver the signal. */
112 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 104 handle_signal(signr, &info, &ka, regs);
113 /* 105 return;
114 * If a signal was successfully delivered, the
115 * saved sigmask is in its frame, and we can
116 * clear the TS_RESTORE_SIGMASK flag.
117 */
118 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
119
120 tracehook_signal_handler(signr, &info, &ka, regs,
121 test_thread_flag(TIF_SINGLESTEP));
122 return;
123 }
124 } 106 }
125 107
126 /* Did we come from a system call? */ 108 /* Did we come from a system call? */
@@ -143,12 +125,7 @@ static void do_signal(struct pt_regs *regs)
143 } 125 }
144 126
145 /* No signal to deliver -- put the saved sigmask back */ 127 /* No signal to deliver -- put the saved sigmask back */
146 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 128 restore_saved_sigmask();
147 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
148 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
149 }
150
151 return;
152} 129}
153 130
154/* 131/*
@@ -351,7 +328,6 @@ asmlinkage int sys_sigreturn(unsigned long r2, unsigned long r3,
351 sizeof(frame->extramask)))) 328 sizeof(frame->extramask))))
352 goto badframe; 329 goto badframe;
353 330
354 sigdelsetmask(&set, ~_BLOCKABLE);
355 set_current_blocked(&set); 331 set_current_blocked(&set);
356 332
357 if (restore_sigcontext(regs, &frame->sc, &ret)) 333 if (restore_sigcontext(regs, &frame->sc, &ret))
@@ -384,7 +360,6 @@ asmlinkage int sys_rt_sigreturn(unsigned long r2, unsigned long r3,
384 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 360 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
385 goto badframe; 361 goto badframe;
386 362
387 sigdelsetmask(&set, ~_BLOCKABLE);
388 set_current_blocked(&set); 363 set_current_blocked(&set);
389 364
390 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret)) 365 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ret))
@@ -659,10 +634,11 @@ give_sigsegv:
659/* 634/*
660 * OK, we're invoking a handler 635 * OK, we're invoking a handler
661 */ 636 */
662static int 637static void
663handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 638handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
664 sigset_t *oldset, struct pt_regs * regs) 639 struct pt_regs * regs)
665{ 640{
641 sigset_t *oldset = sigmask_to_save();
666 int ret; 642 int ret;
667 643
668 /* Set up the stack frame */ 644 /* Set up the stack frame */
@@ -671,10 +647,11 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
671 else 647 else
672 ret = setup_frame(sig, ka, oldset, regs); 648 ret = setup_frame(sig, ka, oldset, regs);
673 649
674 if (ret == 0) 650 if (ret)
675 block_sigmask(ka, sig); 651 return;
676 652
677 return ret; 653 signal_delivered(sig, info, ka, regs,
654 test_thread_flag(TIF_SINGLESTEP));
678} 655}
679 656
680asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) 657asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags)
@@ -685,7 +662,5 @@ asmlinkage void do_notify_resume(struct pt_regs *regs, unsigned long thread_info
685 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 662 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
686 clear_thread_flag(TIF_NOTIFY_RESUME); 663 clear_thread_flag(TIF_NOTIFY_RESUME);
687 tracehook_notify_resume(regs); 664 tracehook_notify_resume(regs);
688 if (current->replacement_session_keyring)
689 key_replace_session_keyring();
690 } 665 }
691} 666}
diff --git a/arch/sh/kernel/smp.c b/arch/sh/kernel/smp.c
index b86e9ca79455..2062aa88af41 100644
--- a/arch/sh/kernel/smp.c
+++ b/arch/sh/kernel/smp.c
@@ -123,7 +123,6 @@ void native_play_dead(void)
123int __cpu_disable(void) 123int __cpu_disable(void)
124{ 124{
125 unsigned int cpu = smp_processor_id(); 125 unsigned int cpu = smp_processor_id();
126 struct task_struct *p;
127 int ret; 126 int ret;
128 127
129 ret = mp_ops->cpu_disable(cpu); 128 ret = mp_ops->cpu_disable(cpu);
@@ -153,11 +152,7 @@ int __cpu_disable(void)
153 flush_cache_all(); 152 flush_cache_all();
154 local_flush_tlb_all(); 153 local_flush_tlb_all();
155 154
156 read_lock(&tasklist_lock); 155 clear_tasks_mm_cpumask(cpu);
157 for_each_process(p)
158 if (p->mm)
159 cpumask_clear_cpu(cpu, mm_cpumask(p->mm));
160 read_unlock(&tasklist_lock);
161 156
162 return 0; 157 return 0;
163} 158}
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 83bd051754e1..e74ff1377626 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -41,7 +41,6 @@ config SPARC32
41 def_bool !64BIT 41 def_bool !64BIT
42 select GENERIC_ATOMIC64 42 select GENERIC_ATOMIC64
43 select CLZ_TAB 43 select CLZ_TAB
44 select ARCH_USES_GETTIMEOFFSET
45 44
46config SPARC64 45config SPARC64
47 def_bool 64BIT 46 def_bool 64BIT
diff --git a/arch/sparc/include/asm/asi.h b/arch/sparc/include/asm/asi.h
index cbb93e5141de..61ebe7411ceb 100644
--- a/arch/sparc/include/asm/asi.h
+++ b/arch/sparc/include/asm/asi.h
@@ -40,11 +40,7 @@
40#define ASI_M_UNA01 0x01 /* Same here... */ 40#define ASI_M_UNA01 0x01 /* Same here... */
41#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */ 41#define ASI_M_MXCC 0x02 /* Access to TI VIKING MXCC registers */
42#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */ 42#define ASI_M_FLUSH_PROBE 0x03 /* Reference MMU Flush/Probe; rw, ss */
43#ifndef CONFIG_SPARC_LEON
44#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */ 43#define ASI_M_MMUREGS 0x04 /* MMU Registers; rw, ss */
45#else
46#define ASI_M_MMUREGS 0x19
47#endif /* CONFIG_SPARC_LEON */
48#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */ 44#define ASI_M_TLBDIAG 0x05 /* MMU TLB only Diagnostics */
49#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */ 45#define ASI_M_DIAGS 0x06 /* Reference MMU Diagnostics */
50#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */ 46#define ASI_M_IODIAG 0x07 /* MMU I/O TLB only Diagnostics */
diff --git a/arch/sparc/include/asm/asmmacro.h b/arch/sparc/include/asm/asmmacro.h
index 02a172fb193a..a0e28ef02558 100644
--- a/arch/sparc/include/asm/asmmacro.h
+++ b/arch/sparc/include/asm/asmmacro.h
@@ -20,4 +20,26 @@
20/* All traps low-level code here must end with this macro. */ 20/* All traps low-level code here must end with this macro. */
21#define RESTORE_ALL b ret_trap_entry; clr %l6; 21#define RESTORE_ALL b ret_trap_entry; clr %l6;
22 22
23/* Support for run-time patching of single instructions.
24 * This is used to handle the differences in the ASI for
25 * MMUREGS for LEON and SUN.
26 *
27 * Sample:
28 * LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0
29 * SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0
30 * PI == Patch Instruction
31 *
32 * For LEON we will use the first variant,
33 * and for all other we will use the SUN variant.
34 * The order is important.
35 */
36#define LEON_PI(...) \
37662: __VA_ARGS__
38
39#define SUN_PI_(...) \
40 .section .leon_1insn_patch, "ax"; \
41 .word 662b; \
42 __VA_ARGS__; \
43 .previous
44
23#endif /* !(_SPARC_ASMMACRO_H) */ 45#endif /* !(_SPARC_ASMMACRO_H) */
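The LEON_PI()/SUN_PI_() pair emits the SUN-variant instruction inline at the 662: label and records the address plus the LEON replacement word in the .leon_1insn_patch section. At boot, a LEON kernel is expected to walk that table and overwrite each recorded instruction. A hedged sketch of such a fixup pass follows; the structure and function names here are illustrative, not necessarily the ones this patch series introduces:

/* Hypothetical boot-time fixup for the .leon_1insn_patch table. Each
 * entry pairs the address of a 662: instruction with the LEON variant
 * recorded by SUN_PI_().
 */
struct leon_1insn_patch_entry {
        unsigned int addr;      /* address of the instruction to patch */
        unsigned int insn;      /* LEON instruction word to write there */
};

static void apply_leon_1insn_patches(struct leon_1insn_patch_entry *start,
                                     struct leon_1insn_patch_entry *end)
{
        struct leon_1insn_patch_entry *p;

        for (p = start; p < end; p++) {
                unsigned int *insn_addr =
                        (unsigned int *)(unsigned long)p->addr;

                *insn_addr = p->insn;
                /* an I-cache flush of insn_addr would follow here */
        }
}

The ordering note in the comment matters: the inline (SUN) instruction must come first so that the recorded 662: address points at the word to be replaced.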
diff --git a/arch/sparc/include/asm/cmt.h b/arch/sparc/include/asm/cmt.h
deleted file mode 100644
index 870db5928577..000000000000
--- a/arch/sparc/include/asm/cmt.h
+++ /dev/null
@@ -1,59 +0,0 @@
1#ifndef _SPARC64_CMT_H
2#define _SPARC64_CMT_H
3
4/* cmt.h: Chip Multi-Threading register definitions
5 *
6 * Copyright (C) 2004 David S. Miller (davem@redhat.com)
7 */
8
9/* ASI_CORE_ID - private */
10#define LP_ID 0x0000000000000010UL
11#define LP_ID_MAX 0x00000000003f0000UL
12#define LP_ID_ID 0x000000000000003fUL
13
14/* ASI_INTR_ID - private */
15#define LP_INTR_ID 0x0000000000000000UL
16#define LP_INTR_ID_ID 0x00000000000003ffUL
17
18/* ASI_CESR_ID - private */
19#define CESR_ID 0x0000000000000040UL
20#define CESR_ID_ID 0x00000000000000ffUL
21
22/* ASI_CORE_AVAILABLE - shared */
23#define LP_AVAIL 0x0000000000000000UL
24#define LP_AVAIL_1 0x0000000000000002UL
25#define LP_AVAIL_0 0x0000000000000001UL
26
27/* ASI_CORE_ENABLE_STATUS - shared */
28#define LP_ENAB_STAT 0x0000000000000010UL
29#define LP_ENAB_STAT_1 0x0000000000000002UL
30#define LP_ENAB_STAT_0 0x0000000000000001UL
31
32/* ASI_CORE_ENABLE - shared */
33#define LP_ENAB 0x0000000000000020UL
34#define LP_ENAB_1 0x0000000000000002UL
35#define LP_ENAB_0 0x0000000000000001UL
36
37/* ASI_CORE_RUNNING - shared */
38#define LP_RUNNING_RW 0x0000000000000050UL
39#define LP_RUNNING_W1S 0x0000000000000060UL
40#define LP_RUNNING_W1C 0x0000000000000068UL
41#define LP_RUNNING_1 0x0000000000000002UL
42#define LP_RUNNING_0 0x0000000000000001UL
43
44/* ASI_CORE_RUNNING_STAT - shared */
45#define LP_RUN_STAT 0x0000000000000058UL
46#define LP_RUN_STAT_1 0x0000000000000002UL
47#define LP_RUN_STAT_0 0x0000000000000001UL
48
49/* ASI_XIR_STEERING - shared */
50#define LP_XIR_STEER 0x0000000000000030UL
51#define LP_XIR_STEER_1 0x0000000000000002UL
52#define LP_XIR_STEER_0 0x0000000000000001UL
53
54/* ASI_CMT_ERROR_STEERING - shared */
55#define CMT_ER_STEER 0x0000000000000040UL
56#define CMT_ER_STEER_1 0x0000000000000002UL
57#define CMT_ER_STEER_0 0x0000000000000001UL
58
59#endif /* _SPARC64_CMT_H */
diff --git a/arch/sparc/include/asm/dma-mapping.h b/arch/sparc/include/asm/dma-mapping.h
index 48a7c65731d2..8493fd3c7ba5 100644
--- a/arch/sparc/include/asm/dma-mapping.h
+++ b/arch/sparc/include/asm/dma-mapping.h
@@ -12,13 +12,18 @@ extern int dma_supported(struct device *dev, u64 mask);
12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f) 12#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) 13#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
14 14
15extern struct dma_map_ops *dma_ops, pci32_dma_ops; 15extern struct dma_map_ops *dma_ops;
16extern struct dma_map_ops *leon_dma_ops;
17extern struct dma_map_ops pci32_dma_ops;
18
16extern struct bus_type pci_bus_type; 19extern struct bus_type pci_bus_type;
17 20
18static inline struct dma_map_ops *get_dma_ops(struct device *dev) 21static inline struct dma_map_ops *get_dma_ops(struct device *dev)
19{ 22{
20#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) 23#if defined(CONFIG_SPARC32) && defined(CONFIG_PCI)
21 if (dev->bus == &pci_bus_type) 24 if (sparc_cpu_model == sparc_leon)
25 return leon_dma_ops;
26 else if (dev->bus == &pci_bus_type)
22 return &pci32_dma_ops; 27 return &pci32_dma_ops;
23#endif 28#endif
24 return dma_ops; 29 return dma_ops;
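With this change LEON systems are routed to their own dma_map_ops table instead of being special-cased inside the common ops. Drivers are unaffected, since the dispatch happens inside get_dma_ops(); as a hedged illustration of that standard pattern (not taken from this patch):

/* Illustrative driver-side view: dma_map_single() and friends resolve
 * their implementation through get_dma_ops(), so the LEON/PCI/default
 * split above stays invisible to drivers.
 */
#include <linux/dma-mapping.h>
#include <linux/errno.h>

static int example_map_buffer(struct device *dev, void *buf, size_t len,
                              dma_addr_t *handle)
{
        *handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
        if (dma_mapping_error(dev, *handle))
                return -EIO;
        return 0;
}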
diff --git a/arch/sparc/include/asm/leon.h b/arch/sparc/include/asm/leon.h
index 07659124c140..3375c6293893 100644
--- a/arch/sparc/include/asm/leon.h
+++ b/arch/sparc/include/asm/leon.h
@@ -8,8 +8,6 @@
8#ifndef LEON_H_INCLUDE 8#ifndef LEON_H_INCLUDE
9#define LEON_H_INCLUDE 9#define LEON_H_INCLUDE
10 10
11#ifdef CONFIG_SPARC_LEON
12
13/* mmu register access, ASI_LEON_MMUREGS */ 11/* mmu register access, ASI_LEON_MMUREGS */
14#define LEON_CNR_CTRL 0x000 12#define LEON_CNR_CTRL 0x000
15#define LEON_CNR_CTXP 0x100 13#define LEON_CNR_CTXP 0x100
@@ -62,15 +60,6 @@
62 60
63#ifndef __ASSEMBLY__ 61#ifndef __ASSEMBLY__
64 62
65/* do a virtual address read without cache */
66static inline unsigned long leon_readnobuffer_reg(unsigned long paddr)
67{
68 unsigned long retval;
69 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
70 "=r"(retval) : "r"(paddr), "i"(ASI_LEON_NOCACHE));
71 return retval;
72}
73
74/* do a physical address bypass write, i.e. for 0x80000000 */ 63/* do a physical address bypass write, i.e. for 0x80000000 */
75static inline void leon_store_reg(unsigned long paddr, unsigned long value) 64static inline void leon_store_reg(unsigned long paddr, unsigned long value)
76{ 65{
@@ -87,47 +76,16 @@ static inline unsigned long leon_load_reg(unsigned long paddr)
87 return retval; 76 return retval;
88} 77}
89 78
90static inline void leon_srmmu_disabletlb(void)
91{
92 unsigned int retval;
93 __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
94 "i"(ASI_LEON_MMUREGS));
95 retval |= LEON_CNR_CTRL_TLBDIS;
96 __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
97 "i"(ASI_LEON_MMUREGS) : "memory");
98}
99
100static inline void leon_srmmu_enabletlb(void)
101{
102 unsigned int retval;
103 __asm__ __volatile__("lda [%%g0] %2, %0\n\t" : "=r"(retval) : "r"(0),
104 "i"(ASI_LEON_MMUREGS));
105 retval = retval & ~LEON_CNR_CTRL_TLBDIS;
106 __asm__ __volatile__("sta %0, [%%g0] %2\n\t" : : "r"(retval), "r"(0),
107 "i"(ASI_LEON_MMUREGS) : "memory");
108}
109
110/* macro access for leon_load_reg() and leon_store_reg() */ 79/* macro access for leon_load_reg() and leon_store_reg() */
111#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x))) 80#define LEON3_BYPASS_LOAD_PA(x) (leon_load_reg((unsigned long)(x)))
112#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v))) 81#define LEON3_BYPASS_STORE_PA(x, v) (leon_store_reg((unsigned long)(x), (unsigned long)(v)))
113#define LEON3_BYPASS_ANDIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) & v)
114#define LEON3_BYPASS_ORIN_PA(x, v) LEON3_BYPASS_STORE_PA(x, LEON3_BYPASS_LOAD_PA(x) | v)
115#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x)) 82#define LEON_BYPASS_LOAD_PA(x) leon_load_reg((unsigned long)(x))
116#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v)) 83#define LEON_BYPASS_STORE_PA(x, v) leon_store_reg((unsigned long)(x), (unsigned long)(v))
117#define LEON_REGLOAD_PA(x) leon_load_reg((unsigned long)(x)+LEON_PREGS)
118#define LEON_REGSTORE_PA(x, v) leon_store_reg((unsigned long)(x)+LEON_PREGS, (unsigned long)(v))
119#define LEON_REGSTORE_OR_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) | (unsigned long)(v))
120#define LEON_REGSTORE_AND_PA(x, v) LEON_REGSTORE_PA(x, LEON_REGLOAD_PA(x) & (unsigned long)(v))
121
122/* macro access for leon_readnobuffer_reg() */
123#define LEON_BYPASSCACHE_LOAD_VA(x) leon_readnobuffer_reg((unsigned long)(x))
124 84
125extern void leon_init(void); 85extern void leon_init(void);
126extern void leon_switch_mm(void); 86extern void leon_switch_mm(void);
127extern void leon_init_IRQ(void); 87extern void leon_init_IRQ(void);
128 88
129extern unsigned long last_valid_pfn;
130
131static inline unsigned long sparc_leon3_get_dcachecfg(void) 89static inline unsigned long sparc_leon3_get_dcachecfg(void)
132{ 90{
133 unsigned int retval; 91 unsigned int retval;
@@ -230,9 +188,6 @@ static inline int sparc_leon3_cpuid(void)
230#error cannot determine LEON_PAGE_SIZE_LEON 188#error cannot determine LEON_PAGE_SIZE_LEON
231#endif 189#endif
232 190
233#define PAGE_MIN_SHIFT (12)
234#define PAGE_MIN_SIZE (1UL << PAGE_MIN_SHIFT)
235
236#define LEON3_XCCR_SETS_MASK 0x07000000UL 191#define LEON3_XCCR_SETS_MASK 0x07000000UL
237#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL 192#define LEON3_XCCR_SSIZE_MASK 0x00f00000UL
238 193
@@ -242,7 +197,7 @@ static inline int sparc_leon3_cpuid(void)
242#ifndef __ASSEMBLY__ 197#ifndef __ASSEMBLY__
243struct vm_area_struct; 198struct vm_area_struct;
244 199
245extern unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr); 200extern unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr);
246extern void leon_flush_icache_all(void); 201extern void leon_flush_icache_all(void);
247extern void leon_flush_dcache_all(void); 202extern void leon_flush_dcache_all(void);
248extern void leon_flush_cache_all(void); 203extern void leon_flush_cache_all(void);
@@ -258,15 +213,7 @@ struct leon3_cacheregs {
258 unsigned long dccr; /* 0x0c - Data Cache Configuration Register */ 213 unsigned long dccr; /* 0x0c - Data Cache Configuration Register */
259}; 214};
260 215
261/* struct that hold LEON2 cache configuration register 216#include <linux/irq.h>
262 * & configuration register
263 */
264struct leon2_cacheregs {
265 unsigned long ccr, cfg;
266};
267
268#ifdef __KERNEL__
269
270#include <linux/interrupt.h> 217#include <linux/interrupt.h>
271 218
272struct device_node; 219struct device_node;
@@ -292,24 +239,15 @@ extern void leon_smp_done(void);
292extern void leon_boot_cpus(void); 239extern void leon_boot_cpus(void);
293extern int leon_boot_one_cpu(int i, struct task_struct *); 240extern int leon_boot_one_cpu(int i, struct task_struct *);
294void leon_init_smp(void); 241void leon_init_smp(void);
295extern void cpu_idle(void);
296extern void init_IRQ(void);
297extern void cpu_panic(void);
298extern int __leon_processor_id(void);
299void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu); 242void leon_enable_irq_cpu(unsigned int irq_nr, unsigned int cpu);
300extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused); 243extern irqreturn_t leon_percpu_timer_interrupt(int irq, void *unused);
301 244
302extern unsigned int real_irq_entry[];
303extern unsigned int smpleon_ipi[]; 245extern unsigned int smpleon_ipi[];
304extern unsigned int patchme_maybe_smp_msg[]; 246extern unsigned int linux_trap_ipi15_leon[];
305extern unsigned int t_nmi[], linux_trap_ipi15_leon[];
306extern unsigned int linux_trap_ipi15_sun4m[];
307extern int leon_ipi_irq; 247extern int leon_ipi_irq;
308 248
309#endif /* CONFIG_SMP */ 249#endif /* CONFIG_SMP */
310 250
311#endif /* __KERNEL__ */
312
313#endif /* __ASSEMBLY__ */ 251#endif /* __ASSEMBLY__ */
314 252
315/* macros used in leon_mm.c */ 253/* macros used in leon_mm.c */
@@ -317,18 +255,4 @@ extern int leon_ipi_irq;
317#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base))) 255#define _pfn_valid(pfn) ((pfn < last_valid_pfn) && (pfn >= PFN(phys_base)))
318#define _SRMMU_PTE_PMASK_LEON 0xffffffff 256#define _SRMMU_PTE_PMASK_LEON 0xffffffff
319 257
320#else /* defined(CONFIG_SPARC_LEON) */
321
322/* nop definitions for !LEON case */
323#define leon_init() do {} while (0)
324#define leon_switch_mm() do {} while (0)
325#define leon_init_IRQ() do {} while (0)
326#define init_leon() do {} while (0)
327#define leon_smp_done() do {} while (0)
328#define leon_boot_cpus() do {} while (0)
329#define leon_boot_one_cpu(i, t) 1
330#define leon_init_smp() do {} while (0)
331
332#endif /* !defined(CONFIG_SPARC_LEON) */
333
334#endif 258#endif
diff --git a/arch/sparc/include/asm/leon_amba.h b/arch/sparc/include/asm/leon_amba.h
index e50f326e71bd..f3034eddf468 100644
--- a/arch/sparc/include/asm/leon_amba.h
+++ b/arch/sparc/include/asm/leon_amba.h
@@ -87,8 +87,6 @@ struct amba_prom_registers {
87#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7) 87#define LEON3_GPTIMER_CONFIG_NRTIMERS(c) ((c)->config & 0x7)
88#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0) 88#define LEON3_GPTIMER_CTRL_ISPENDING(r) (((r)&LEON3_GPTIMER_CTRL_PENDING) ? 1 : 0)
89 89
90#ifdef CONFIG_SPARC_LEON
91
92#ifndef __ASSEMBLY__ 90#ifndef __ASSEMBLY__
93 91
94struct leon3_irqctrl_regs_map { 92struct leon3_irqctrl_regs_map {
@@ -264,6 +262,4 @@ extern unsigned int sparc_leon_eirq;
264 262
265#define amba_device(x) (((x) >> 12) & 0xfff) 263#define amba_device(x) (((x) >> 12) & 0xfff)
266 264
267#endif /* !defined(CONFIG_SPARC_LEON) */
268
269#endif 265#endif
diff --git a/arch/sparc/include/asm/mpmbox.h b/arch/sparc/include/asm/mpmbox.h
deleted file mode 100644
index f8423039b242..000000000000
--- a/arch/sparc/include/asm/mpmbox.h
+++ /dev/null
@@ -1,67 +0,0 @@
1/*
2 * mpmbox.h: Interface and defines for the OpenProm mailbox
3 * facilities for MP machines under Linux.
4 *
5 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
6 */
7
8#ifndef _SPARC_MPMBOX_H
9#define _SPARC_MPMBOX_H
10
11/* The prom allocates, for each CPU on the machine an unsigned
12 * byte in physical ram. You probe the device tree prom nodes
13 * for these values. The purpose of this byte is to be able to
14 * pass messages from one cpu to another.
15 */
16
17/* These are the main message types we have to look for in our
18 * Cpu mailboxes, based upon these values we decide what course
19 * of action to take.
20 */
21
22/* The CPU is executing code in the kernel. */
23#define MAILBOX_ISRUNNING 0xf0
24
25/* Another CPU called romvec->pv_exit(), you should call
26 * prom_stopcpu() when you see this in your mailbox.
27 */
28#define MAILBOX_EXIT 0xfb
29
30/* Another CPU called romvec->pv_enter(), you should call
31 * prom_cpuidle() when this is seen.
32 */
33#define MAILBOX_GOSPIN 0xfc
34
35/* Another CPU has hit a breakpoint either into kadb or the prom
36 * itself. Just like MAILBOX_GOSPIN, you should call prom_cpuidle()
37 * at this point.
38 */
39#define MAILBOX_BPT_SPIN 0xfd
40
41/* Oh geese, some other nitwit got a damn watchdog reset. The party's
42 * over so go call prom_stopcpu().
43 */
44#define MAILBOX_WDOG_STOP 0xfe
45
46#ifndef __ASSEMBLY__
47
48/* Handy macro's to determine a cpu's state. */
49
50/* Is the cpu still in Power On Self Test? */
51#define MBOX_POST_P(letter) ((letter) >= 0x00 && (letter) <= 0x7f)
52
53/* Is the cpu at the 'ok' prompt of the PROM? */
54#define MBOX_PROMPROMPT_P(letter) ((letter) >= 0x80 && (letter) <= 0x8f)
55
56/* Is the cpu spinning in the PROM? */
57#define MBOX_PROMSPIN_P(letter) ((letter) >= 0x90 && (letter) <= 0xef)
58
59/* Sanity check... This is junk mail, throw it out. */
60#define MBOX_BOGON_P(letter) ((letter) >= 0xf1 && (letter) <= 0xfa)
61
62/* Is the cpu actively running an application/kernel-code? */
63#define MBOX_RUNNING_P(letter) ((letter) == MAILBOX_ISRUNNING)
64
65#endif /* !(__ASSEMBLY__) */
66
67#endif /* !(_SPARC_MPMBOX_H) */
diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
index cb828703a63a..79da17866fa8 100644
--- a/arch/sparc/include/asm/pgtsrmmu.h
+++ b/arch/sparc/include/asm/pgtsrmmu.h
@@ -139,6 +139,7 @@
139 restore %g0, %g0, %g0; 139 restore %g0, %g0, %g0;
140 140
141#ifndef __ASSEMBLY__ 141#ifndef __ASSEMBLY__
142extern unsigned long last_valid_pfn;
142 143
143/* This makes sense. Honest it does - Anton */ 144/* This makes sense. Honest it does - Anton */
144/* XXX Yes but it's ugly as sin. FIXME. -KMW */ 145/* XXX Yes but it's ugly as sin. FIXME. -KMW */
@@ -148,67 +149,13 @@ extern void *srmmu_nocache_pool;
148#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR)) 149#define __nocache_fix(VADDR) __va(__nocache_pa(VADDR))
149 150
150/* Accessing the MMU control register. */ 151/* Accessing the MMU control register. */
151static inline unsigned int srmmu_get_mmureg(void) 152unsigned int srmmu_get_mmureg(void);
152{ 153void srmmu_set_mmureg(unsigned long regval);
153 unsigned int retval; 154void srmmu_set_ctable_ptr(unsigned long paddr);
154 __asm__ __volatile__("lda [%%g0] %1, %0\n\t" : 155void srmmu_set_context(int context);
155 "=r" (retval) : 156int srmmu_get_context(void);
156 "i" (ASI_M_MMUREGS)); 157unsigned int srmmu_get_fstatus(void);
157 return retval; 158unsigned int srmmu_get_faddr(void);
158}
159
160static inline void srmmu_set_mmureg(unsigned long regval)
161{
162 __asm__ __volatile__("sta %0, [%%g0] %1\n\t" : :
163 "r" (regval), "i" (ASI_M_MMUREGS) : "memory");
164
165}
166
167static inline void srmmu_set_ctable_ptr(unsigned long paddr)
168{
169 paddr = ((paddr >> 4) & SRMMU_CTX_PMASK);
170 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
171 "r" (paddr), "r" (SRMMU_CTXTBL_PTR),
172 "i" (ASI_M_MMUREGS) :
173 "memory");
174}
175
176static inline void srmmu_set_context(int context)
177{
178 __asm__ __volatile__("sta %0, [%1] %2\n\t" : :
179 "r" (context), "r" (SRMMU_CTX_REG),
180 "i" (ASI_M_MMUREGS) : "memory");
181}
182
183static inline int srmmu_get_context(void)
184{
185 register int retval;
186 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
187 "=r" (retval) :
188 "r" (SRMMU_CTX_REG),
189 "i" (ASI_M_MMUREGS));
190 return retval;
191}
192
193static inline unsigned int srmmu_get_fstatus(void)
194{
195 unsigned int retval;
196
197 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
198 "=r" (retval) :
199 "r" (SRMMU_FAULT_STATUS), "i" (ASI_M_MMUREGS));
200 return retval;
201}
202
203static inline unsigned int srmmu_get_faddr(void)
204{
205 unsigned int retval;
206
207 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
208 "=r" (retval) :
209 "r" (SRMMU_FAULT_ADDR), "i" (ASI_M_MMUREGS));
210 return retval;
211}
212 159
213/* This is guaranteed on all SRMMU's. */ 160/* This is guaranteed on all SRMMU's. */
214static inline void srmmu_flush_whole_tlb(void) 161static inline void srmmu_flush_whole_tlb(void)
@@ -219,23 +166,6 @@ static inline void srmmu_flush_whole_tlb(void)
219 166
220} 167}
221 168
222/* These flush types are not available on all chips... */
223#ifndef CONFIG_SPARC_LEON
224static inline unsigned long srmmu_hwprobe(unsigned long vaddr)
225{
226 unsigned long retval;
227
228 vaddr &= PAGE_MASK;
229 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
230 "=r" (retval) :
231 "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
232
233 return retval;
234}
235#else
236#define srmmu_hwprobe(addr) srmmu_swprobe(addr, 0)
237#endif
238
239static inline int 169static inline int
240srmmu_get_pte (unsigned long addr) 170srmmu_get_pte (unsigned long addr)
241{ 171{
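
The MMU-register accessors above go from static inline functions in the header to plain prototypes, so their definitions presumably move out of line into a sparc32 C file that is not part of this hunk. A minimal sketch of what two of those out-of-line definitions would look like, reusing the inline-asm bodies this hunk deletes:

/* Sketch only: out-of-line forms of two accessors declared above, built
 * from the inline bodies removed in this hunk.  The file they actually
 * land in is not shown in this diff.
 */
#include <asm/asi.h>		/* ASI_M_MMUREGS */
#include <asm/pgtsrmmu.h>	/* SRMMU_CTX_REG and the new prototypes */

unsigned int srmmu_get_mmureg(void)
{
	unsigned int retval;

	__asm__ __volatile__("lda [%%g0] %1, %0\n\t" :
			     "=r" (retval) :
			     "i" (ASI_M_MMUREGS));
	return retval;
}

void srmmu_set_context(int context)
{
	__asm__ __volatile__("sta %0, [%1] %2\n\t" : :
			     "r" (context), "r" (SRMMU_CTX_REG),
			     "i" (ASI_M_MMUREGS) : "memory");
}
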
diff --git a/arch/sparc/include/asm/posix_types.h b/arch/sparc/include/asm/posix_types.h
index 3070f25ae90a..156220ed99eb 100644
--- a/arch/sparc/include/asm/posix_types.h
+++ b/arch/sparc/include/asm/posix_types.h
@@ -9,8 +9,6 @@
9 9
10#if defined(__sparc__) && defined(__arch64__) 10#if defined(__sparc__) && defined(__arch64__)
11/* sparc 64 bit */ 11/* sparc 64 bit */
12typedef unsigned int __kernel_nlink_t;
13#define __kernel_nlink_t __kernel_nlink_t
14 12
15typedef unsigned short __kernel_old_uid_t; 13typedef unsigned short __kernel_old_uid_t;
16typedef unsigned short __kernel_old_gid_t; 14typedef unsigned short __kernel_old_gid_t;
@@ -38,9 +36,6 @@ typedef unsigned short __kernel_gid_t;
38typedef unsigned short __kernel_mode_t; 36typedef unsigned short __kernel_mode_t;
39#define __kernel_mode_t __kernel_mode_t 37#define __kernel_mode_t __kernel_mode_t
40 38
41typedef short __kernel_nlink_t;
42#define __kernel_nlink_t __kernel_nlink_t
43
44typedef long __kernel_daddr_t; 39typedef long __kernel_daddr_t;
45#define __kernel_daddr_t __kernel_daddr_t 40#define __kernel_daddr_t __kernel_daddr_t
46 41
diff --git a/arch/sparc/include/asm/psr.h b/arch/sparc/include/asm/psr.h
index b8c0e5f0a66b..cee7ed9c927d 100644
--- a/arch/sparc/include/asm/psr.h
+++ b/arch/sparc/include/asm/psr.h
@@ -35,6 +35,14 @@
35#define PSR_VERS 0x0f000000 /* cpu-version field */ 35#define PSR_VERS 0x0f000000 /* cpu-version field */
36#define PSR_IMPL 0xf0000000 /* cpu-implementation field */ 36#define PSR_IMPL 0xf0000000 /* cpu-implementation field */
37 37
38#define PSR_VERS_SHIFT 24
39#define PSR_IMPL_SHIFT 28
40#define PSR_VERS_SHIFTED_MASK 0xf
41#define PSR_IMPL_SHIFTED_MASK 0xf
42
43#define PSR_IMPL_TI 0x4
44#define PSR_IMPL_LEON 0xf
45
38#ifdef __KERNEL__ 46#ifdef __KERNEL__
39 47
40#ifndef __ASSEMBLY__ 48#ifndef __ASSEMBLY__
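
The new shift and mask constants let C code pick apart the PSR implementation/version fields without magic numbers; cpu.c and head_32.S below are converted to use them. A short sketch of the decode, using the existing sparc32 get_psr() helper:

/* Sketch: decoding the PSR impl field with the constants added above,
 * the same way cpu_type_probe() below now does.
 */
#include <asm/psr.h>

static int running_on_leon(void)
{
	unsigned int psr  = get_psr();
	unsigned int impl = (psr >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK;

	return impl == PSR_IMPL_LEON;	/* 0xf = Aeroflex Gaisler LEON */
}
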
diff --git a/arch/sparc/include/asm/sections.h b/arch/sparc/include/asm/sections.h
index 0b0553bbd8a0..f300d1a9b2b6 100644
--- a/arch/sparc/include/asm/sections.h
+++ b/arch/sparc/include/asm/sections.h
@@ -7,4 +7,7 @@
7/* sparc entry point */ 7/* sparc entry point */
8extern char _start[]; 8extern char _start[];
9 9
10extern char __leon_1insn_patch[];
11extern char __leon_1insn_patch_end[];
12
10#endif 13#endif
diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
index 5af664932452..e6cd224506a9 100644
--- a/arch/sparc/include/asm/thread_info_32.h
+++ b/arch/sparc/include/asm/thread_info_32.h
@@ -131,8 +131,7 @@ register struct thread_info *current_thread_info_reg asm("g6");
131#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG) 131#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
132 132
133#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \ 133#define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | \
134 _TIF_SIGPENDING | \ 134 _TIF_SIGPENDING)
135 _TIF_RESTORE_SIGMASK)
136 135
137#endif /* __KERNEL__ */ 136#endif /* __KERNEL__ */
138 137
diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
index 7f0981b09451..cfa8c38fb9c8 100644
--- a/arch/sparc/include/asm/thread_info_64.h
+++ b/arch/sparc/include/asm/thread_info_64.h
@@ -238,7 +238,23 @@ static inline void set_restore_sigmask(void)
238{ 238{
239 struct thread_info *ti = current_thread_info(); 239 struct thread_info *ti = current_thread_info();
240 ti->status |= TS_RESTORE_SIGMASK; 240 ti->status |= TS_RESTORE_SIGMASK;
241 set_bit(TIF_SIGPENDING, &ti->flags); 241 WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
242}
243static inline void clear_restore_sigmask(void)
244{
245 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
246}
247static inline bool test_restore_sigmask(void)
248{
249 return current_thread_info()->status & TS_RESTORE_SIGMASK;
250}
251static inline bool test_and_clear_restore_sigmask(void)
252{
253 struct thread_info *ti = current_thread_info();
254 if (!(ti->status & TS_RESTORE_SIGMASK))
255 return false;
256 ti->status &= ~TS_RESTORE_SIGMASK;
257 return true;
242} 258}
243#endif /* !__ASSEMBLY__ */ 259#endif /* !__ASSEMBLY__ */
244 260
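
set_restore_sigmask() now only warns if TIF_SIGPENDING is not already set, and the new clear/test/test_and_clear helpers give the generic signal code everything it needs to own the saved-sigmask handling; the open-coded TS_RESTORE_SIGMASK manipulation is dropped from the sparc signal code later in this patch. A hedged sketch of how the generic restore_saved_sigmask() is expected to sit on top of these helpers (its actual body is not part of this diff):

/* Sketch (assumption about the generic side, not shown here): there is
 * only work to do when set_restore_sigmask() parked a mask, which is
 * exactly what test_and_clear_restore_sigmask() reports.
 */
static inline void restore_saved_sigmask(void)
{
	if (test_and_clear_restore_sigmask())
		set_current_blocked(&current->saved_sigmask);
}
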
diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
index 72308f9b0096..6cf591b7e1c6 100644
--- a/arch/sparc/kernel/Makefile
+++ b/arch/sparc/kernel/Makefile
@@ -51,8 +51,8 @@ obj-y += of_device_common.o
51obj-y += of_device_$(BITS).o 51obj-y += of_device_$(BITS).o
52obj-$(CONFIG_SPARC64) += prom_irqtrans.o 52obj-$(CONFIG_SPARC64) += prom_irqtrans.o
53 53
54obj-$(CONFIG_SPARC_LEON)+= leon_kernel.o 54obj-$(CONFIG_SPARC32) += leon_kernel.o
55obj-$(CONFIG_SPARC_LEON)+= leon_pmc.o 55obj-$(CONFIG_SPARC32) += leon_pmc.o
56 56
57obj-$(CONFIG_SPARC64) += reboot.o 57obj-$(CONFIG_SPARC64) += reboot.o
58obj-$(CONFIG_SPARC64) += sysfs.o 58obj-$(CONFIG_SPARC64) += sysfs.o
diff --git a/arch/sparc/kernel/cpu.c b/arch/sparc/kernel/cpu.c
index 2d1819641769..a6c94a2bf9d4 100644
--- a/arch/sparc/kernel/cpu.c
+++ b/arch/sparc/kernel/cpu.c
@@ -121,7 +121,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
121 FPU(-1, NULL) 121 FPU(-1, NULL)
122 } 122 }
123},{ 123},{
124 4, 124 PSR_IMPL_TI,
125 .cpu_info = { 125 .cpu_info = {
126 CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"), 126 CPU(0, "Texas Instruments, Inc. - SuperSparc-(II)"),
127 /* SparcClassic -- borned STP1010TAB-50*/ 127 /* SparcClassic -- borned STP1010TAB-50*/
@@ -191,7 +191,7 @@ static const struct manufacturer_info __initconst manufacturer_info[] = {
191 FPU(-1, NULL) 191 FPU(-1, NULL)
192 } 192 }
193},{ 193},{
194 0xF, /* Aeroflex Gaisler */ 194 PSR_IMPL_LEON, /* Aeroflex Gaisler */
195 .cpu_info = { 195 .cpu_info = {
196 CPU(3, "LEON"), 196 CPU(3, "LEON"),
197 CPU(-1, NULL) 197 CPU(-1, NULL)
@@ -440,16 +440,16 @@ static int __init cpu_type_probe(void)
440 int psr_impl, psr_vers, fpu_vers; 440 int psr_impl, psr_vers, fpu_vers;
441 int psr; 441 int psr;
442 442
443 psr_impl = ((get_psr() >> 28) & 0xf); 443 psr_impl = ((get_psr() >> PSR_IMPL_SHIFT) & PSR_IMPL_SHIFTED_MASK);
444 psr_vers = ((get_psr() >> 24) & 0xf); 444 psr_vers = ((get_psr() >> PSR_VERS_SHIFT) & PSR_VERS_SHIFTED_MASK);
445 445
446 psr = get_psr(); 446 psr = get_psr();
447 put_psr(psr | PSR_EF); 447 put_psr(psr | PSR_EF);
448#ifdef CONFIG_SPARC_LEON 448
449 fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7; 449 if (psr_impl == PSR_IMPL_LEON)
450#else 450 fpu_vers = get_psr() & PSR_EF ? ((get_fsr() >> 17) & 0x7) : 7;
451 fpu_vers = ((get_fsr() >> 17) & 0x7); 451 else
452#endif 452 fpu_vers = ((get_fsr() >> 17) & 0x7);
453 453
454 put_psr(psr); 454 put_psr(psr);
455 455
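
The conversion visible here is the pattern this series applies throughout: both the LEON and the sun4m paths are always compiled, and the choice is made at run time from the PSR implementation field (or, once known, from sparc_cpu_model) instead of at build time via CONFIG_SPARC_LEON. As a generic sketch, with two placeholder helper names:

/* Sketch of the #ifdef-to-runtime-check conversion pattern.
 * do_leon_variant()/do_sun_variant() are hypothetical helpers.
 */
static void do_leon_variant(void) { /* LEON-specific path */ }
static void do_sun_variant(void)  { /* sun4m/sun4d path */ }

static void frobnicate(void)
{
	if (sparc_cpu_model == sparc_leon)
		do_leon_variant();
	else
		do_sun_variant();
}
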
diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S
index 2dbe1806e530..dcaa1cf0de40 100644
--- a/arch/sparc/kernel/entry.S
+++ b/arch/sparc/kernel/entry.S
@@ -393,7 +393,6 @@ linux_trap_ipi15_sun4d:
393 /* FIXME */ 393 /* FIXME */
3941: b,a 1b 3941: b,a 1b
395 395
396#ifdef CONFIG_SPARC_LEON
397 .globl smpleon_ipi 396 .globl smpleon_ipi
398 .extern leon_ipi_interrupt 397 .extern leon_ipi_interrupt
399 /* SMP per-cpu IPI interrupts are handled specially. */ 398 /* SMP per-cpu IPI interrupts are handled specially. */
@@ -424,8 +423,6 @@ linux_trap_ipi15_leon:
424 b ret_trap_lockless_ipi 423 b ret_trap_lockless_ipi
425 clr %l6 424 clr %l6
426 425
427#endif /* CONFIG_SPARC_LEON */
428
429#endif /* CONFIG_SMP */ 426#endif /* CONFIG_SMP */
430 427
431 /* This routine handles illegal instructions and privileged 428 /* This routine handles illegal instructions and privileged
@@ -770,8 +767,11 @@ srmmu_fault:
770 mov 0x400, %l5 767 mov 0x400, %l5
771 mov 0x300, %l4 768 mov 0x300, %l4
772 769
773 lda [%l5] ASI_M_MMUREGS, %l6 ! read sfar first 770LEON_PI(lda [%l5] ASI_LEON_MMUREGS, %l6) ! read sfar first
774 lda [%l4] ASI_M_MMUREGS, %l5 ! read sfsr last 771SUN_PI_(lda [%l5] ASI_M_MMUREGS, %l6) ! read sfar first
772
773LEON_PI(lda [%l4] ASI_LEON_MMUREGS, %l5) ! read sfsr last
774SUN_PI_(lda [%l4] ASI_M_MMUREGS, %l5) ! read sfsr last
775 775
776 andn %l6, 0xfff, %l6 776 andn %l6, 0xfff, %l6
777 srl %l5, 6, %l5 ! and encode all info into l7 777 srl %l5, 6, %l5 ! and encode all info into l7
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
index 84b5f0d2afde..e3e80d65e39a 100644
--- a/arch/sparc/kernel/etrap_32.S
+++ b/arch/sparc/kernel/etrap_32.S
@@ -234,7 +234,8 @@ tsetup_srmmu_stackchk:
234 234
235 cmp %glob_tmp, %sp 235 cmp %glob_tmp, %sp
236 bleu,a 1f 236 bleu,a 1f
237 lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control 237LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
238SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
238 239
239trap_setup_user_stack_is_bolixed: 240trap_setup_user_stack_is_bolixed:
240 /* From user/kernel into invalid window w/bad user 241 /* From user/kernel into invalid window w/bad user
@@ -249,18 +250,25 @@ trap_setup_user_stack_is_bolixed:
2491: 2501:
250 /* Clear the fault status and turn on the no_fault bit. */ 251 /* Clear the fault status and turn on the no_fault bit. */
251 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit 252 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
252 sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it 253LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
254SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
253 255
254 /* Dump the registers and cross fingers. */ 256 /* Dump the registers and cross fingers. */
255 STORE_WINDOW(sp) 257 STORE_WINDOW(sp)
256 258
257 /* Clear the no_fault bit and check the status. */ 259 /* Clear the no_fault bit and check the status. */
258 andn %glob_tmp, 0x2, %glob_tmp 260 andn %glob_tmp, 0x2, %glob_tmp
259 sta %glob_tmp, [%g0] ASI_M_MMUREGS 261LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
262SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
263
260 mov AC_M_SFAR, %glob_tmp 264 mov AC_M_SFAR, %glob_tmp
261 lda [%glob_tmp] ASI_M_MMUREGS, %g0 265LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
266SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
267
262 mov AC_M_SFSR, %glob_tmp 268 mov AC_M_SFSR, %glob_tmp
263 lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp ! save away status of winstore 269LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)! save away status of winstore
270SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
271
264 andcc %glob_tmp, 0x2, %g0 ! did we fault? 272 andcc %glob_tmp, 0x2, %g0 ! did we fault?
265 bne trap_setup_user_stack_is_bolixed ! failure 273 bne trap_setup_user_stack_is_bolixed ! failure
266 nop 274 nop
diff --git a/arch/sparc/kernel/head_32.S b/arch/sparc/kernel/head_32.S
index a0f5c20e4b9c..afeb1d770303 100644
--- a/arch/sparc/kernel/head_32.S
+++ b/arch/sparc/kernel/head_32.S
@@ -30,10 +30,6 @@
30 * the cpu-type 30 * the cpu-type
31 */ 31 */
32 .align 4 32 .align 4
33cputyp:
34 .word 1
35
36 .align 4
37 .globl cputypval 33 .globl cputypval
38cputypval: 34cputypval:
39 .asciz "sun4m" 35 .asciz "sun4m"
@@ -46,8 +42,8 @@ cputypvar:
46 42
47 .align 4 43 .align 4
48 44
49sun4c_notsup: 45notsup:
50 .asciz "Sparc-Linux sun4/sun4c support does no longer exist.\n\n" 46 .asciz "Sparc-Linux sun4/sun4c or MMU-less not supported\n\n"
51 .align 4 47 .align 4
52 48
53sun4e_notsup: 49sun4e_notsup:
@@ -123,7 +119,7 @@ current_pc:
123 tst %o0 119 tst %o0
124 be no_sun4u_here 120 be no_sun4u_here
125 mov %g4, %o7 /* Previous %o7. */ 121 mov %g4, %o7 /* Previous %o7. */
126 122
127 mov %o0, %l0 ! stash away romvec 123 mov %o0, %l0 ! stash away romvec
128 mov %o0, %g7 ! put it here too 124 mov %o0, %g7 ! put it here too
129 mov %o1, %l1 ! stash away debug_vec too 125 mov %o1, %l1 ! stash away debug_vec too
@@ -132,7 +128,7 @@ current_pc:
132 set current_pc, %g5 128 set current_pc, %g5
133 cmp %g3, %g5 129 cmp %g3, %g5
134 be already_mapped 130 be already_mapped
135 nop 131 nop
136 132
137 /* %l6 will hold the offset we have to subtract 133 /* %l6 will hold the offset we have to subtract
138 * from absolute symbols in order to access areas 134 * from absolute symbols in order to access areas
@@ -192,9 +188,9 @@ copy_prom_done:
192 bne not_a_sun4 188 bne not_a_sun4
193 nop 189 nop
194 190
195halt_sun4_or_sun4c: 191halt_notsup:
196 ld [%g7 + 0x68], %o1 192 ld [%g7 + 0x68], %o1
197 set sun4c_notsup, %o0 193 set notsup, %o0
198 sub %o0, %l6, %o0 194 sub %o0, %l6, %o0
199 call %o1 195 call %o1
200 nop 196 nop
@@ -202,18 +198,31 @@ halt_sun4_or_sun4c:
202 nop 198 nop
203 199
204not_a_sun4: 200not_a_sun4:
201 /* It looks like this is a machine we support.
202 * Now find out what MMU we are dealing with
203 * LEON - identified by the psr.impl field
204 * Viking - identified by the psr.impl field
205 * In all other cases a sun4m srmmu.
206 * We check that the MMU is enabled in all cases.
207 */
208
209 /* Check if this is a LEON CPU */
210 rd %psr, %g3
211 srl %g3, PSR_IMPL_SHIFT, %g3
212 and %g3, PSR_IMPL_SHIFTED_MASK, %g3
213 cmp %g3, PSR_IMPL_LEON
214 be leon_remap /* It is a LEON - jump */
215 nop
216
217 /* Sanity-check, is MMU enabled */
205 lda [%g0] ASI_M_MMUREGS, %g1 218 lda [%g0] ASI_M_MMUREGS, %g1
206 andcc %g1, 1, %g0 219 andcc %g1, 1, %g0
207 be halt_sun4_or_sun4c 220 be halt_notsup
208 nop 221 nop
209 222
210srmmu_remap: 223 /* Check for a viking (TI) module. */
211 /* First, check for a viking (TI) module. */ 224 cmp %g3, PSR_IMPL_TI
212 set 0x40000000, %g2 225 bne srmmu_not_viking
213 rd %psr, %g3
214 and %g2, %g3, %g3
215 subcc %g3, 0x0, %g0
216 bz srmmu_nviking
217 nop 226 nop
218 227
219 /* Figure out what kind of viking we are on. 228 /* Figure out what kind of viking we are on.
@@ -228,14 +237,14 @@ srmmu_remap:
228 lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg 237 lda [%g0] ASI_M_MMUREGS, %g3 ! peek in the control reg
229 and %g2, %g3, %g3 238 and %g2, %g3, %g3
230 subcc %g3, 0x0, %g0 239 subcc %g3, 0x0, %g0
231 bnz srmmu_nviking ! is in mbus mode 240 bnz srmmu_not_viking ! is in mbus mode
232 nop 241 nop
233 242
234 rd %psr, %g3 ! DO NOT TOUCH %g3 243 rd %psr, %g3 ! DO NOT TOUCH %g3
235 andn %g3, PSR_ET, %g2 244 andn %g3, PSR_ET, %g2
236 wr %g2, 0x0, %psr 245 wr %g2, 0x0, %psr
237 WRITE_PAUSE 246 WRITE_PAUSE
238 247
239 /* Get context table pointer, then convert to 248 /* Get context table pointer, then convert to
240 * a physical address, which is 36 bits. 249 * a physical address, which is 36 bits.
241 */ 250 */
@@ -258,12 +267,12 @@ srmmu_remap:
258 lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr 267 lda [%g4] ASI_M_BYPASS, %o1 ! This is a level 1 ptr
259 srl %o1, 0x4, %o1 ! Clear low 4 bits 268 srl %o1, 0x4, %o1 ! Clear low 4 bits
260 sll %o1, 0x8, %o1 ! Make physical 269 sll %o1, 0x8, %o1 ! Make physical
261 270
262 /* Ok, pull in the PTD. */ 271 /* Ok, pull in the PTD. */
263 lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd 272 lda [%o1] ASI_M_BYPASS, %o2 ! This is the 0x0 16MB pgd
264 273
265 /* Calculate to KERNBASE entry. */ 274 /* Calculate to KERNBASE entry. */
266 add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3 275 add %o1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %o3
267 276
268 /* Poke the entry into the calculated address. */ 277 /* Poke the entry into the calculated address. */
269 sta %o2, [%o3] ASI_M_BYPASS 278 sta %o2, [%o3] ASI_M_BYPASS
@@ -293,12 +302,12 @@ srmmu_remap:
293 b go_to_highmem 302 b go_to_highmem
294 nop 303 nop
295 304
305srmmu_not_viking:
296 /* This works on viking's in Mbus mode and all 306 /* This works on viking's in Mbus mode and all
297 * other MBUS modules. It is virtually the same as 307 * other MBUS modules. It is virtually the same as
298 * the above madness sans turning traps off and flipping 308 * the above madness sans turning traps off and flipping
299 * the AC bit. 309 * the AC bit.
300 */ 310 */
301srmmu_nviking:
302 set AC_M_CTPR, %g1 311 set AC_M_CTPR, %g1
303 lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr 312 lda [%g1] ASI_M_MMUREGS, %g1 ! get ctx table ptr
304 sll %g1, 0x4, %g1 ! make physical addr 313 sll %g1, 0x4, %g1 ! make physical addr
@@ -313,6 +322,29 @@ srmmu_nviking:
313 nop ! wheee.... 322 nop ! wheee....
314 323
315 324
325leon_remap:
326 /* Sanity-check, is MMU enabled */
327 lda [%g0] ASI_LEON_MMUREGS, %g1
328 andcc %g1, 1, %g0
329 be halt_notsup
330 nop
331
332 /* Same code as in the srmmu_not_viking case,
333 * with the LEON ASI for mmuregs
334 */
335 set AC_M_CTPR, %g1
336 lda [%g1] ASI_LEON_MMUREGS, %g1 ! get ctx table ptr
337 sll %g1, 0x4, %g1 ! make physical addr
338 lda [%g1] ASI_M_BYPASS, %g1 ! ptr to level 1 pg_table
339 srl %g1, 0x4, %g1
340 sll %g1, 0x8, %g1 ! make phys addr for l1 tbl
341
342 lda [%g1] ASI_M_BYPASS, %g2 ! get level1 entry for 0x0
343 add %g1, KERNBASE >> (SRMMU_PGDIR_SHIFT - 2), %g3
344 sta %g2, [%g3] ASI_M_BYPASS ! place at KERNBASE entry
345 b go_to_highmem
346 nop ! wheee....
347
316/* Now do a non-relative jump so that PC is in high-memory */ 348/* Now do a non-relative jump so that PC is in high-memory */
317go_to_highmem: 349go_to_highmem:
318 set execute_in_high_mem, %g1 350 set execute_in_high_mem, %g1
@@ -336,8 +368,9 @@ execute_in_high_mem:
336 sethi %hi(linux_dbvec), %g1 368 sethi %hi(linux_dbvec), %g1
337 st %o1, [%g1 + %lo(linux_dbvec)] 369 st %o1, [%g1 + %lo(linux_dbvec)]
338 370
339/* Get the machine type via the mysterious romvec node operations. */ 371 /* Get the machine type via the romvec
340 372 * getprops node operation
373 */
341 add %g7, 0x1c, %l1 374 add %g7, 0x1c, %l1
342 ld [%l1], %l0 375 ld [%l1], %l0
343 ld [%l0], %l0 376 ld [%l0], %l0
@@ -356,9 +389,42 @@ execute_in_high_mem:
356 ! to a buf where above string 389 ! to a buf where above string
357 ! will get stored by the prom. 390 ! will get stored by the prom.
358 391
359#ifdef CONFIG_SPARC_LEON
360 /* no cpu-type check is needed, it is a SPARC-LEON */
361 392
393 /* Check value of "compatible" property.
394 * "value" => "model"
395 * leon => sparc_leon
396 * sun4m => sun4m
397 * sun4s => sun4m
398 * sun4d => sun4d
399 * sun4e => "no_sun4e_here"
400 * '*' => "no_sun4u_here"
401 * Check single letters only
402 */
403
404 set cputypval, %o2
405 /* If cputypval[0] == 'l' (lower case letter L) this is leon */
406 ldub [%o2], %l1
407 cmp %l1, 'l'
408 be leon_init
409 nop
410
411 /* Check cputypval[4] to find the sun model */
412 ldub [%o2 + 0x4], %l1
413
414 cmp %l1, 'm'
415 be sun4m_init
416 cmp %l1, 's'
417 be sun4m_init
418 cmp %l1, 'd'
419 be sun4d_init
420 cmp %l1, 'e'
421 be no_sun4e_here ! Could be a sun4e.
422 nop
423 b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
424 nop
425
426leon_init:
427 /* LEON CPU - set boot_cpu_id */
362 sethi %hi(boot_cpu_id), %g2 ! boot-cpu index 428 sethi %hi(boot_cpu_id), %g2 ! boot-cpu index
363 429
364#ifdef CONFIG_SMP 430#ifdef CONFIG_SMP
@@ -376,26 +442,6 @@ execute_in_high_mem:
376 442
377 ba continue_boot 443 ba continue_boot
378 nop 444 nop
379#endif
380
381/* Check to cputype. We may be booted on a sun4u (64 bit box),
382 * and sun4d needs special treatment.
383 */
384
385 set cputypval, %o2
386 ldub [%o2 + 0x4], %l1
387
388 cmp %l1, 'm'
389 be sun4m_init
390 cmp %l1, 's'
391 be sun4m_init
392 cmp %l1, 'd'
393 be sun4d_init
394 cmp %l1, 'e'
395 be no_sun4e_here ! Could be a sun4e.
396 nop
397 b no_sun4u_here ! AIEEE, a V9 sun4u... Get our BIG BROTHER kernel :))
398 nop
399 445
400/* CPUID in bootbus can be found at PA 0xff0140000 */ 446/* CPUID in bootbus can be found at PA 0xff0140000 */
401#define SUN4D_BOOTBUS_CPUID 0xf0140000 447#define SUN4D_BOOTBUS_CPUID 0xf0140000
@@ -431,9 +477,9 @@ sun4m_init:
431/* This sucks, apparently this makes Vikings call prom panic, will fix later */ 477/* This sucks, apparently this makes Vikings call prom panic, will fix later */
4322: 4782:
433 rd %psr, %o1 479 rd %psr, %o1
434 srl %o1, 28, %o1 ! Get a type of the CPU 480 srl %o1, PSR_IMPL_SHIFT, %o1 ! Get a type of the CPU
435 481
436 subcc %o1, 4, %g0 ! TI: Viking or MicroSPARC 482 subcc %o1, PSR_IMPL_TI, %g0 ! TI: Viking or MicroSPARC
437 be continue_boot 483 be continue_boot
438 nop 484 nop
439 485
@@ -459,10 +505,6 @@ continue_boot:
459/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's 505/* Aieee, now set PC and nPC, enable traps, give ourselves a stack and it's
460 * show-time! 506 * show-time!
461 */ 507 */
462
463 sethi %hi(cputyp), %o0
464 st %g4, [%o0 + %lo(cputyp)]
465
466 /* Turn on Supervisor, EnableFloating, and all the PIL bits. 508 /* Turn on Supervisor, EnableFloating, and all the PIL bits.
467 * Also puts us in register window zero with traps off. 509 * Also puts us in register window zero with traps off.
468 */ 510 */
@@ -480,7 +522,7 @@ continue_boot:
480 set __bss_start , %o0 ! First address of BSS 522 set __bss_start , %o0 ! First address of BSS
481 set _end , %o1 ! Last address of BSS 523 set _end , %o1 ! Last address of BSS
482 add %o0, 0x1, %o0 524 add %o0, 0x1, %o0
4831: 5251:
484 stb %g0, [%o0] 526 stb %g0, [%o0]
485 subcc %o0, %o1, %g0 527 subcc %o0, %o1, %g0
486 bl 1b 528 bl 1b
@@ -546,7 +588,7 @@ continue_boot:
546 set dest, %g2; \ 588 set dest, %g2; \
547 ld [%g5], %g4; \ 589 ld [%g5], %g4; \
548 st %g4, [%g2]; 590 st %g4, [%g2];
549 591
550 /* Patch for window spills... */ 592 /* Patch for window spills... */
551 PATCH_INSN(spnwin_patch1_7win, spnwin_patch1) 593 PATCH_INSN(spnwin_patch1_7win, spnwin_patch1)
552 PATCH_INSN(spnwin_patch2_7win, spnwin_patch2) 594 PATCH_INSN(spnwin_patch2_7win, spnwin_patch2)
@@ -597,7 +639,7 @@ continue_boot:
597 st %g4, [%g5 + 0x18] 639 st %g4, [%g5 + 0x18]
598 st %g4, [%g5 + 0x1c] 640 st %g4, [%g5 + 0x1c]
599 641
6002: 6422:
601 sethi %hi(nwindows), %g4 643 sethi %hi(nwindows), %g4
602 st %g3, [%g4 + %lo(nwindows)] ! store final value 644 st %g3, [%g4 + %lo(nwindows)] ! store final value
603 sub %g3, 0x1, %g3 645 sub %g3, 0x1, %g3
@@ -617,18 +659,12 @@ continue_boot:
617 wr %g3, PSR_ET, %psr 659 wr %g3, PSR_ET, %psr
618 WRITE_PAUSE 660 WRITE_PAUSE
619 661
620 /* First we call prom_init() to set up PROMLIB, then 662 /* Call sparc32_start_kernel(struct linux_romvec *rp) */
621 * off to start_kernel().
622 */
623
624 sethi %hi(prom_vector_p), %g5 663 sethi %hi(prom_vector_p), %g5
625 ld [%g5 + %lo(prom_vector_p)], %o0 664 ld [%g5 + %lo(prom_vector_p)], %o0
626 call prom_init 665 call sparc32_start_kernel
627 nop 666 nop
628 667
629 call start_kernel
630 nop
631
632 /* We should not get here. */ 668 /* We should not get here. */
633 call halt_me 669 call halt_me
634 nop 670 nop
@@ -659,7 +695,7 @@ sun4u_5:
659 .asciz "write" 695 .asciz "write"
660 .align 4 696 .align 4
661sun4u_6: 697sun4u_6:
662 .asciz "\n\rOn sun4u you have to use UltraLinux (64bit) kernel\n\rand not a 32bit sun4[cdem] version\n\r\n\r" 698 .asciz "\n\rOn sun4u you have to use sparc64 kernel\n\rand not a sparc32 version\n\r\n\r"
663sun4u_6e: 699sun4u_6e:
664 .align 4 700 .align 4
665sun4u_7: 701sun4u_7:
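
The added assembly only inspects single characters of the PROM "compatible" string; the same decision is repeated in C once sparc32_start_kernel() runs (see the setup_32.c hunk below). The dispatch written out in plain C, for reference:

/* Sketch: the machine-model dispatch the new head_32.S code performs
 * on cputypval ("leon", "sun4m", "sun4s", "sun4d", "sun4e", ...).
 */
enum model { MODEL_LEON, MODEL_SUN4M, MODEL_SUN4D, MODEL_UNSUPPORTED };

static enum model classify(const char *cputypval)
{
	if (cputypval[0] == 'l')	/* "leon" */
		return MODEL_LEON;

	switch (cputypval[4]) {
	case 'm':			/* sun4m */
	case 's':			/* sun4s, treated as sun4m */
		return MODEL_SUN4M;
	case 'd':			/* sun4d */
		return MODEL_SUN4D;
	default:			/* sun4e, sun4u, ... not supported here */
		return MODEL_UNSUPPORTED;
	}
}
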
diff --git a/arch/sparc/kernel/ioport.c b/arch/sparc/kernel/ioport.c
index a2846f5e32d8..0f094db918c7 100644
--- a/arch/sparc/kernel/ioport.c
+++ b/arch/sparc/kernel/ioport.c
@@ -55,17 +55,13 @@ const struct sparc32_dma_ops *sparc32_dma_ops;
55/* This function must make sure that caches and memory are coherent after DMA 55/* This function must make sure that caches and memory are coherent after DMA
56 * On LEON systems without cache snooping it flushes the entire D-CACHE. 56 * On LEON systems without cache snooping it flushes the entire D-CACHE.
57 */ 57 */
58#ifndef CONFIG_SPARC_LEON
59static inline void dma_make_coherent(unsigned long pa, unsigned long len) 58static inline void dma_make_coherent(unsigned long pa, unsigned long len)
60{ 59{
60 if (sparc_cpu_model == sparc_leon) {
61 if (!sparc_leon3_snooping_enabled())
62 leon_flush_dcache_all();
63 }
61} 64}
62#else
63static inline void dma_make_coherent(unsigned long pa, unsigned long len)
64{
65 if (!sparc_leon3_snooping_enabled())
66 leon_flush_dcache_all();
67}
68#endif
69 65
70static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz); 66static void __iomem *_sparc_ioremap(struct resource *res, u32 bus, u32 pa, int sz);
71static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys, 67static void __iomem *_sparc_alloc_io(unsigned int busno, unsigned long phys,
@@ -427,9 +423,6 @@ arch_initcall(sparc_register_ioport);
427#endif /* CONFIG_SBUS */ 423#endif /* CONFIG_SBUS */
428 424
429 425
430/* LEON reuses PCI DMA ops */
431#if defined(CONFIG_PCI) || defined(CONFIG_SPARC_LEON)
432
433/* Allocate and map kernel buffer using consistent mode DMA for a device. 426/* Allocate and map kernel buffer using consistent mode DMA for a device.
434 * hwdev should be valid struct pci_dev pointer for PCI devices. 427 * hwdev should be valid struct pci_dev pointer for PCI devices.
435 */ 428 */
@@ -657,14 +650,11 @@ struct dma_map_ops pci32_dma_ops = {
657}; 650};
658EXPORT_SYMBOL(pci32_dma_ops); 651EXPORT_SYMBOL(pci32_dma_ops);
659 652
660#endif /* CONFIG_PCI || CONFIG_SPARC_LEON */ 653/* leon re-uses pci32_dma_ops */
654struct dma_map_ops *leon_dma_ops = &pci32_dma_ops;
655EXPORT_SYMBOL(leon_dma_ops);
661 656
662#ifdef CONFIG_SPARC_LEON
663struct dma_map_ops *dma_ops = &pci32_dma_ops;
664#elif defined(CONFIG_SBUS)
665struct dma_map_ops *dma_ops = &sbus_dma_ops; 657struct dma_map_ops *dma_ops = &sbus_dma_ops;
666#endif
667
668EXPORT_SYMBOL(dma_ops); 658EXPORT_SYMBOL(dma_ops);
669 659
670 660
diff --git a/arch/sparc/kernel/irq_32.c b/arch/sparc/kernel/irq_32.c
index ae04914f7774..c145f6fd123b 100644
--- a/arch/sparc/kernel/irq_32.c
+++ b/arch/sparc/kernel/irq_32.c
@@ -241,9 +241,6 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
241 unsigned int cpu_irq; 241 unsigned int cpu_irq;
242 int err; 242 int err;
243 243
244#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON
245 struct tt_entry *trap_table;
246#endif
247 244
248 err = request_irq(irq, irq_handler, 0, "floppy", NULL); 245 err = request_irq(irq, irq_handler, 0, "floppy", NULL);
249 if (err) 246 if (err)
@@ -264,13 +261,18 @@ int sparc_floppy_request_irq(unsigned int irq, irq_handler_t irq_handler)
264 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP; 261 table[SP_TRAP_IRQ1+(cpu_irq-1)].inst_four = SPARC_NOP;
265 262
266 INSTANTIATE(sparc_ttable) 263 INSTANTIATE(sparc_ttable)
267#if defined CONFIG_SMP && !defined CONFIG_SPARC_LEON 264
268 trap_table = &trapbase_cpu1; 265#if defined CONFIG_SMP
269 INSTANTIATE(trap_table) 266 if (sparc_cpu_model != sparc_leon) {
270 trap_table = &trapbase_cpu2; 267 struct tt_entry *trap_table;
271 INSTANTIATE(trap_table) 268
272 trap_table = &trapbase_cpu3; 269 trap_table = &trapbase_cpu1;
273 INSTANTIATE(trap_table) 270 INSTANTIATE(trap_table)
271 trap_table = &trapbase_cpu2;
272 INSTANTIATE(trap_table)
273 trap_table = &trapbase_cpu3;
274 INSTANTIATE(trap_table)
275 }
274#endif 276#endif
275#undef INSTANTIATE 277#undef INSTANTIATE
276 /* 278 /*
diff --git a/arch/sparc/kernel/kernel.h b/arch/sparc/kernel/kernel.h
index a86372d34587..291bb5de9ce0 100644
--- a/arch/sparc/kernel/kernel.h
+++ b/arch/sparc/kernel/kernel.h
@@ -26,6 +26,9 @@ static inline unsigned long kimage_addr_to_ra(const char *p)
26#endif 26#endif
27 27
28#ifdef CONFIG_SPARC32 28#ifdef CONFIG_SPARC32
29/* setup_32.c */
30void sparc32_start_kernel(struct linux_romvec *rp);
31
29/* cpu.c */ 32/* cpu.c */
30extern void cpu_probe(void); 33extern void cpu_probe(void);
31 34
diff --git a/arch/sparc/kernel/leon_kernel.c b/arch/sparc/kernel/leon_kernel.c
index 77c1b916e4dd..e34e2c40c060 100644
--- a/arch/sparc/kernel/leon_kernel.c
+++ b/arch/sparc/kernel/leon_kernel.c
@@ -23,6 +23,7 @@
23#include <asm/smp.h> 23#include <asm/smp.h>
24#include <asm/setup.h> 24#include <asm/setup.h>
25 25
26#include "kernel.h"
26#include "prom.h" 27#include "prom.h"
27#include "irq.h" 28#include "irq.h"
28 29
diff --git a/arch/sparc/kernel/leon_pmc.c b/arch/sparc/kernel/leon_pmc.c
index 519ca923f59f..4e174321097d 100644
--- a/arch/sparc/kernel/leon_pmc.c
+++ b/arch/sparc/kernel/leon_pmc.c
@@ -7,6 +7,7 @@
7#include <linux/pm.h> 7#include <linux/pm.h>
8 8
9#include <asm/leon_amba.h> 9#include <asm/leon_amba.h>
10#include <asm/cpu_type.h>
10#include <asm/leon.h> 11#include <asm/leon.h>
11 12
12/* List of Systems that need fixup instructions around power-down instruction */ 13/* List of Systems that need fixup instructions around power-down instruction */
@@ -65,13 +66,15 @@ void pmc_leon_idle(void)
65/* Install LEON Power Down function */ 66/* Install LEON Power Down function */
66static int __init leon_pmc_install(void) 67static int __init leon_pmc_install(void)
67{ 68{
68 /* Assign power management IDLE handler */ 69 if (sparc_cpu_model == sparc_leon) {
69 if (pmc_leon_need_fixup()) 70 /* Assign power management IDLE handler */
70 pm_idle = pmc_leon_idle_fixup; 71 if (pmc_leon_need_fixup())
71 else 72 pm_idle = pmc_leon_idle_fixup;
72 pm_idle = pmc_leon_idle; 73 else
74 pm_idle = pmc_leon_idle;
73 75
74 printk(KERN_INFO "leon: power management initialized\n"); 76 printk(KERN_INFO "leon: power management initialized\n");
77 }
75 78
76 return 0; 79 return 0;
77} 80}
diff --git a/arch/sparc/kernel/leon_smp.c b/arch/sparc/kernel/leon_smp.c
index a469090faf9f..0f3fb6d9c8ef 100644
--- a/arch/sparc/kernel/leon_smp.c
+++ b/arch/sparc/kernel/leon_smp.c
@@ -48,15 +48,13 @@
48 48
49#include "kernel.h" 49#include "kernel.h"
50 50
51#ifdef CONFIG_SPARC_LEON
52
53#include "irq.h" 51#include "irq.h"
54 52
55extern ctxd_t *srmmu_ctx_table_phys; 53extern ctxd_t *srmmu_ctx_table_phys;
56static int smp_processors_ready; 54static int smp_processors_ready;
57extern volatile unsigned long cpu_callin_map[NR_CPUS]; 55extern volatile unsigned long cpu_callin_map[NR_CPUS];
58extern cpumask_t smp_commenced_mask; 56extern cpumask_t smp_commenced_mask;
59void __init leon_configure_cache_smp(void); 57void __cpuinit leon_configure_cache_smp(void);
60static void leon_ipi_init(void); 58static void leon_ipi_init(void);
61 59
62/* IRQ number of LEON IPIs */ 60/* IRQ number of LEON IPIs */
@@ -123,7 +121,7 @@ void __cpuinit leon_callin(void)
123 121
124extern struct linux_prom_registers smp_penguin_ctable; 122extern struct linux_prom_registers smp_penguin_ctable;
125 123
126void __init leon_configure_cache_smp(void) 124void __cpuinit leon_configure_cache_smp(void)
127{ 125{
128 unsigned long cfg = sparc_leon3_get_dcachecfg(); 126 unsigned long cfg = sparc_leon3_get_dcachecfg();
129 int me = smp_processor_id(); 127 int me = smp_processor_id();
@@ -507,5 +505,3 @@ void __init leon_init_smp(void)
507 505
508 sparc32_ipi_ops = &leon_ipi_ops; 506 sparc32_ipi_ops = &leon_ipi_ops;
509} 507}
510
511#endif /* CONFIG_SPARC_LEON */
diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
index fe6787cc62fc..cb36e82dcd5d 100644
--- a/arch/sparc/kernel/process_32.c
+++ b/arch/sparc/kernel/process_32.c
@@ -65,50 +65,25 @@ extern void fpsave(unsigned long *, unsigned long *, void *, unsigned long *);
65struct task_struct *last_task_used_math = NULL; 65struct task_struct *last_task_used_math = NULL;
66struct thread_info *current_set[NR_CPUS]; 66struct thread_info *current_set[NR_CPUS];
67 67
68#ifndef CONFIG_SMP
69
70/* 68/*
71 * the idle loop on a Sparc... ;) 69 * the idle loop on a Sparc... ;)
72 */ 70 */
73void cpu_idle(void) 71void cpu_idle(void)
74{ 72{
75 /* endless idle loop with no priority at all */ 73 set_thread_flag(TIF_POLLING_NRFLAG);
76 for (;;) {
77 if (pm_idle) {
78 while (!need_resched())
79 (*pm_idle)();
80 } else {
81 while (!need_resched())
82 cpu_relax();
83 }
84 schedule_preempt_disabled();
85 }
86}
87
88#else
89 74
90/* This is being executed in task 0 'user space'. */
91void cpu_idle(void)
92{
93 set_thread_flag(TIF_POLLING_NRFLAG);
94 /* endless idle loop with no priority at all */ 75 /* endless idle loop with no priority at all */
95 while(1) { 76 for (;;) {
96#ifdef CONFIG_SPARC_LEON 77 while (!need_resched()) {
97 if (pm_idle) { 78 if (pm_idle)
98 while (!need_resched())
99 (*pm_idle)(); 79 (*pm_idle)();
100 } else 80 else
101#endif
102 {
103 while (!need_resched())
104 cpu_relax(); 81 cpu_relax();
105 } 82 }
106 schedule_preempt_disabled(); 83 schedule_preempt_disabled();
107 } 84 }
108} 85}
109 86
110#endif
111
112/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */ 87/* XXX cli/sti -> local_irq_xxx here, check this works once SMP is fixed. */
113void machine_halt(void) 88void machine_halt(void)
114{ 89{
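
With the UP, SMP and LEON variants folded into one loop, the only platform hook left in cpu_idle() is the pm_idle function pointer, which leon_pmc.c (earlier in this patch) now installs at run time when sparc_cpu_model == sparc_leon. A minimal sketch of that contract; the callback name is made up:

/* Sketch: hooking the unified cpu_idle() loop above via pm_idle.
 * my_board_idle() is a hypothetical low-power wait routine.
 */
static void my_board_idle(void)
{
	/* wait for the next interrupt in a low-power state */
}

static int __init my_board_idle_install(void)
{
	if (sparc_cpu_model == sparc_leon)	/* runtime check, no #ifdef */
		pm_idle = my_board_idle;
	return 0;
}
device_initcall(my_board_idle_install);
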
diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
index 741df916c124..1303021748c8 100644
--- a/arch/sparc/kernel/prom_common.c
+++ b/arch/sparc/kernel/prom_common.c
@@ -23,7 +23,6 @@
23#include <linux/of_pdt.h> 23#include <linux/of_pdt.h>
24#include <asm/prom.h> 24#include <asm/prom.h>
25#include <asm/oplib.h> 25#include <asm/oplib.h>
26#include <asm/leon.h>
27 26
28#include "prom.h" 27#include "prom.h"
29 28
diff --git a/arch/sparc/kernel/rtrap_32.S b/arch/sparc/kernel/rtrap_32.S
index 7abc24e2bf1a..6c34de0c2abd 100644
--- a/arch/sparc/kernel/rtrap_32.S
+++ b/arch/sparc/kernel/rtrap_32.S
@@ -231,11 +231,14 @@ srmmu_rett_stackchk:
231 cmp %g1, %fp 231 cmp %g1, %fp
232 bleu ret_trap_user_stack_is_bolixed 232 bleu ret_trap_user_stack_is_bolixed
233 mov AC_M_SFSR, %g1 233 mov AC_M_SFSR, %g1
234 lda [%g1] ASI_M_MMUREGS, %g0 234LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g0)
235SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g0)
235 236
236 lda [%g0] ASI_M_MMUREGS, %g1 237LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %g1)
238SUN_PI_(lda [%g0] ASI_M_MMUREGS, %g1)
237 or %g1, 0x2, %g1 239 or %g1, 0x2, %g1
238 sta %g1, [%g0] ASI_M_MMUREGS 240LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
241SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
239 242
240 restore %g0, %g0, %g0 243 restore %g0, %g0, %g0
241 244
@@ -244,13 +247,16 @@ srmmu_rett_stackchk:
244 save %g0, %g0, %g0 247 save %g0, %g0, %g0
245 248
246 andn %g1, 0x2, %g1 249 andn %g1, 0x2, %g1
247 sta %g1, [%g0] ASI_M_MMUREGS 250LEON_PI(sta %g1, [%g0] ASI_LEON_MMUREGS)
251SUN_PI_(sta %g1, [%g0] ASI_M_MMUREGS)
248 252
249 mov AC_M_SFAR, %g2 253 mov AC_M_SFAR, %g2
250 lda [%g2] ASI_M_MMUREGS, %g2 254LEON_PI(lda [%g2] ASI_LEON_MMUREGS, %g2)
255SUN_PI_(lda [%g2] ASI_M_MMUREGS, %g2)
251 256
252 mov AC_M_SFSR, %g1 257 mov AC_M_SFSR, %g1
253 lda [%g1] ASI_M_MMUREGS, %g1 258LEON_PI(lda [%g1] ASI_LEON_MMUREGS, %g1)
259SUN_PI_(lda [%g1] ASI_M_MMUREGS, %g1)
254 andcc %g1, 0x2, %g0 260 andcc %g1, 0x2, %g0
255 be ret_trap_userwins_ok 261 be ret_trap_userwins_ok
256 nop 262 nop
diff --git a/arch/sparc/kernel/setup_32.c b/arch/sparc/kernel/setup_32.c
index c052313f4dc5..efe3e64bba38 100644
--- a/arch/sparc/kernel/setup_32.c
+++ b/arch/sparc/kernel/setup_32.c
@@ -32,6 +32,7 @@
32#include <linux/cpu.h> 32#include <linux/cpu.h>
33#include <linux/kdebug.h> 33#include <linux/kdebug.h>
34#include <linux/export.h> 34#include <linux/export.h>
35#include <linux/start_kernel.h>
35 36
36#include <asm/io.h> 37#include <asm/io.h>
37#include <asm/processor.h> 38#include <asm/processor.h>
@@ -45,6 +46,7 @@
45#include <asm/cpudata.h> 46#include <asm/cpudata.h>
46#include <asm/setup.h> 47#include <asm/setup.h>
47#include <asm/cacheflush.h> 48#include <asm/cacheflush.h>
49#include <asm/sections.h>
48 50
49#include "kernel.h" 51#include "kernel.h"
50 52
@@ -237,28 +239,42 @@ static void __init per_cpu_patch(void)
237 } 239 }
238} 240}
239 241
242struct leon_1insn_patch_entry {
243 unsigned int addr;
244 unsigned int insn;
245};
246
240enum sparc_cpu sparc_cpu_model; 247enum sparc_cpu sparc_cpu_model;
241EXPORT_SYMBOL(sparc_cpu_model); 248EXPORT_SYMBOL(sparc_cpu_model);
242 249
243struct tt_entry *sparc_ttable; 250static __init void leon_patch(void)
251{
252 struct leon_1insn_patch_entry *start = (void *)__leon_1insn_patch;
253 struct leon_1insn_patch_entry *end = (void *)__leon_1insn_patch_end;
244 254
245struct pt_regs fake_swapper_regs; 255 /* Default instruction is leon - no patching */
256 if (sparc_cpu_model == sparc_leon)
257 return;
246 258
247void __init setup_arch(char **cmdline_p) 259 while (start < end) {
248{ 260 unsigned long addr = start->addr;
249 int i;
250 unsigned long highest_paddr;
251 261
252 sparc_ttable = (struct tt_entry *) &trapbase; 262 *(unsigned int *)(addr) = start->insn;
263 flushi(addr);
253 264
254 /* Initialize PROM console and command line. */ 265 start++;
255 *cmdline_p = prom_getbootargs(); 266 }
256 strcpy(boot_command_line, *cmdline_p); 267}
257 parse_early_param();
258 268
259 boot_flags_init(*cmdline_p); 269struct tt_entry *sparc_ttable;
270struct pt_regs fake_swapper_regs;
260 271
261 register_console(&prom_early_console); 272/* Called from head_32.S - before we have setup anything
273 * in the kernel. Be very careful with what you do here.
274 */
275void __init sparc32_start_kernel(struct linux_romvec *rp)
276{
277 prom_init(rp);
262 278
263 /* Set sparc_cpu_model */ 279 /* Set sparc_cpu_model */
264 sparc_cpu_model = sun_unknown; 280 sparc_cpu_model = sun_unknown;
@@ -275,6 +291,26 @@ void __init setup_arch(char **cmdline_p)
275 if (!strncmp(&cputypval[0], "leon" , 4)) 291 if (!strncmp(&cputypval[0], "leon" , 4))
276 sparc_cpu_model = sparc_leon; 292 sparc_cpu_model = sparc_leon;
277 293
294 leon_patch();
295 start_kernel();
296}
297
298void __init setup_arch(char **cmdline_p)
299{
300 int i;
301 unsigned long highest_paddr;
302
303 sparc_ttable = (struct tt_entry *) &trapbase;
304
305 /* Initialize PROM console and command line. */
306 *cmdline_p = prom_getbootargs();
307 strcpy(boot_command_line, *cmdline_p);
308 parse_early_param();
309
310 boot_flags_init(*cmdline_p);
311
312 register_console(&prom_early_console);
313
278 printk("ARCH: "); 314 printk("ARCH: ");
279 switch(sparc_cpu_model) { 315 switch(sparc_cpu_model) {
280 case sun4m: 316 case sun4m:
diff --git a/arch/sparc/kernel/signal32.c b/arch/sparc/kernel/signal32.c
index bb1513e45f1a..a53e0a5fd3a3 100644
--- a/arch/sparc/kernel/signal32.c
+++ b/arch/sparc/kernel/signal32.c
@@ -32,8 +32,6 @@
32 32
33#include "sigutil.h" 33#include "sigutil.h"
34 34
35#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
36
37/* This magic should be in g_upper[0] for all upper parts 35/* This magic should be in g_upper[0] for all upper parts
38 * to be valid. 36 * to be valid.
39 */ 37 */
@@ -274,7 +272,6 @@ void do_sigreturn32(struct pt_regs *regs)
274 case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32); 272 case 2: set.sig[1] = seta[2] + (((long)seta[3]) << 32);
275 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32); 273 case 1: set.sig[0] = seta[0] + (((long)seta[1]) << 32);
276 } 274 }
277 sigdelsetmask(&set, ~_BLOCKABLE);
278 set_current_blocked(&set); 275 set_current_blocked(&set);
279 return; 276 return;
280 277
@@ -376,7 +373,6 @@ asmlinkage void do_rt_sigreturn32(struct pt_regs *regs)
376 case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32); 373 case 2: set.sig[1] = seta.sig[2] + (((long)seta.sig[3]) << 32);
377 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32); 374 case 1: set.sig[0] = seta.sig[0] + (((long)seta.sig[1]) << 32);
378 } 375 }
379 sigdelsetmask(&set, ~_BLOCKABLE);
380 set_current_blocked(&set); 376 set_current_blocked(&set);
381 return; 377 return;
382segv: 378segv:
@@ -775,7 +771,7 @@ sigsegv:
775 return -EFAULT; 771 return -EFAULT;
776} 772}
777 773
778static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka, 774static inline void handle_signal32(unsigned long signr, struct k_sigaction *ka,
779 siginfo_t *info, 775 siginfo_t *info,
780 sigset_t *oldset, struct pt_regs *regs) 776 sigset_t *oldset, struct pt_regs *regs)
781{ 777{
@@ -787,12 +783,9 @@ static inline int handle_signal32(unsigned long signr, struct k_sigaction *ka,
787 err = setup_frame32(ka, regs, signr, oldset); 783 err = setup_frame32(ka, regs, signr, oldset);
788 784
789 if (err) 785 if (err)
790 return err; 786 return;
791
792 block_sigmask(ka, signr);
793 tracehook_signal_handler(signr, info, ka, regs, 0);
794 787
795 return 0; 788 signal_delivered(signr, info, ka, regs, 0);
796} 789}
797 790
798static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs, 791static inline void syscall_restart32(unsigned long orig_i0, struct pt_regs *regs,
@@ -841,14 +834,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
841 if (signr > 0) { 834 if (signr > 0) {
842 if (restart_syscall) 835 if (restart_syscall)
843 syscall_restart32(orig_i0, regs, &ka.sa); 836 syscall_restart32(orig_i0, regs, &ka.sa);
844 if (handle_signal32(signr, &ka, &info, oldset, regs) == 0) { 837 handle_signal32(signr, &ka, &info, oldset, regs);
845 /* A signal was successfully delivered; the saved
846 * sigmask will have been stored in the signal frame,
847 * and will be restored by sigreturn, so we can simply
848 * clear the TS_RESTORE_SIGMASK flag.
849 */
850 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
851 }
852 return; 838 return;
853 } 839 }
854 if (restart_syscall && 840 if (restart_syscall &&
@@ -872,10 +858,7 @@ void do_signal32(sigset_t *oldset, struct pt_regs * regs)
872 /* If there's no signal to deliver, we just put the saved sigmask 858 /* If there's no signal to deliver, we just put the saved sigmask
873 * back 859 * back
874 */ 860 */
875 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 861 restore_saved_sigmask();
876 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
877 set_current_blocked(&current->saved_sigmask);
878 }
879} 862}
880 863
881struct sigstack32 { 864struct sigstack32 {
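
The _BLOCKABLE masking, block_sigmask() call and TS_RESTORE_SIGMASK bookkeeping removed above are all subsumed by the generic helpers this series switches to: sigmask_to_save(), signal_delivered() and restore_saved_sigmask(). Their bodies live in generic code and are not part of this diff; roughly, the contract assumed here is sketched below:

/* Sketch of the assumed generic-helper semantics (not from this diff):
 *  - sigmask_to_save(): the mask to record in the signal frame - the
 *    saved mask if a sigsuspend-style call parked one, else ->blocked.
 *  - signal_delivered(): block the handled signal per sa_mask/SA_NODEFER,
 *    drop the restore-sigmask state and notify any tracer, replacing the
 *    open-coded block_sigmask()/tracehook_signal_handler() pairs.
 */
static inline sigset_t *sigmask_to_save_sketch(void)
{
	if (test_restore_sigmask())
		return &current->saved_sigmask;
	return &current->blocked;
}
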
diff --git a/arch/sparc/kernel/signal_32.c b/arch/sparc/kernel/signal_32.c
index 2b7e849f7c65..68f9c8650af4 100644
--- a/arch/sparc/kernel/signal_32.c
+++ b/arch/sparc/kernel/signal_32.c
@@ -29,8 +29,6 @@
29 29
30#include "sigutil.h" 30#include "sigutil.h"
31 31
32#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
33
34extern void fpsave(unsigned long *fpregs, unsigned long *fsr, 32extern void fpsave(unsigned long *fpregs, unsigned long *fsr,
35 void *fpqueue, unsigned long *fpqdepth); 33 void *fpqueue, unsigned long *fpqdepth);
36extern void fpload(unsigned long *fpregs, unsigned long *fsr); 34extern void fpload(unsigned long *fpregs, unsigned long *fsr);
@@ -130,7 +128,6 @@ asmlinkage void do_sigreturn(struct pt_regs *regs)
130 if (err) 128 if (err)
131 goto segv_and_exit; 129 goto segv_and_exit;
132 130
133 sigdelsetmask(&set, ~_BLOCKABLE);
134 set_current_blocked(&set); 131 set_current_blocked(&set);
135 return; 132 return;
136 133
@@ -197,7 +194,6 @@ asmlinkage void do_rt_sigreturn(struct pt_regs *regs)
197 goto segv; 194 goto segv;
198 } 195 }
199 196
200 sigdelsetmask(&set, ~_BLOCKABLE);
201 set_current_blocked(&set); 197 set_current_blocked(&set);
202 return; 198 return;
203segv: 199segv:
@@ -449,10 +445,11 @@ sigsegv:
449 return -EFAULT; 445 return -EFAULT;
450} 446}
451 447
452static inline int 448static inline void
453handle_signal(unsigned long signr, struct k_sigaction *ka, 449handle_signal(unsigned long signr, struct k_sigaction *ka,
454 siginfo_t *info, sigset_t *oldset, struct pt_regs *regs) 450 siginfo_t *info, struct pt_regs *regs)
455{ 451{
452 sigset_t *oldset = sigmask_to_save();
456 int err; 453 int err;
457 454
458 if (ka->sa.sa_flags & SA_SIGINFO) 455 if (ka->sa.sa_flags & SA_SIGINFO)
@@ -461,12 +458,9 @@ handle_signal(unsigned long signr, struct k_sigaction *ka,
461 err = setup_frame(ka, regs, signr, oldset); 458 err = setup_frame(ka, regs, signr, oldset);
462 459
463 if (err) 460 if (err)
464 return err; 461 return;
465
466 block_sigmask(ka, signr);
467 tracehook_signal_handler(signr, info, ka, regs, 0);
468 462
469 return 0; 463 signal_delivered(signr, info, ka, regs, 0);
470} 464}
471 465
472static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 466static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -498,7 +492,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
498{ 492{
499 struct k_sigaction ka; 493 struct k_sigaction ka;
500 int restart_syscall; 494 int restart_syscall;
501 sigset_t *oldset;
502 siginfo_t info; 495 siginfo_t info;
503 int signr; 496 int signr;
504 497
@@ -523,11 +516,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
523 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C)) 516 if (pt_regs_is_syscall(regs) && (regs->psr & PSR_C))
524 regs->u_regs[UREG_G6] = orig_i0; 517 regs->u_regs[UREG_G6] = orig_i0;
525 518
526 if (test_thread_flag(TIF_RESTORE_SIGMASK))
527 oldset = &current->saved_sigmask;
528 else
529 oldset = &current->blocked;
530
531 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 519 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
532 520
533 /* If the debugger messes with the program counter, it clears 521 /* If the debugger messes with the program counter, it clears
@@ -544,15 +532,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
544 if (signr > 0) { 532 if (signr > 0) {
545 if (restart_syscall) 533 if (restart_syscall)
546 syscall_restart(orig_i0, regs, &ka.sa); 534 syscall_restart(orig_i0, regs, &ka.sa);
547 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { 535 handle_signal(signr, &ka, &info, regs);
548 /* a signal was successfully delivered; the saved
549 * sigmask will have been stored in the signal frame,
550 * and will be restored by sigreturn, so we can simply
551 * clear the TIF_RESTORE_SIGMASK flag.
552 */
553 if (test_thread_flag(TIF_RESTORE_SIGMASK))
554 clear_thread_flag(TIF_RESTORE_SIGMASK);
555 }
556 return; 536 return;
557 } 537 }
558 if (restart_syscall && 538 if (restart_syscall &&
@@ -576,22 +556,17 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
576 /* if there's no signal to deliver, we just put the saved sigmask 556 /* if there's no signal to deliver, we just put the saved sigmask
577 * back 557 * back
578 */ 558 */
579 if (test_thread_flag(TIF_RESTORE_SIGMASK)) { 559 restore_saved_sigmask();
580 clear_thread_flag(TIF_RESTORE_SIGMASK);
581 set_current_blocked(&current->saved_sigmask);
582 }
583} 560}
584 561
585void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, 562void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0,
586 unsigned long thread_info_flags) 563 unsigned long thread_info_flags)
587{ 564{
588 if (thread_info_flags & (_TIF_SIGPENDING | _TIF_RESTORE_SIGMASK)) 565 if (thread_info_flags & _TIF_SIGPENDING)
589 do_signal(regs, orig_i0); 566 do_signal(regs, orig_i0);
590 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 567 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
591 clear_thread_flag(TIF_NOTIFY_RESUME); 568 clear_thread_flag(TIF_NOTIFY_RESUME);
592 tracehook_notify_resume(regs); 569 tracehook_notify_resume(regs);
593 if (current->replacement_session_keyring)
594 key_replace_session_keyring();
595 } 570 }
596} 571}
597 572
diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c
index eafaab486b2d..867de2f8189c 100644
--- a/arch/sparc/kernel/signal_64.c
+++ b/arch/sparc/kernel/signal_64.c
@@ -38,8 +38,6 @@
38#include "systbls.h" 38#include "systbls.h"
39#include "sigutil.h" 39#include "sigutil.h"
40 40
41#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
42
43/* {set, get}context() needed for 64-bit SparcLinux userland. */ 41/* {set, get}context() needed for 64-bit SparcLinux userland. */
44asmlinkage void sparc64_set_context(struct pt_regs *regs) 42asmlinkage void sparc64_set_context(struct pt_regs *regs)
45{ 43{
@@ -71,7 +69,6 @@ asmlinkage void sparc64_set_context(struct pt_regs *regs)
71 if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t))) 69 if (__copy_from_user(&set, &ucp->uc_sigmask, sizeof(sigset_t)))
72 goto do_sigsegv; 70 goto do_sigsegv;
73 } 71 }
74 sigdelsetmask(&set, ~_BLOCKABLE);
75 set_current_blocked(&set); 72 set_current_blocked(&set);
76 } 73 }
77 if (test_thread_flag(TIF_32BIT)) { 74 if (test_thread_flag(TIF_32BIT)) {
@@ -315,7 +312,6 @@ void do_rt_sigreturn(struct pt_regs *regs)
315 /* Prevent syscall restart. */ 312 /* Prevent syscall restart. */
316 pt_regs_clear_syscall(regs); 313 pt_regs_clear_syscall(regs);
317 314
318 sigdelsetmask(&set, ~_BLOCKABLE);
319 set_current_blocked(&set); 315 set_current_blocked(&set);
320 return; 316 return;
321segv: 317segv:
@@ -466,7 +462,7 @@ sigsegv:
466 return -EFAULT; 462 return -EFAULT;
467} 463}
468 464
469static inline int handle_signal(unsigned long signr, struct k_sigaction *ka, 465static inline void handle_signal(unsigned long signr, struct k_sigaction *ka,
470 siginfo_t *info, 466 siginfo_t *info,
471 sigset_t *oldset, struct pt_regs *regs) 467 sigset_t *oldset, struct pt_regs *regs)
472{ 468{
@@ -475,12 +471,9 @@ static inline int handle_signal(unsigned long signr, struct k_sigaction *ka,
475 err = setup_rt_frame(ka, regs, signr, oldset, 471 err = setup_rt_frame(ka, regs, signr, oldset,
476 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL); 472 (ka->sa.sa_flags & SA_SIGINFO) ? info : NULL);
477 if (err) 473 if (err)
478 return err; 474 return;
479
480 block_sigmask(ka, signr);
481 tracehook_signal_handler(signr, info, ka, regs, 0);
482 475
483 return 0; 476 signal_delivered(signr, info, ka, regs, 0);
484} 477}
485 478
486static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs, 479static inline void syscall_restart(unsigned long orig_i0, struct pt_regs *regs,
@@ -512,7 +505,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
512{ 505{
513 struct k_sigaction ka; 506 struct k_sigaction ka;
514 int restart_syscall; 507 int restart_syscall;
515 sigset_t *oldset; 508 sigset_t *oldset = sigmask_to_save();
516 siginfo_t info; 509 siginfo_t info;
517 int signr; 510 int signr;
518 511
@@ -538,11 +531,6 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
538 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY))) 531 (regs->tstate & (TSTATE_XCARRY | TSTATE_ICARRY)))
539 regs->u_regs[UREG_G6] = orig_i0; 532 regs->u_regs[UREG_G6] = orig_i0;
540 533
541 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
542 oldset = &current->saved_sigmask;
543 else
544 oldset = &current->blocked;
545
546#ifdef CONFIG_COMPAT 534#ifdef CONFIG_COMPAT
547 if (test_thread_flag(TIF_32BIT)) { 535 if (test_thread_flag(TIF_32BIT)) {
548 extern void do_signal32(sigset_t *, struct pt_regs *); 536 extern void do_signal32(sigset_t *, struct pt_regs *);
@@ -563,14 +551,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
563 if (signr > 0) { 551 if (signr > 0) {
564 if (restart_syscall) 552 if (restart_syscall)
565 syscall_restart(orig_i0, regs, &ka.sa); 553 syscall_restart(orig_i0, regs, &ka.sa);
566 if (handle_signal(signr, &ka, &info, oldset, regs) == 0) { 554 handle_signal(signr, &ka, &info, oldset, regs);
567 /* A signal was successfully delivered; the saved
568 * sigmask will have been stored in the signal frame,
569 * and will be restored by sigreturn, so we can simply
570 * clear the TS_RESTORE_SIGMASK flag.
571 */
572 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
573 }
574 return; 555 return;
575 } 556 }
576 if (restart_syscall && 557 if (restart_syscall &&
@@ -594,10 +575,7 @@ static void do_signal(struct pt_regs *regs, unsigned long orig_i0)
594 /* If there's no signal to deliver, we just put the saved sigmask 575 /* If there's no signal to deliver, we just put the saved sigmask
595 * back 576 * back
596 */ 577 */
597 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 578 restore_saved_sigmask();
598 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
599 set_current_blocked(&current->saved_sigmask);
600 }
601} 579}
602 580
603void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags) 581void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long thread_info_flags)
@@ -607,8 +585,6 @@ void do_notify_resume(struct pt_regs *regs, unsigned long orig_i0, unsigned long
607 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 585 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
608 clear_thread_flag(TIF_NOTIFY_RESUME); 586 clear_thread_flag(TIF_NOTIFY_RESUME);
609 tracehook_notify_resume(regs); 587 tracehook_notify_resume(regs);
610 if (current->replacement_session_keyring)
611 key_replace_session_keyring();
612 } 588 }
613} 589}
614 590
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index 3ee51f189a55..275f74fd6f6a 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -580,16 +580,9 @@ SYSCALL_DEFINE5(64_mremap, unsigned long, addr, unsigned long, old_len,
580 unsigned long, new_len, unsigned long, flags, 580 unsigned long, new_len, unsigned long, flags,
581 unsigned long, new_addr) 581 unsigned long, new_addr)
582{ 582{
583 unsigned long ret = -EINVAL;
584
585 if (test_thread_flag(TIF_32BIT)) 583 if (test_thread_flag(TIF_32BIT))
586 goto out; 584 return -EINVAL;
587 585 return sys_mremap(addr, old_len, new_len, flags, new_addr);
588 down_write(&current->mm->mmap_sem);
589 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
590 up_write(&current->mm->mmap_sem);
591out:
592 return ret;
593} 586}
594 587
595/* we come to here via sys_nis_syscall so it can setup the regs argument */ 588/* we come to here via sys_nis_syscall so it can setup the regs argument */
diff --git a/arch/sparc/kernel/trampoline_32.S b/arch/sparc/kernel/trampoline_32.S
index 7364ddc9e5aa..af27acab4486 100644
--- a/arch/sparc/kernel/trampoline_32.S
+++ b/arch/sparc/kernel/trampoline_32.S
@@ -149,8 +149,6 @@ sun4d_cpu_startup:
149 149
150 b,a smp_do_cpu_idle 150 b,a smp_do_cpu_idle
151 151
152#ifdef CONFIG_SPARC_LEON
153
154 __CPUINIT 152 __CPUINIT
155 .align 4 153 .align 4
156 .global leon_smp_cpu_startup, smp_penguin_ctable 154 .global leon_smp_cpu_startup, smp_penguin_ctable
@@ -161,7 +159,7 @@ leon_smp_cpu_startup:
161 ld [%g1+4],%g1 159 ld [%g1+4],%g1
162 srl %g1,4,%g1 160 srl %g1,4,%g1
163 set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */ 161 set 0x00000100,%g5 /* SRMMU_CTXTBL_PTR */
164 sta %g1, [%g5] ASI_M_MMUREGS 162 sta %g1, [%g5] ASI_LEON_MMUREGS
165 163
166 /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */ 164 /* Set up a sane %psr -- PIL<0xf> S<0x1> PS<0x1> CWP<0x0> */
167 set (PSR_PIL | PSR_S | PSR_PS), %g1 165 set (PSR_PIL | PSR_S | PSR_PS), %g1
@@ -207,5 +205,3 @@ leon_smp_cpu_startup:
207 nop 205 nop
208 206
209 b,a smp_do_cpu_idle 207 b,a smp_do_cpu_idle
210
211#endif
diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
index c72fdf55e1c1..3b05e6697710 100644
--- a/arch/sparc/kernel/traps_64.c
+++ b/arch/sparc/kernel/traps_64.c
@@ -2054,7 +2054,7 @@ void do_fpieee(struct pt_regs *regs)
2054 do_fpe_common(regs); 2054 do_fpe_common(regs);
2055} 2055}
2056 2056
2057extern int do_mathemu(struct pt_regs *, struct fpustate *); 2057extern int do_mathemu(struct pt_regs *, struct fpustate *, bool);
2058 2058
2059void do_fpother(struct pt_regs *regs) 2059void do_fpother(struct pt_regs *regs)
2060{ 2060{
@@ -2068,7 +2068,7 @@ void do_fpother(struct pt_regs *regs)
2068 switch ((current_thread_info()->xfsr[0] & 0x1c000)) { 2068 switch ((current_thread_info()->xfsr[0] & 0x1c000)) {
2069 case (2 << 14): /* unfinished_FPop */ 2069 case (2 << 14): /* unfinished_FPop */
2070 case (3 << 14): /* unimplemented_FPop */ 2070 case (3 << 14): /* unimplemented_FPop */
2071 ret = do_mathemu(regs, f); 2071 ret = do_mathemu(regs, f, false);
2072 break; 2072 break;
2073 } 2073 }
2074 if (ret) 2074 if (ret)
@@ -2308,10 +2308,12 @@ void do_illegal_instruction(struct pt_regs *regs)
2308 } else { 2308 } else {
2309 struct fpustate *f = FPUSTATE; 2309 struct fpustate *f = FPUSTATE;
2310 2310
2311 /* XXX maybe verify XFSR bits like 2311 /* On UltraSPARC T2 and later, FPU insns which
2312 * XXX do_fpother() does? 2312 * are not implemented in HW signal an illegal
2313 * instruction trap and do not set the FP Trap
2314 * Trap in the %fsr to unimplemented_FPop.
2313 */ 2315 */
2314 if (do_mathemu(regs, f)) 2316 if (do_mathemu(regs, f, true))
2315 return; 2317 return;
2316 } 2318 }
2317 } 2319 }
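The new boolean argument threaded through do_mathemu() here distinguishes the two entry paths: coming from do_fpother() the FP Trap Type field in the %fsr is valid and must match the decoded instruction, while coming from do_illegal_instruction() on UltraSPARC T2 and later the %fsr is left untouched, so the check has to be skipped. A minimal sketch of that check is shown below; the helper name and parameters are illustrative, not kernel symbols.

    #include <linux/types.h>

    /* Sketch only: mirrors the FTT check added to do_mathemu() further down.
     * ftt_is_consistent()/decoded_type are illustrative names, not kernel symbols.
     */
    static bool ftt_is_consistent(unsigned long fsr, int decoded_type,
                                  bool illegal_insn_trap)
    {
            if (illegal_insn_trap)
                    return true;    /* %fsr.ftt was never set on this path */

            /* FP Trap Type occupies bits 16:14 of the %fsr, hence the 0x7 mask */
            return ((fsr >> 14) & 0x7) == decoded_type;
    }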
diff --git a/arch/sparc/kernel/vio.c b/arch/sparc/kernel/vio.c
index 5cffdc55f075..3e244f31e56b 100644
--- a/arch/sparc/kernel/vio.c
+++ b/arch/sparc/kernel/vio.c
@@ -443,7 +443,7 @@ static int __init vio_init(void)
443 root_vdev = vio_create_one(hp, root, NULL); 443 root_vdev = vio_create_one(hp, root, NULL);
444 err = -ENODEV; 444 err = -ENODEV;
445 if (!root_vdev) { 445 if (!root_vdev) {
446 printk(KERN_ERR "VIO: Coult not create root device.\n"); 446 printk(KERN_ERR "VIO: Could not create root device.\n");
447 goto out_release; 447 goto out_release;
448 } 448 }
449 449
diff --git a/arch/sparc/kernel/vmlinux.lds.S b/arch/sparc/kernel/vmlinux.lds.S
index 0e1605697b49..89c2c29f154b 100644
--- a/arch/sparc/kernel/vmlinux.lds.S
+++ b/arch/sparc/kernel/vmlinux.lds.S
@@ -107,6 +107,11 @@ SECTIONS
107 *(.sun4v_2insn_patch) 107 *(.sun4v_2insn_patch)
108 __sun4v_2insn_patch_end = .; 108 __sun4v_2insn_patch_end = .;
109 } 109 }
110 .leon_1insn_patch : {
111 __leon_1insn_patch = .;
112 *(.leon_1insn_patch)
113 __leon_1insn_patch_end = .;
114 }
110 .swapper_tsb_phys_patch : { 115 .swapper_tsb_phys_patch : {
111 __swapper_tsb_phys_patch = .; 116 __swapper_tsb_phys_patch = .;
112 *(.swapper_tsb_phys_patch) 117 *(.swapper_tsb_phys_patch)
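The new .leon_1insn_patch output section collects the run-time patch records emitted by the LEON_PI()/SUN_PI_() macro pairs introduced in the files that follow. Each record presumably pairs the address of an instruction assembled with the SUN ASI with the LEON replacement for it, so that early boot code on LEON can walk the table bounded by __leon_1insn_patch/__leon_1insn_patch_end and rewrite the instructions in place. A rough sketch of that pass follows; the two-word entry layout and the function name are assumptions, not taken from this diff.

    /* Sketch of applying the .leon_1insn_patch table at boot on LEON.
     * The entry layout and the function name are assumed, not shown here.
     */
    struct leon_1insn_patch_entry {
            unsigned int addr;      /* address of the instruction to rewrite */
            unsigned int insn;      /* LEON variant of that instruction */
    };

    extern struct leon_1insn_patch_entry __leon_1insn_patch[];
    extern struct leon_1insn_patch_entry __leon_1insn_patch_end[];

    static void __init leon_apply_1insn_patches(void)
    {
            struct leon_1insn_patch_entry *p;

            for (p = __leon_1insn_patch; p < __leon_1insn_patch_end; p++) {
                    *(unsigned int *)(unsigned long)p->addr = p->insn;
                    /* an I-cache flush of p->addr would follow here */
            }
    }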
diff --git a/arch/sparc/kernel/wof.S b/arch/sparc/kernel/wof.S
index 4c2de3cf309b..28a7bc69f82b 100644
--- a/arch/sparc/kernel/wof.S
+++ b/arch/sparc/kernel/wof.S
@@ -332,24 +332,30 @@ spwin_srmmu_stackchk:
332 mov AC_M_SFSR, %glob_tmp 332 mov AC_M_SFSR, %glob_tmp
333 333
334 /* Clear the fault status and turn on the no_fault bit. */ 334 /* Clear the fault status and turn on the no_fault bit. */
335 lda [%glob_tmp] ASI_M_MMUREGS, %g0 ! eat SFSR 335LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0) ! eat SFSR
336SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0) ! eat SFSR
336 337
337 lda [%g0] ASI_M_MMUREGS, %glob_tmp ! read MMU control 338LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
339SUN_PI_(lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
338 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit 340 or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
339 sta %glob_tmp, [%g0] ASI_M_MMUREGS ! set it 341LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
342SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
340 343
341 /* Dump the registers and cross fingers. */ 344 /* Dump the registers and cross fingers. */
342 STORE_WINDOW(sp) 345 STORE_WINDOW(sp)
343 346
344 /* Clear the no_fault bit and check the status. */ 347 /* Clear the no_fault bit and check the status. */
345 andn %glob_tmp, 0x2, %glob_tmp 348 andn %glob_tmp, 0x2, %glob_tmp
346 sta %glob_tmp, [%g0] ASI_M_MMUREGS 349LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
350SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
347 351
348 mov AC_M_SFAR, %glob_tmp 352 mov AC_M_SFAR, %glob_tmp
349 lda [%glob_tmp] ASI_M_MMUREGS, %g0 353LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
354SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
350 355
351 mov AC_M_SFSR, %glob_tmp 356 mov AC_M_SFSR, %glob_tmp
352 lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp 357LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp)
358SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp)
353 andcc %glob_tmp, 0x2, %g0 ! did we fault? 359 andcc %glob_tmp, 0x2, %g0 ! did we fault?
354 be,a spwin_finish_up + 0x4 ! cool beans, success 360 be,a spwin_finish_up + 0x4 ! cool beans, success
355 restore %g0, %g0, %g0 361 restore %g0, %g0, %g0
diff --git a/arch/sparc/kernel/wuf.S b/arch/sparc/kernel/wuf.S
index 9fde91a249e0..2c21cc59683e 100644
--- a/arch/sparc/kernel/wuf.S
+++ b/arch/sparc/kernel/wuf.S
@@ -254,16 +254,19 @@ srmmu_fwin_stackchk:
254 mov AC_M_SFSR, %l4 254 mov AC_M_SFSR, %l4
255 cmp %l5, %sp 255 cmp %l5, %sp
256 bleu fwin_user_stack_is_bolixed 256 bleu fwin_user_stack_is_bolixed
257 lda [%l4] ASI_M_MMUREGS, %g0 ! clear fault status 257LEON_PI( lda [%l4] ASI_LEON_MMUREGS, %g0) ! clear fault status
258SUN_PI_( lda [%l4] ASI_M_MMUREGS, %g0) ! clear fault status
258 259
259 /* The technique is, turn off faults on this processor, 260 /* The technique is, turn off faults on this processor,
260 * just let the load rip, then check the sfsr to see if 261 * just let the load rip, then check the sfsr to see if
261 * a fault did occur. Then we turn on fault traps again 262 * a fault did occur. Then we turn on fault traps again
262 * and branch conditionally based upon what happened. 263 * and branch conditionally based upon what happened.
263 */ 264 */
264 lda [%g0] ASI_M_MMUREGS, %l5 ! read mmu-ctrl reg 265LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %l5) ! read mmu-ctrl reg
266SUN_PI_(lda [%g0] ASI_M_MMUREGS, %l5) ! read mmu-ctrl reg
265 or %l5, 0x2, %l5 ! turn on no-fault bit 267 or %l5, 0x2, %l5 ! turn on no-fault bit
266 sta %l5, [%g0] ASI_M_MMUREGS ! store it 268LEON_PI(sta %l5, [%g0] ASI_LEON_MMUREGS) ! store it
269SUN_PI_(sta %l5, [%g0] ASI_M_MMUREGS) ! store it
267 270
268 /* Cross fingers and go for it. */ 271 /* Cross fingers and go for it. */
269 LOAD_WINDOW(sp) 272 LOAD_WINDOW(sp)
@@ -275,18 +278,22 @@ srmmu_fwin_stackchk:
275 278
276 /* LOCATION: Window 'T' */ 279 /* LOCATION: Window 'T' */
277 280
278 lda [%g0] ASI_M_MMUREGS, %twin_tmp1 ! load mmu-ctrl again 281LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
279 andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit 282SUN_PI_(lda [%g0] ASI_M_MMUREGS, %twin_tmp1) ! load mmu-ctrl again
280 sta %twin_tmp1, [%g0] ASI_M_MMUREGS ! store it 283 andn %twin_tmp1, 0x2, %twin_tmp1 ! clear no-fault bit
284LEON_PI(sta %twin_tmp1, [%g0] ASI_LEON_MMUREGS) ! store it
285SUN_PI_(sta %twin_tmp1, [%g0] ASI_M_MMUREGS) ! store it
281 286
282 mov AC_M_SFAR, %twin_tmp2 287 mov AC_M_SFAR, %twin_tmp2
283 lda [%twin_tmp2] ASI_M_MMUREGS, %g0 ! read fault address 288LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %g0) ! read fault address
289SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %g0) ! read fault address
284 290
285 mov AC_M_SFSR, %twin_tmp2 291 mov AC_M_SFSR, %twin_tmp2
286 lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2 ! read fault status 292LEON_PI(lda [%twin_tmp2] ASI_LEON_MMUREGS, %twin_tmp2) ! read fault status
287 andcc %twin_tmp2, 0x2, %g0 ! did fault occur? 293SUN_PI_(lda [%twin_tmp2] ASI_M_MMUREGS, %twin_tmp2) ! read fault status
294 andcc %twin_tmp2, 0x2, %g0 ! did fault occur?
288 295
289 bne 1f ! yep, cleanup 296 bne 1f ! yep, cleanup
290 nop 297 nop
291 298
292 wr %t_psr, 0x0, %psr 299 wr %t_psr, 0x0, %psr
diff --git a/arch/sparc/math-emu/math_64.c b/arch/sparc/math-emu/math_64.c
index 2bbe2f28ad23..1704068da928 100644
--- a/arch/sparc/math-emu/math_64.c
+++ b/arch/sparc/math-emu/math_64.c
@@ -163,7 +163,7 @@ typedef union {
163 u64 q[2]; 163 u64 q[2];
164} *argp; 164} *argp;
165 165
166int do_mathemu(struct pt_regs *regs, struct fpustate *f) 166int do_mathemu(struct pt_regs *regs, struct fpustate *f, bool illegal_insn_trap)
167{ 167{
168 unsigned long pc = regs->tpc; 168 unsigned long pc = regs->tpc;
169 unsigned long tstate = regs->tstate; 169 unsigned long tstate = regs->tstate;
@@ -218,7 +218,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
218 case FSQRTS: { 218 case FSQRTS: {
219 unsigned long x = current_thread_info()->xfsr[0]; 219 unsigned long x = current_thread_info()->xfsr[0];
220 220
221 x = (x >> 14) & 0xf; 221 x = (x >> 14) & 0x7;
222 TYPE(x,1,1,1,1,0,0); 222 TYPE(x,1,1,1,1,0,0);
223 break; 223 break;
224 } 224 }
@@ -226,7 +226,7 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
226 case FSQRTD: { 226 case FSQRTD: {
227 unsigned long x = current_thread_info()->xfsr[0]; 227 unsigned long x = current_thread_info()->xfsr[0];
228 228
229 x = (x >> 14) & 0xf; 229 x = (x >> 14) & 0x7;
230 TYPE(x,2,1,2,1,0,0); 230 TYPE(x,2,1,2,1,0,0);
231 break; 231 break;
232 } 232 }
@@ -357,9 +357,17 @@ int do_mathemu(struct pt_regs *regs, struct fpustate *f)
357 if (type) { 357 if (type) {
358 argp rs1 = NULL, rs2 = NULL, rd = NULL; 358 argp rs1 = NULL, rs2 = NULL, rd = NULL;
359 359
360 freg = (current_thread_info()->xfsr[0] >> 14) & 0xf; 360 /* Starting with UltraSPARC-T2, the cpu does not set the FP Trap
361 if (freg != (type >> 9)) 361 * Type field in the %fsr to unimplemented_FPop. Nor does it
362 goto err; 362 * use the fp_exception_other trap. Instead it signals an
363 * illegal instruction and leaves the FP trap type field of
364 * the %fsr unchanged.
365 */
366 if (!illegal_insn_trap) {
367 int ftt = (current_thread_info()->xfsr[0] >> 14) & 0x7;
368 if (ftt != (type >> 9))
369 goto err;
370 }
363 current_thread_info()->xfsr[0] &= ~0x1c000; 371 current_thread_info()->xfsr[0] &= ~0x1c000;
364 freg = ((insn >> 14) & 0x1f); 372 freg = ((insn >> 14) & 0x1f);
365 switch (type & 0x3) { 373 switch (type & 0x3) {
diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
index 69ffd3112fed..30c3eccfdf5a 100644
--- a/arch/sparc/mm/Makefile
+++ b/arch/sparc/mm/Makefile
@@ -8,8 +8,9 @@ obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
8obj-y += fault_$(BITS).o 8obj-y += fault_$(BITS).o
9obj-y += init_$(BITS).o 9obj-y += init_$(BITS).o
10obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o 10obj-$(CONFIG_SPARC32) += extable.o srmmu.o iommu.o io-unit.o
11obj-$(CONFIG_SPARC32) += srmmu_access.o
11obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o 12obj-$(CONFIG_SPARC32) += hypersparc.o viking.o tsunami.o swift.o
12obj-$(CONFIG_SPARC_LEON)+= leon_mm.o 13obj-$(CONFIG_SPARC32) += leon_mm.o
13 14
14# Only used by sparc64 15# Only used by sparc64
15obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o 16obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
diff --git a/arch/sparc/mm/leon_mm.c b/arch/sparc/mm/leon_mm.c
index 4c67ae6e5023..5bed085a2c17 100644
--- a/arch/sparc/mm/leon_mm.c
+++ b/arch/sparc/mm/leon_mm.c
@@ -32,7 +32,7 @@ static inline unsigned long leon_get_ctable_ptr(void)
32} 32}
33 33
34 34
35unsigned long srmmu_swprobe(unsigned long vaddr, unsigned long *paddr) 35unsigned long leon_swprobe(unsigned long vaddr, unsigned long *paddr)
36{ 36{
37 37
38 unsigned int ctxtbl; 38 unsigned int ctxtbl;
diff --git a/arch/sparc/mm/srmmu.c b/arch/sparc/mm/srmmu.c
index 256db6b22c54..62e3f5773303 100644
--- a/arch/sparc/mm/srmmu.c
+++ b/arch/sparc/mm/srmmu.c
@@ -646,6 +646,23 @@ static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
646 } 646 }
647} 647}
648 648
649/* These flush types are not available on all chips... */
650static inline unsigned long srmmu_probe(unsigned long vaddr)
651{
652 unsigned long retval;
653
654 if (sparc_cpu_model != sparc_leon) {
655
656 vaddr &= PAGE_MASK;
657 __asm__ __volatile__("lda [%1] %2, %0\n\t" :
658 "=r" (retval) :
659 "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
660 } else {
661 retval = leon_swprobe(vaddr, 0);
662 }
663 return retval;
664}
665
649/* 666/*
650 * This is much cleaner than poking around physical address space 667 * This is much cleaner than poking around physical address space
651 * looking at the prom's page table directly which is what most 668 * looking at the prom's page table directly which is what most
@@ -665,7 +682,7 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
665 break; /* probably wrap around */ 682 break; /* probably wrap around */
666 if(start == 0xfef00000) 683 if(start == 0xfef00000)
667 start = KADB_DEBUGGER_BEGVM; 684 start = KADB_DEBUGGER_BEGVM;
668 if(!(prompte = srmmu_hwprobe(start))) { 685 if(!(prompte = srmmu_probe(start))) {
669 start += PAGE_SIZE; 686 start += PAGE_SIZE;
670 continue; 687 continue;
671 } 688 }
@@ -674,12 +691,12 @@ static void __init srmmu_inherit_prom_mappings(unsigned long start,
674 what = 0; 691 what = 0;
675 692
676 if(!(start & ~(SRMMU_REAL_PMD_MASK))) { 693 if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
677 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte) 694 if(srmmu_probe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
678 what = 1; 695 what = 1;
679 } 696 }
680 697
681 if(!(start & ~(SRMMU_PGDIR_MASK))) { 698 if(!(start & ~(SRMMU_PGDIR_MASK))) {
682 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) == 699 if(srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
683 prompte) 700 prompte)
684 what = 2; 701 what = 2;
685 } 702 }
@@ -1156,7 +1173,7 @@ static void turbosparc_flush_page_to_ram(unsigned long page)
1156#ifdef TURBOSPARC_WRITEBACK 1173#ifdef TURBOSPARC_WRITEBACK
1157 volatile unsigned long clear; 1174 volatile unsigned long clear;
1158 1175
1159 if (srmmu_hwprobe(page)) 1176 if (srmmu_probe(page))
1160 turbosparc_flush_page_cache(page); 1177 turbosparc_flush_page_cache(page);
1161 clear = srmmu_get_fstatus(); 1178 clear = srmmu_get_fstatus();
1162#endif 1179#endif
diff --git a/arch/sparc/mm/srmmu_access.S b/arch/sparc/mm/srmmu_access.S
new file mode 100644
index 000000000000..d0a67b2c2383
--- /dev/null
+++ b/arch/sparc/mm/srmmu_access.S
@@ -0,0 +1,82 @@
1/* Assembler variants of srmmu access functions.
2 * Implemented in assembler to allow run-time patching.
3 * LEON uses a different ASI for MMUREGS than SUN.
4 *
5 * The leon_1insn_patch infrastructure is used
6 * for the run-time patching.
7 */
8
9#include <linux/linkage.h>
10
11#include <asm/asmmacro.h>
12#include <asm/pgtsrmmu.h>
13#include <asm/asi.h>
14
15/* unsigned int srmmu_get_mmureg(void) */
16ENTRY(srmmu_get_mmureg)
17LEON_PI(lda [%g0] ASI_LEON_MMUREGS, %o0)
18SUN_PI_(lda [%g0] ASI_M_MMUREGS, %o0)
19 retl
20 nop
21ENDPROC(srmmu_get_mmureg)
22
23/* void srmmu_set_mmureg(unsigned long regval) */
24ENTRY(srmmu_set_mmureg)
25LEON_PI(sta %o0, [%g0] ASI_LEON_MMUREGS)
26SUN_PI_(sta %o0, [%g0] ASI_M_MMUREGS)
27 retl
28 nop
29ENDPROC(srmmu_set_mmureg)
30
31/* void srmmu_set_ctable_ptr(unsigned long paddr) */
32ENTRY(srmmu_set_ctable_ptr)
33 /* paddr = ((paddr >> 4) & SRMMU_CTX_PMASK); */
34 srl %o0, 4, %g1
35 and %g1, SRMMU_CTX_PMASK, %g1
36
37 mov SRMMU_CTXTBL_PTR, %g2
38LEON_PI(sta %g1, [%g2] ASI_LEON_MMUREGS)
39SUN_PI_(sta %g1, [%g2] ASI_M_MMUREGS)
40 retl
41 nop
42ENDPROC(srmmu_set_ctable_ptr)
43
44
45/* void srmmu_set_context(int context) */
46ENTRY(srmmu_set_context)
47 mov SRMMU_CTX_REG, %g1
48LEON_PI(sta %o0, [%g1] ASI_LEON_MMUREGS)
49SUN_PI_(sta %o0, [%g1] ASI_M_MMUREGS)
50 retl
51 nop
52ENDPROC(srmmu_set_context)
53
54
55/* int srmmu_get_context(void) */
56ENTRY(srmmu_get_context)
57 mov SRMMU_CTX_REG, %o0
58LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
59SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
60 retl
61 nop
62ENDPROC(srmmu_get_context)
63
64
65/* unsigned int srmmu_get_fstatus(void) */
66ENTRY(srmmu_get_fstatus)
67 mov SRMMU_FAULT_STATUS, %o0
68LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
69SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
70 retl
71 nop
72ENDPROC(srmmu_get_fstatus)
73
74
75/* unsigned int srmmu_get_faddr(void) */
76ENTRY(srmmu_get_faddr)
77 mov SRMMU_FAULT_ADDR, %o0
78LEON_PI(lda [%o0] ASI_LEON_MMUREGS, %o0)
79SUN_PI_(lda [%o0] ASI_M_MMUREGS, %o0)
80 retl
81 nop
82ENDPROC(srmmu_get_faddr)
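Collected from the comments above, the C prototypes these assembler entry points implement are listed below; which header declares them is not shown in this diff.

    /* Prototypes matching the assembler implementations in srmmu_access.S;
     * the header they belong in (possibly asm/pgtsrmmu.h) is an assumption.
     */
    unsigned int srmmu_get_mmureg(void);
    void         srmmu_set_mmureg(unsigned long regval);
    void         srmmu_set_ctable_ptr(unsigned long paddr);
    void         srmmu_set_context(int context);
    int          srmmu_get_context(void);
    unsigned int srmmu_get_fstatus(void);
    unsigned int srmmu_get_faddr(void);

Implementing them in assembly rather than as C inlines is what makes the single-instruction run-time patching above workable: each accessor is essentially one lda/sta whose ASI can be rewritten at boot.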
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 69adc08d36a5..6e74450ff0a1 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -44,7 +44,6 @@ typedef __kernel_uid32_t __compat_gid32_t;
44typedef __kernel_mode_t compat_mode_t; 44typedef __kernel_mode_t compat_mode_t;
45typedef __kernel_dev_t compat_dev_t; 45typedef __kernel_dev_t compat_dev_t;
46typedef __kernel_loff_t compat_loff_t; 46typedef __kernel_loff_t compat_loff_t;
47typedef __kernel_nlink_t compat_nlink_t;
48typedef __kernel_ipc_pid_t compat_ipc_pid_t; 47typedef __kernel_ipc_pid_t compat_ipc_pid_t;
49typedef __kernel_daddr_t compat_daddr_t; 48typedef __kernel_daddr_t compat_daddr_t;
50typedef __kernel_fsid_t compat_fsid_t; 49typedef __kernel_fsid_t compat_fsid_t;
diff --git a/arch/tile/include/asm/thread_info.h b/arch/tile/include/asm/thread_info.h
index 656c486e64fa..e9c670d7a7fe 100644
--- a/arch/tile/include/asm/thread_info.h
+++ b/arch/tile/include/asm/thread_info.h
@@ -91,11 +91,6 @@ extern void smp_nap(void);
91/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */ 91/* Enable interrupts racelessly and nap forever: helper for cpu_idle(). */
92extern void _cpu_idle(void); 92extern void _cpu_idle(void);
93 93
94/* Switch boot idle thread to a freshly-allocated stack and free old stack. */
95extern void cpu_idle_on_new_stack(struct thread_info *old_ti,
96 unsigned long new_sp,
97 unsigned long new_ss10);
98
99#else /* __ASSEMBLY__ */ 94#else /* __ASSEMBLY__ */
100 95
101/* 96/*
@@ -166,7 +161,23 @@ static inline void set_restore_sigmask(void)
166{ 161{
167 struct thread_info *ti = current_thread_info(); 162 struct thread_info *ti = current_thread_info();
168 ti->status |= TS_RESTORE_SIGMASK; 163 ti->status |= TS_RESTORE_SIGMASK;
169 set_bit(TIF_SIGPENDING, &ti->flags); 164 WARN_ON(!test_bit(TIF_SIGPENDING, &ti->flags));
165}
166static inline void clear_restore_sigmask(void)
167{
168 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
169}
170static inline bool test_restore_sigmask(void)
171{
172 return current_thread_info()->status & TS_RESTORE_SIGMASK;
173}
174static inline bool test_and_clear_restore_sigmask(void)
175{
176 struct thread_info *ti = current_thread_info();
177 if (!(ti->status & TS_RESTORE_SIGMASK))
178 return false;
179 ti->status &= ~TS_RESTORE_SIGMASK;
180 return true;
170} 181}
171#endif /* !__ASSEMBLY__ */ 182#endif /* !__ASSEMBLY__ */
172 183
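The helpers added above bring tile in line with the generic TS_RESTORE_SIGMASK handling: set_restore_sigmask() now only asserts that TIF_SIGPENDING is already set, and the flag is consumed through test_and_clear_restore_sigmask() or, indirectly, restore_saved_sigmask(). The generic consumer is expected to behave roughly like the sketch below.

    /* Sketch of the generic consumer of the helpers above; the actual
     * restore_saved_sigmask() in the generic code is expected to do
     * roughly this.
     */
    static inline void restore_saved_sigmask_sketch(void)
    {
            if (test_and_clear_restore_sigmask())
                    __set_current_blocked(&current->saved_sigmask);
    }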
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index c3dd275f25e2..9ab078a4605d 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -146,7 +146,7 @@ extern int fixup_exception(struct pt_regs *regs);
146#ifdef __tilegx__ 146#ifdef __tilegx__
147#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret) 147#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
148#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret) 148#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
149#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret) 149#define __get_user_4(x, ptr, ret) __get_user_asm(ld4s, x, ptr, ret)
150#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret) 150#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
151#else 151#else
152#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret) 152#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
diff --git a/arch/tile/kernel/compat_signal.c b/arch/tile/kernel/compat_signal.c
index cdef6e5ec022..474571b84085 100644
--- a/arch/tile/kernel/compat_signal.c
+++ b/arch/tile/kernel/compat_signal.c
@@ -118,8 +118,6 @@ struct compat_rt_sigframe {
118 struct compat_ucontext uc; 118 struct compat_ucontext uc;
119}; 119};
120 120
121#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
122
123long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act, 121long compat_sys_rt_sigaction(int sig, struct compat_sigaction __user *act,
124 struct compat_sigaction __user *oact, 122 struct compat_sigaction __user *oact,
125 size_t sigsetsize) 123 size_t sigsetsize)
@@ -302,7 +300,6 @@ long compat_sys_rt_sigreturn(struct pt_regs *regs)
302 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 300 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
303 goto badframe; 301 goto badframe;
304 302
305 sigdelsetmask(&set, ~_BLOCKABLE);
306 set_current_blocked(&set); 303 set_current_blocked(&set);
307 304
308 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 305 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index 133c4b56a99e..c31637baff28 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -68,20 +68,6 @@ STD_ENTRY(KBacktraceIterator_init_current)
68 jrp lr /* keep backtracer happy */ 68 jrp lr /* keep backtracer happy */
69 STD_ENDPROC(KBacktraceIterator_init_current) 69 STD_ENDPROC(KBacktraceIterator_init_current)
70 70
71/*
72 * Reset our stack to r1/r2 (sp and ksp0+cpu respectively), then
73 * free the old stack (passed in r0) and re-invoke cpu_idle().
74 * We update sp and ksp0 simultaneously to avoid backtracer warnings.
75 */
76STD_ENTRY(cpu_idle_on_new_stack)
77 {
78 move sp, r1
79 mtspr SPR_SYSTEM_SAVE_K_0, r2
80 }
81 jal free_thread_info
82 j cpu_idle
83 STD_ENDPROC(cpu_idle_on_new_stack)
84
85/* Loop forever on a nap during SMP boot. */ 71/* Loop forever on a nap during SMP boot. */
86STD_ENTRY(smp_nap) 72STD_ENTRY(smp_nap)
87 nap 73 nap
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index ba1023d8a021..6be799150501 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -565,8 +565,6 @@ int do_work_pending(struct pt_regs *regs, u32 thread_info_flags)
565 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 565 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
566 clear_thread_flag(TIF_NOTIFY_RESUME); 566 clear_thread_flag(TIF_NOTIFY_RESUME);
567 tracehook_notify_resume(regs); 567 tracehook_notify_resume(regs);
568 if (current->replacement_session_keyring)
569 key_replace_session_keyring();
570 return 1; 568 return 1;
571 } 569 }
572 if (thread_info_flags & _TIF_SINGLESTEP) { 570 if (thread_info_flags & _TIF_SINGLESTEP) {
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 6098ccc59be2..dd87f3420390 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -29,6 +29,7 @@
29#include <linux/smp.h> 29#include <linux/smp.h>
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <linux/hugetlb.h> 31#include <linux/hugetlb.h>
32#include <linux/start_kernel.h>
32#include <asm/setup.h> 33#include <asm/setup.h>
33#include <asm/sections.h> 34#include <asm/sections.h>
34#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
diff --git a/arch/tile/kernel/signal.c b/arch/tile/kernel/signal.c
index f79d4b88c747..e29b0553211d 100644
--- a/arch/tile/kernel/signal.c
+++ b/arch/tile/kernel/signal.c
@@ -37,8 +37,6 @@
37 37
38#define DEBUG_SIG 0 38#define DEBUG_SIG 0
39 39
40#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
41
42SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss, 40SYSCALL_DEFINE3(sigaltstack, const stack_t __user *, uss,
43 stack_t __user *, uoss, struct pt_regs *, regs) 41 stack_t __user *, uoss, struct pt_regs *, regs)
44{ 42{
@@ -96,7 +94,6 @@ SYSCALL_DEFINE1(rt_sigreturn, struct pt_regs *, regs)
96 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 94 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
97 goto badframe; 95 goto badframe;
98 96
99 sigdelsetmask(&set, ~_BLOCKABLE);
100 set_current_blocked(&set); 97 set_current_blocked(&set);
101 98
102 if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) 99 if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
@@ -242,10 +239,11 @@ give_sigsegv:
242 * OK, we're invoking a handler 239 * OK, we're invoking a handler
243 */ 240 */
244 241
245static int handle_signal(unsigned long sig, siginfo_t *info, 242static void handle_signal(unsigned long sig, siginfo_t *info,
246 struct k_sigaction *ka, sigset_t *oldset, 243 struct k_sigaction *ka,
247 struct pt_regs *regs) 244 struct pt_regs *regs)
248{ 245{
246 sigset_t *oldset = sigmask_to_save();
249 int ret; 247 int ret;
250 248
251 /* Are we from a system call? */ 249 /* Are we from a system call? */
@@ -278,15 +276,9 @@ static int handle_signal(unsigned long sig, siginfo_t *info,
278 else 276 else
279#endif 277#endif
280 ret = setup_rt_frame(sig, ka, info, oldset, regs); 278 ret = setup_rt_frame(sig, ka, info, oldset, regs);
281 if (ret == 0) { 279 if (ret)
282 /* This code is only called from system calls or from 280 return;
283 * the work_pending path in the return-to-user code, and 281 signal_delivered(sig, info, ka, regs, 0);
284 * either way we can re-enable interrupts unconditionally.
285 */
286 block_sigmask(ka, sig);
287 }
288
289 return ret;
290} 282}
291 283
292/* 284/*
@@ -299,7 +291,6 @@ void do_signal(struct pt_regs *regs)
299 siginfo_t info; 291 siginfo_t info;
300 int signr; 292 int signr;
301 struct k_sigaction ka; 293 struct k_sigaction ka;
302 sigset_t *oldset;
303 294
304 /* 295 /*
305 * i386 will check if we're coming from kernel mode and bail out 296 * i386 will check if we're coming from kernel mode and bail out
@@ -308,24 +299,10 @@ void do_signal(struct pt_regs *regs)
308 * helpful, we can reinstate the check on "!user_mode(regs)". 299 * helpful, we can reinstate the check on "!user_mode(regs)".
309 */ 300 */
310 301
311 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
312 oldset = &current->saved_sigmask;
313 else
314 oldset = &current->blocked;
315
316 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 302 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
317 if (signr > 0) { 303 if (signr > 0) {
318 /* Whee! Actually deliver the signal. */ 304 /* Whee! Actually deliver the signal. */
319 if (handle_signal(signr, &info, &ka, oldset, regs) == 0) { 305 handle_signal(signr, &info, &ka, regs);
320 /*
321 * A signal was successfully delivered; the saved
322 * sigmask will have been stored in the signal frame,
323 * and will be restored by sigreturn, so we can simply
324 * clear the TS_RESTORE_SIGMASK flag.
325 */
326 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
327 }
328
329 goto done; 306 goto done;
330 } 307 }
331 308
@@ -350,10 +327,7 @@ void do_signal(struct pt_regs *regs)
350 } 327 }
351 328
352 /* If there's no signal to deliver, just put the saved sigmask back. */ 329 /* If there's no signal to deliver, just put the saved sigmask back. */
353 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 330 restore_saved_sigmask();
354 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
355 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
356 }
357 331
358done: 332done:
359 /* Avoid double syscall restart if there are nested signals. */ 333 /* Avoid double syscall restart if there are nested signals. */
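This conversion, and the matching ones in the um and unicore32 signal code further down, all land on the same generic flow: the arch handler no longer computes oldset or clears the restore-sigmask flag by hand; it fetches the saved mask via sigmask_to_save(), sets up the frame, and lets signal_delivered() do the bookkeeping (blocking the signal and clearing the restore flag). A skeleton of the converted handler is shown below, with syscall restart and error handling trimmed.

    /* Skeleton of the converted per-arch handler; syscall restarting and
     * failure handling (e.g. force_sigsegv()) are omitted for brevity.
     */
    static void handle_signal(unsigned long sig, siginfo_t *info,
                              struct k_sigaction *ka, struct pt_regs *regs)
    {
            sigset_t *oldset = sigmask_to_save();

            if (setup_rt_frame(sig, ka, info, oldset, regs))
                    return;         /* frame setup failed, nothing was delivered */

            signal_delivered(sig, info, ka, regs, 0);
    }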
diff --git a/arch/um/include/shared/frame_kern.h b/arch/um/include/shared/frame_kern.h
index 76078490c258..e584e40ee832 100644
--- a/arch/um/include/shared/frame_kern.h
+++ b/arch/um/include/shared/frame_kern.h
@@ -6,9 +6,6 @@
6#ifndef __FRAME_KERN_H_ 6#ifndef __FRAME_KERN_H_
7#define __FRAME_KERN_H_ 7#define __FRAME_KERN_H_
8 8
9#define _S(nr) (1<<((nr)-1))
10#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
11
12extern int setup_signal_stack_sc(unsigned long stack_top, int sig, 9extern int setup_signal_stack_sc(unsigned long stack_top, int sig,
13 struct k_sigaction *ka, 10 struct k_sigaction *ka,
14 struct pt_regs *regs, 11 struct pt_regs *regs,
diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
index 3a2235e0abc3..ccb9a9d283f1 100644
--- a/arch/um/kernel/process.c
+++ b/arch/um/kernel/process.c
@@ -117,11 +117,8 @@ void interrupt_end(void)
117 schedule(); 117 schedule();
118 if (test_thread_flag(TIF_SIGPENDING)) 118 if (test_thread_flag(TIF_SIGPENDING))
119 do_signal(); 119 do_signal();
120 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) { 120 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
121 tracehook_notify_resume(&current->thread.regs); 121 tracehook_notify_resume(&current->thread.regs);
122 if (current->replacement_session_keyring)
123 key_replace_session_keyring();
124 }
125} 122}
126 123
127void exit_thread(void) 124void exit_thread(void)
diff --git a/arch/um/kernel/reboot.c b/arch/um/kernel/reboot.c
index 4d93dff6b371..3d15243ce692 100644
--- a/arch/um/kernel/reboot.c
+++ b/arch/um/kernel/reboot.c
@@ -4,7 +4,9 @@
4 */ 4 */
5 5
6#include "linux/sched.h" 6#include "linux/sched.h"
7#include "linux/spinlock.h"
7#include "linux/slab.h" 8#include "linux/slab.h"
9#include "linux/oom.h"
8#include "kern_util.h" 10#include "kern_util.h"
9#include "os.h" 11#include "os.h"
10#include "skas.h" 12#include "skas.h"
@@ -22,13 +24,18 @@ static void kill_off_processes(void)
22 struct task_struct *p; 24 struct task_struct *p;
23 int pid; 25 int pid;
24 26
27 read_lock(&tasklist_lock);
25 for_each_process(p) { 28 for_each_process(p) {
26 if (p->mm == NULL) 29 struct task_struct *t;
27 continue;
28 30
29 pid = p->mm->context.id.u.pid; 31 t = find_lock_task_mm(p);
32 if (!t)
33 continue;
34 pid = t->mm->context.id.u.pid;
35 task_unlock(t);
30 os_kill_ptraced_process(pid, 1); 36 os_kill_ptraced_process(pid, 1);
31 } 37 }
38 read_unlock(&tasklist_lock);
32 } 39 }
33} 40}
34 41
diff --git a/arch/um/kernel/signal.c b/arch/um/kernel/signal.c
index 292e706016c5..7362d58efc29 100644
--- a/arch/um/kernel/signal.c
+++ b/arch/um/kernel/signal.c
@@ -15,17 +15,13 @@
15EXPORT_SYMBOL(block_signals); 15EXPORT_SYMBOL(block_signals);
16EXPORT_SYMBOL(unblock_signals); 16EXPORT_SYMBOL(unblock_signals);
17 17
18#define _S(nr) (1<<((nr)-1))
19
20#define _BLOCKABLE (~(_S(SIGKILL) | _S(SIGSTOP)))
21
22/* 18/*
23 * OK, we're invoking a handler 19 * OK, we're invoking a handler
24 */ 20 */
25static int handle_signal(struct pt_regs *regs, unsigned long signr, 21static void handle_signal(struct pt_regs *regs, unsigned long signr,
26 struct k_sigaction *ka, siginfo_t *info, 22 struct k_sigaction *ka, siginfo_t *info)
27 sigset_t *oldset)
28{ 23{
24 sigset_t *oldset = sigmask_to_save();
29 unsigned long sp; 25 unsigned long sp;
30 int err; 26 int err;
31 27
@@ -65,9 +61,7 @@ static int handle_signal(struct pt_regs *regs, unsigned long signr,
65 if (err) 61 if (err)
66 force_sigsegv(signr, current); 62 force_sigsegv(signr, current);
67 else 63 else
68 block_sigmask(ka, signr); 64 signal_delivered(signr, info, ka, regs, 0);
69
70 return err;
71} 65}
72 66
73static int kern_do_signal(struct pt_regs *regs) 67static int kern_do_signal(struct pt_regs *regs)
@@ -77,24 +71,9 @@ static int kern_do_signal(struct pt_regs *regs)
77 int sig, handled_sig = 0; 71 int sig, handled_sig = 0;
78 72
79 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) { 73 while ((sig = get_signal_to_deliver(&info, &ka_copy, regs, NULL)) > 0) {
80 sigset_t *oldset;
81 if (test_thread_flag(TIF_RESTORE_SIGMASK))
82 oldset = &current->saved_sigmask;
83 else
84 oldset = &current->blocked;
85 handled_sig = 1; 74 handled_sig = 1;
86 /* Whee! Actually deliver the signal. */ 75 /* Whee! Actually deliver the signal. */
87 if (!handle_signal(regs, sig, &ka_copy, &info, oldset)) { 76 handle_signal(regs, sig, &ka_copy, &info);
88 /*
89 * a signal was successfully delivered; the saved
90 * sigmask will have been stored in the signal frame,
91 * and will be restored by sigreturn, so we can simply
92 * clear the TIF_RESTORE_SIGMASK flag
93 */
94 if (test_thread_flag(TIF_RESTORE_SIGMASK))
95 clear_thread_flag(TIF_RESTORE_SIGMASK);
96 break;
97 }
98 } 77 }
99 78
100 /* Did we come from a system call? */ 79 /* Did we come from a system call? */
@@ -130,10 +109,8 @@ static int kern_do_signal(struct pt_regs *regs)
130 * if there's no signal to deliver, we just put the saved sigmask 109 * if there's no signal to deliver, we just put the saved sigmask
131 * back 110 * back
132 */ 111 */
133 if (!handled_sig && test_thread_flag(TIF_RESTORE_SIGMASK)) { 112 if (!handled_sig)
134 clear_thread_flag(TIF_RESTORE_SIGMASK); 113 restore_saved_sigmask();
135 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
136 }
137 return handled_sig; 114 return handled_sig;
138} 115}
139 116
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index dafc94715950..3be60765c0e2 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -30,6 +30,8 @@ int handle_page_fault(unsigned long address, unsigned long ip,
30 pmd_t *pmd; 30 pmd_t *pmd;
31 pte_t *pte; 31 pte_t *pte;
32 int err = -EFAULT; 32 int err = -EFAULT;
33 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
34 (is_write ? FAULT_FLAG_WRITE : 0);
33 35
34 *code_out = SEGV_MAPERR; 36 *code_out = SEGV_MAPERR;
35 37
@@ -40,6 +42,7 @@ int handle_page_fault(unsigned long address, unsigned long ip,
40 if (in_atomic()) 42 if (in_atomic())
41 goto out_nosemaphore; 43 goto out_nosemaphore;
42 44
45retry:
43 down_read(&mm->mmap_sem); 46 down_read(&mm->mmap_sem);
44 vma = find_vma(mm, address); 47 vma = find_vma(mm, address);
45 if (!vma) 48 if (!vma)
@@ -65,7 +68,11 @@ good_area:
65 do { 68 do {
66 int fault; 69 int fault;
67 70
68 fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); 71 fault = handle_mm_fault(mm, vma, address, flags);
72
73 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
74 goto out_nosemaphore;
75
69 if (unlikely(fault & VM_FAULT_ERROR)) { 76 if (unlikely(fault & VM_FAULT_ERROR)) {
70 if (fault & VM_FAULT_OOM) { 77 if (fault & VM_FAULT_OOM) {
71 goto out_of_memory; 78 goto out_of_memory;
@@ -75,10 +82,17 @@ good_area:
75 } 82 }
76 BUG(); 83 BUG();
77 } 84 }
78 if (fault & VM_FAULT_MAJOR) 85 if (flags & FAULT_FLAG_ALLOW_RETRY) {
79 current->maj_flt++; 86 if (fault & VM_FAULT_MAJOR)
80 else 87 current->maj_flt++;
81 current->min_flt++; 88 else
89 current->min_flt++;
90 if (fault & VM_FAULT_RETRY) {
91 flags &= ~FAULT_FLAG_ALLOW_RETRY;
92
93 goto retry;
94 }
95 }
82 96
83 pgd = pgd_offset(mm, address); 97 pgd = pgd_offset(mm, address);
84 pud = pud_offset(pgd, address); 98 pud = pud_offset(pgd, address);
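The retry logic added here follows the pattern other architectures already use with FAULT_FLAG_ALLOW_RETRY: when handle_mm_fault() returns VM_FAULT_RETRY it has already dropped mmap_sem, so the handler clears the retry flag (bounding the loop at one retry), re-takes the semaphore and repeats the lookup. Condensed into one function the flow looks roughly like the sketch below; vma validation and error paths are simplified.

    /* Condensed sketch of the retry pattern; not the literal UML handler. */
    static int fault_with_retry(struct mm_struct *mm, unsigned long address,
                                int is_write)
    {
            unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
                                 (is_write ? FAULT_FLAG_WRITE : 0);
            struct vm_area_struct *vma;
            int fault;

    retry:
            down_read(&mm->mmap_sem);
            vma = find_vma(mm, address);
            if (!vma || vma->vm_start > address)
                    goto out_fault;

            fault = handle_mm_fault(mm, vma, address, flags);

            if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
                    return -EFAULT;         /* mmap_sem was already dropped */

            if ((flags & FAULT_FLAG_ALLOW_RETRY) && (fault & VM_FAULT_RETRY)) {
                    flags &= ~FAULT_FLAG_ALLOW_RETRY;   /* retry only once */
                    goto retry;             /* mmap_sem was already dropped */
            }

            up_read(&mm->mmap_sem);
            return (fault & VM_FAULT_ERROR) ? -EFAULT : 0;

    out_fault:
            up_read(&mm->mmap_sem);
            return -EFAULT;
    }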
diff --git a/arch/unicore32/kernel/signal.c b/arch/unicore32/kernel/signal.c
index 7754df6ef7d4..8adedb37720a 100644
--- a/arch/unicore32/kernel/signal.c
+++ b/arch/unicore32/kernel/signal.c
@@ -21,8 +21,6 @@
21#include <asm/cacheflush.h> 21#include <asm/cacheflush.h>
22#include <asm/ucontext.h> 22#include <asm/ucontext.h>
23 23
24#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
25
26/* 24/*
27 * For UniCore syscalls, we encode the syscall number into the instruction. 25 * For UniCore syscalls, we encode the syscall number into the instruction.
28 */ 26 */
@@ -61,10 +59,8 @@ static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
61 int err; 59 int err;
62 60
63 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set)); 61 err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
64 if (err == 0) { 62 if (err == 0)
65 sigdelsetmask(&set, ~_BLOCKABLE);
66 set_current_blocked(&set); 63 set_current_blocked(&set);
67 }
68 64
69 err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00); 65 err |= __get_user(regs->UCreg_00, &sf->uc.uc_mcontext.regs.UCreg_00);
70 err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01); 66 err |= __get_user(regs->UCreg_01, &sf->uc.uc_mcontext.regs.UCreg_01);
@@ -312,13 +308,12 @@ static inline void setup_syscall_restart(struct pt_regs *regs)
312/* 308/*
313 * OK, we're invoking a handler 309 * OK, we're invoking a handler
314 */ 310 */
315static int handle_signal(unsigned long sig, struct k_sigaction *ka, 311static void handle_signal(unsigned long sig, struct k_sigaction *ka,
316 siginfo_t *info, sigset_t *oldset, 312 siginfo_t *info, struct pt_regs *regs, int syscall)
317 struct pt_regs *regs, int syscall)
318{ 313{
319 struct thread_info *thread = current_thread_info(); 314 struct thread_info *thread = current_thread_info();
320 struct task_struct *tsk = current; 315 struct task_struct *tsk = current;
321 sigset_t blocked; 316 sigset_t *oldset = sigmask_to_save();
322 int usig = sig; 317 int usig = sig;
323 int ret; 318 int ret;
324 319
@@ -364,15 +359,10 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
364 359
365 if (ret != 0) { 360 if (ret != 0) {
366 force_sigsegv(sig, tsk); 361 force_sigsegv(sig, tsk);
367 return ret; 362 return;
368 } 363 }
369 364
370 /* 365 signal_delivered(sig, info, ka, regs, 0);
371 * Block the signal if we were successful.
372 */
373 block_sigmask(ka, sig);
374
375 return 0;
376} 366}
377 367
378/* 368/*
@@ -399,32 +389,12 @@ static void do_signal(struct pt_regs *regs, int syscall)
399 if (!user_mode(regs)) 389 if (!user_mode(regs))
400 return; 390 return;
401 391
402 if (try_to_freeze())
403 goto no_signal;
404
405 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 392 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
406 if (signr > 0) { 393 if (signr > 0) {
407 sigset_t *oldset; 394 handle_signal(signr, &ka, &info, regs, syscall);
408
409 if (test_thread_flag(TIF_RESTORE_SIGMASK))
410 oldset = &current->saved_sigmask;
411 else
412 oldset = &current->blocked;
413 if (handle_signal(signr, &ka, &info, oldset, regs, syscall)
414 == 0) {
415 /*
416 * A signal was successfully delivered; the saved
417 * sigmask will have been stored in the signal frame,
418 * and will be restored by sigreturn, so we can simply
419 * clear the TIF_RESTORE_SIGMASK flag.
420 */
421 if (test_thread_flag(TIF_RESTORE_SIGMASK))
422 clear_thread_flag(TIF_RESTORE_SIGMASK);
423 }
424 return; 395 return;
425 } 396 }
426 397
427 no_signal:
428 /* 398 /*
429 * No signal to deliver to the process - restart the syscall. 399 * No signal to deliver to the process - restart the syscall.
430 */ 400 */
@@ -451,8 +421,7 @@ static void do_signal(struct pt_regs *regs, int syscall)
451 /* If there's no signal to deliver, we just put the saved 421 /* If there's no signal to deliver, we just put the saved
452 * sigmask back. 422 * sigmask back.
453 */ 423 */
454 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) 424 restore_saved_sigmask();
455 set_current_blocked(&current->saved_sigmask);
456} 425}
457 426
458asmlinkage void do_notify_resume(struct pt_regs *regs, 427asmlinkage void do_notify_resume(struct pt_regs *regs,
@@ -464,8 +433,6 @@ asmlinkage void do_notify_resume(struct pt_regs *regs,
464 if (thread_flags & _TIF_NOTIFY_RESUME) { 433 if (thread_flags & _TIF_NOTIFY_RESUME) {
465 clear_thread_flag(TIF_NOTIFY_RESUME); 434 clear_thread_flag(TIF_NOTIFY_RESUME);
466 tracehook_notify_resume(regs); 435 tracehook_notify_resume(regs);
467 if (current->replacement_session_keyring)
468 key_replace_session_keyring();
469 } 436 }
470} 437}
471 438
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d700811785ea..c70684f859e1 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1506,6 +1506,8 @@ config EFI_STUB
1506 This kernel feature allows a bzImage to be loaded directly 1506 This kernel feature allows a bzImage to be loaded directly
1507 by EFI firmware without the use of a bootloader. 1507 by EFI firmware without the use of a bootloader.
1508 1508
1509 See Documentation/x86/efi-stub.txt for more information.
1510
1509config SECCOMP 1511config SECCOMP
1510 def_bool y 1512 def_bool y
1511 prompt "Enable seccomp to safely compute untrusted bytecode" 1513 prompt "Enable seccomp to safely compute untrusted bytecode"
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index 2c14e76bb4c7..4e85f5f85837 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -16,6 +16,26 @@
16 16
17static efi_system_table_t *sys_table; 17static efi_system_table_t *sys_table;
18 18
19static void efi_printk(char *str)
20{
21 char *s8;
22
23 for (s8 = str; *s8; s8++) {
24 struct efi_simple_text_output_protocol *out;
25 efi_char16_t ch[2] = { 0 };
26
27 ch[0] = *s8;
28 out = (struct efi_simple_text_output_protocol *)sys_table->con_out;
29
30 if (*s8 == '\n') {
31 efi_char16_t nl[2] = { '\r', 0 };
32 efi_call_phys2(out->output_string, out, nl);
33 }
34
35 efi_call_phys2(out->output_string, out, ch);
36 }
37}
38
19static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size, 39static efi_status_t __get_map(efi_memory_desc_t **map, unsigned long *map_size,
20 unsigned long *desc_size) 40 unsigned long *desc_size)
21{ 41{
@@ -531,8 +551,10 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
531 EFI_LOADER_DATA, 551 EFI_LOADER_DATA,
532 nr_initrds * sizeof(*initrds), 552 nr_initrds * sizeof(*initrds),
533 &initrds); 553 &initrds);
534 if (status != EFI_SUCCESS) 554 if (status != EFI_SUCCESS) {
555 efi_printk("Failed to alloc mem for initrds\n");
535 goto fail; 556 goto fail;
557 }
536 558
537 str = (char *)(unsigned long)hdr->cmd_line_ptr; 559 str = (char *)(unsigned long)hdr->cmd_line_ptr;
538 for (i = 0; i < nr_initrds; i++) { 560 for (i = 0; i < nr_initrds; i++) {
@@ -575,32 +597,42 @@ static efi_status_t handle_ramdisks(efi_loaded_image_t *image,
575 597
576 status = efi_call_phys3(boottime->handle_protocol, 598 status = efi_call_phys3(boottime->handle_protocol,
577 image->device_handle, &fs_proto, &io); 599 image->device_handle, &fs_proto, &io);
578 if (status != EFI_SUCCESS) 600 if (status != EFI_SUCCESS) {
601 efi_printk("Failed to handle fs_proto\n");
579 goto free_initrds; 602 goto free_initrds;
603 }
580 604
581 status = efi_call_phys2(io->open_volume, io, &fh); 605 status = efi_call_phys2(io->open_volume, io, &fh);
582 if (status != EFI_SUCCESS) 606 if (status != EFI_SUCCESS) {
607 efi_printk("Failed to open volume\n");
583 goto free_initrds; 608 goto free_initrds;
609 }
584 } 610 }
585 611
586 status = efi_call_phys5(fh->open, fh, &h, filename_16, 612 status = efi_call_phys5(fh->open, fh, &h, filename_16,
587 EFI_FILE_MODE_READ, (u64)0); 613 EFI_FILE_MODE_READ, (u64)0);
588 if (status != EFI_SUCCESS) 614 if (status != EFI_SUCCESS) {
615 efi_printk("Failed to open initrd file\n");
589 goto close_handles; 616 goto close_handles;
617 }
590 618
591 initrd->handle = h; 619 initrd->handle = h;
592 620
593 info_sz = 0; 621 info_sz = 0;
594 status = efi_call_phys4(h->get_info, h, &info_guid, 622 status = efi_call_phys4(h->get_info, h, &info_guid,
595 &info_sz, NULL); 623 &info_sz, NULL);
596 if (status != EFI_BUFFER_TOO_SMALL) 624 if (status != EFI_BUFFER_TOO_SMALL) {
625 efi_printk("Failed to get initrd info size\n");
597 goto close_handles; 626 goto close_handles;
627 }
598 628
599grow: 629grow:
600 status = efi_call_phys3(sys_table->boottime->allocate_pool, 630 status = efi_call_phys3(sys_table->boottime->allocate_pool,
601 EFI_LOADER_DATA, info_sz, &info); 631 EFI_LOADER_DATA, info_sz, &info);
602 if (status != EFI_SUCCESS) 632 if (status != EFI_SUCCESS) {
633 efi_printk("Failed to alloc mem for initrd info\n");
603 goto close_handles; 634 goto close_handles;
635 }
604 636
605 status = efi_call_phys4(h->get_info, h, &info_guid, 637 status = efi_call_phys4(h->get_info, h, &info_guid,
606 &info_sz, info); 638 &info_sz, info);
@@ -612,8 +644,10 @@ grow:
612 file_sz = info->file_size; 644 file_sz = info->file_size;
613 efi_call_phys1(sys_table->boottime->free_pool, info); 645 efi_call_phys1(sys_table->boottime->free_pool, info);
614 646
615 if (status != EFI_SUCCESS) 647 if (status != EFI_SUCCESS) {
648 efi_printk("Failed to get initrd info\n");
616 goto close_handles; 649 goto close_handles;
650 }
617 651
618 initrd->size = file_sz; 652 initrd->size = file_sz;
619 initrd_total += file_sz; 653 initrd_total += file_sz;
@@ -629,11 +663,14 @@ grow:
629 */ 663 */
630 status = high_alloc(initrd_total, 0x1000, 664 status = high_alloc(initrd_total, 0x1000,
631 &initrd_addr, hdr->initrd_addr_max); 665 &initrd_addr, hdr->initrd_addr_max);
632 if (status != EFI_SUCCESS) 666 if (status != EFI_SUCCESS) {
667 efi_printk("Failed to alloc highmem for initrds\n");
633 goto close_handles; 668 goto close_handles;
669 }
634 670
635 /* We've run out of free low memory. */ 671 /* We've run out of free low memory. */
636 if (initrd_addr > hdr->initrd_addr_max) { 672 if (initrd_addr > hdr->initrd_addr_max) {
673 efi_printk("We've run out of free low memory\n");
637 status = EFI_INVALID_PARAMETER; 674 status = EFI_INVALID_PARAMETER;
638 goto free_initrd_total; 675 goto free_initrd_total;
639 } 676 }
@@ -652,8 +689,10 @@ grow:
652 status = efi_call_phys3(fh->read, 689 status = efi_call_phys3(fh->read,
653 initrds[j].handle, 690 initrds[j].handle,
654 &chunksize, addr); 691 &chunksize, addr);
655 if (status != EFI_SUCCESS) 692 if (status != EFI_SUCCESS) {
693 efi_printk("Failed to read initrd\n");
656 goto free_initrd_total; 694 goto free_initrd_total;
695 }
657 addr += chunksize; 696 addr += chunksize;
658 size -= chunksize; 697 size -= chunksize;
659 } 698 }
@@ -674,7 +713,7 @@ free_initrd_total:
674 low_free(initrd_total, initrd_addr); 713 low_free(initrd_total, initrd_addr);
675 714
676close_handles: 715close_handles:
677 for (k = j; k < nr_initrds; k++) 716 for (k = j; k < i; k++)
678 efi_call_phys1(fh->close, initrds[k].handle); 717 efi_call_phys1(fh->close, initrds[k].handle);
679free_initrds: 718free_initrds:
680 efi_call_phys1(sys_table->boottime->free_pool, initrds); 719 efi_call_phys1(sys_table->boottime->free_pool, initrds);
@@ -732,8 +771,10 @@ static efi_status_t make_boot_params(struct boot_params *boot_params,
732 options_size++; /* NUL termination */ 771 options_size++; /* NUL termination */
733 772
734 status = low_alloc(options_size, 1, &cmdline); 773 status = low_alloc(options_size, 1, &cmdline);
735 if (status != EFI_SUCCESS) 774 if (status != EFI_SUCCESS) {
775 efi_printk("Failed to alloc mem for cmdline\n");
736 goto fail; 776 goto fail;
777 }
737 778
738 s1 = (u8 *)(unsigned long)cmdline; 779 s1 = (u8 *)(unsigned long)cmdline;
739 s2 = (u16 *)options; 780 s2 = (u16 *)options;
@@ -895,12 +936,16 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
895 936
896 status = efi_call_phys3(sys_table->boottime->handle_protocol, 937 status = efi_call_phys3(sys_table->boottime->handle_protocol,
897 handle, &proto, (void *)&image); 938 handle, &proto, (void *)&image);
898 if (status != EFI_SUCCESS) 939 if (status != EFI_SUCCESS) {
940 efi_printk("Failed to get handle for LOADED_IMAGE_PROTOCOL\n");
899 goto fail; 941 goto fail;
942 }
900 943
901 status = low_alloc(0x4000, 1, (unsigned long *)&boot_params); 944 status = low_alloc(0x4000, 1, (unsigned long *)&boot_params);
902 if (status != EFI_SUCCESS) 945 if (status != EFI_SUCCESS) {
946 efi_printk("Failed to alloc lowmem for boot params\n");
903 goto fail; 947 goto fail;
948 }
904 949
905 memset(boot_params, 0x0, 0x4000); 950 memset(boot_params, 0x0, 0x4000);
906 951
@@ -933,8 +978,10 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
933 if (status != EFI_SUCCESS) { 978 if (status != EFI_SUCCESS) {
934 status = low_alloc(hdr->init_size, hdr->kernel_alignment, 979 status = low_alloc(hdr->init_size, hdr->kernel_alignment,
935 &start); 980 &start);
936 if (status != EFI_SUCCESS) 981 if (status != EFI_SUCCESS) {
982 efi_printk("Failed to alloc mem for kernel\n");
937 goto fail; 983 goto fail;
984 }
938 } 985 }
939 986
940 hdr->code32_start = (__u32)start; 987 hdr->code32_start = (__u32)start;
@@ -945,19 +992,25 @@ struct boot_params *efi_main(void *handle, efi_system_table_t *_table)
945 status = efi_call_phys3(sys_table->boottime->allocate_pool, 992 status = efi_call_phys3(sys_table->boottime->allocate_pool,
946 EFI_LOADER_DATA, sizeof(*gdt), 993 EFI_LOADER_DATA, sizeof(*gdt),
947 (void **)&gdt); 994 (void **)&gdt);
948 if (status != EFI_SUCCESS) 995 if (status != EFI_SUCCESS) {
996 efi_printk("Failed to alloc mem for gdt structure\n");
949 goto fail; 997 goto fail;
998 }
950 999
951 gdt->size = 0x800; 1000 gdt->size = 0x800;
952 status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address); 1001 status = low_alloc(gdt->size, 8, (unsigned long *)&gdt->address);
953 if (status != EFI_SUCCESS) 1002 if (status != EFI_SUCCESS) {
1003 efi_printk("Failed to alloc mem for gdt\n");
954 goto fail; 1004 goto fail;
1005 }
955 1006
956 status = efi_call_phys3(sys_table->boottime->allocate_pool, 1007 status = efi_call_phys3(sys_table->boottime->allocate_pool,
957 EFI_LOADER_DATA, sizeof(*idt), 1008 EFI_LOADER_DATA, sizeof(*idt),
958 (void **)&idt); 1009 (void **)&idt);
959 if (status != EFI_SUCCESS) 1010 if (status != EFI_SUCCESS) {
1011 efi_printk("Failed to alloc mem for idt structure\n");
960 goto fail; 1012 goto fail;
1013 }
961 1014
962 idt->size = 0; 1015 idt->size = 0;
963 idt->address = 0; 1016 idt->address = 0;
diff --git a/arch/x86/boot/compressed/eboot.h b/arch/x86/boot/compressed/eboot.h
index 39251663e65b..3b6e15627c55 100644
--- a/arch/x86/boot/compressed/eboot.h
+++ b/arch/x86/boot/compressed/eboot.h
@@ -58,4 +58,10 @@ struct efi_uga_draw_protocol {
58 void *blt; 58 void *blt;
59}; 59};
60 60
61struct efi_simple_text_output_protocol {
62 void *reset;
63 void *output_string;
64 void *test_string;
65};
66
61#endif /* BOOT_COMPRESSED_EBOOT_H */ 67#endif /* BOOT_COMPRESSED_EBOOT_H */
diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
index 8bbea6aa40d9..efe5acfc79c3 100644
--- a/arch/x86/boot/header.S
+++ b/arch/x86/boot/header.S
@@ -94,10 +94,10 @@ bs_die:
94 94
95 .section ".bsdata", "a" 95 .section ".bsdata", "a"
96bugger_off_msg: 96bugger_off_msg:
97 .ascii "Direct booting from floppy is no longer supported.\r\n" 97 .ascii "Direct floppy boot is not supported. "
98 .ascii "Please use a boot loader program instead.\r\n" 98 .ascii "Use a boot loader program instead.\r\n"
99 .ascii "\n" 99 .ascii "\n"
100 .ascii "Remove disk and press any key to reboot . . .\r\n" 100 .ascii "Remove disk and press any key to reboot ...\r\n"
101 .byte 0 101 .byte 0
102 102
103#ifdef CONFIG_EFI_STUB 103#ifdef CONFIG_EFI_STUB
@@ -111,7 +111,7 @@ coff_header:
111#else 111#else
112 .word 0x8664 # x86-64 112 .word 0x8664 # x86-64
113#endif 113#endif
114 .word 2 # nr_sections 114 .word 3 # nr_sections
115 .long 0 # TimeDateStamp 115 .long 0 # TimeDateStamp
116 .long 0 # PointerToSymbolTable 116 .long 0 # PointerToSymbolTable
117 .long 1 # NumberOfSymbols 117 .long 1 # NumberOfSymbols
@@ -158,8 +158,8 @@ extra_header_fields:
158#else 158#else
159 .quad 0 # ImageBase 159 .quad 0 # ImageBase
160#endif 160#endif
161 .long 0x1000 # SectionAlignment 161 .long 0x20 # SectionAlignment
162 .long 0x200 # FileAlignment 162 .long 0x20 # FileAlignment
163 .word 0 # MajorOperatingSystemVersion 163 .word 0 # MajorOperatingSystemVersion
164 .word 0 # MinorOperatingSystemVersion 164 .word 0 # MinorOperatingSystemVersion
165 .word 0 # MajorImageVersion 165 .word 0 # MajorImageVersion
@@ -200,8 +200,10 @@ extra_header_fields:
200 200
201 # Section table 201 # Section table
202section_table: 202section_table:
203 .ascii ".text" 203 #
204 .byte 0 204 # The offset & size fields are filled in by build.c.
205 #
206 .ascii ".setup"
205 .byte 0 207 .byte 0
206 .byte 0 208 .byte 0
207 .long 0 209 .long 0
@@ -217,9 +219,8 @@ section_table:
217 219
218 # 220 #
219 # The EFI application loader requires a relocation section 221 # The EFI application loader requires a relocation section
220 # because EFI applications must be relocatable. But since 222 # because EFI applications must be relocatable. The .reloc
221 # we don't need the loader to fixup any relocs for us, we 223 # offset & size fields are filled in by build.c.
222 # just create an empty (zero-length) .reloc section header.
223 # 224 #
224 .ascii ".reloc" 225 .ascii ".reloc"
225 .byte 0 226 .byte 0
@@ -233,6 +234,25 @@ section_table:
233 .word 0 # NumberOfRelocations 234 .word 0 # NumberOfRelocations
234 .word 0 # NumberOfLineNumbers 235 .word 0 # NumberOfLineNumbers
235 .long 0x42100040 # Characteristics (section flags) 236 .long 0x42100040 # Characteristics (section flags)
237
238 #
239 # The offset & size fields are filled in by build.c.
240 #
241 .ascii ".text"
242 .byte 0
243 .byte 0
244 .byte 0
245 .long 0
246 .long 0x0 # startup_{32,64}
247 .long 0 # Size of initialized data
248 # on disk
249 .long 0x0 # startup_{32,64}
250 .long 0 # PointerToRelocations
251 .long 0 # PointerToLineNumbers
252 .word 0 # NumberOfRelocations
253 .word 0 # NumberOfLineNumbers
254 .long 0x60500020 # Characteristics (section flags)
255
236#endif /* CONFIG_EFI_STUB */ 256#endif /* CONFIG_EFI_STUB */
237 257
238 # Kernel attributes; used by setup. This is part 1 of the 258 # Kernel attributes; used by setup. This is part 1 of the
diff --git a/arch/x86/boot/tools/build.c b/arch/x86/boot/tools/build.c
index 3f61f6e2b46f..4b8e165ee572 100644
--- a/arch/x86/boot/tools/build.c
+++ b/arch/x86/boot/tools/build.c
@@ -50,6 +50,8 @@ typedef unsigned int u32;
50u8 buf[SETUP_SECT_MAX*512]; 50u8 buf[SETUP_SECT_MAX*512];
51int is_big_kernel; 51int is_big_kernel;
52 52
53#define PECOFF_RELOC_RESERVE 0x20
54
53/*----------------------------------------------------------------------*/ 55/*----------------------------------------------------------------------*/
54 56
55static const u32 crctab32[] = { 57static const u32 crctab32[] = {
@@ -133,11 +135,103 @@ static void usage(void)
133 die("Usage: build setup system [> image]"); 135 die("Usage: build setup system [> image]");
134} 136}
135 137
136int main(int argc, char ** argv)
137{
138#ifdef CONFIG_EFI_STUB 138#ifdef CONFIG_EFI_STUB
139 unsigned int file_sz, pe_header; 139
140static void update_pecoff_section_header(char *section_name, u32 offset, u32 size)
141{
142 unsigned int pe_header;
143 unsigned short num_sections;
144 u8 *section;
145
146 pe_header = get_unaligned_le32(&buf[0x3c]);
147 num_sections = get_unaligned_le16(&buf[pe_header + 6]);
148
149#ifdef CONFIG_X86_32
150 section = &buf[pe_header + 0xa8];
151#else
152 section = &buf[pe_header + 0xb8];
140#endif 153#endif
154
155 while (num_sections > 0) {
156 if (strncmp((char*)section, section_name, 8) == 0) {
157 /* section header size field */
158 put_unaligned_le32(size, section + 0x8);
159
160 /* section header vma field */
161 put_unaligned_le32(offset, section + 0xc);
162
163 /* section header 'size of initialised data' field */
164 put_unaligned_le32(size, section + 0x10);
165
166 /* section header 'file offset' field */
167 put_unaligned_le32(offset, section + 0x14);
168
169 break;
170 }
171 section += 0x28;
172 num_sections--;
173 }
174}
175
176static void update_pecoff_setup_and_reloc(unsigned int size)
177{
178 u32 setup_offset = 0x200;
179 u32 reloc_offset = size - PECOFF_RELOC_RESERVE;
180 u32 setup_size = reloc_offset - setup_offset;
181
182 update_pecoff_section_header(".setup", setup_offset, setup_size);
183 update_pecoff_section_header(".reloc", reloc_offset, PECOFF_RELOC_RESERVE);
184
185 /*
186 * Modify .reloc section contents with a single entry. The
187 * relocation is applied to offset 10 of the relocation section.
188 */
189 put_unaligned_le32(reloc_offset + 10, &buf[reloc_offset]);
190 put_unaligned_le32(10, &buf[reloc_offset + 4]);
191}
192
193static void update_pecoff_text(unsigned int text_start, unsigned int file_sz)
194{
195 unsigned int pe_header;
196 unsigned int text_sz = file_sz - text_start;
197
198 pe_header = get_unaligned_le32(&buf[0x3c]);
199
200 /* Size of image */
201 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
202
203 /*
204 * Size of code: Subtract the size of the first sector (512 bytes)
205 * which includes the header.
206 */
207 put_unaligned_le32(file_sz - 512, &buf[pe_header + 0x1c]);
208
209#ifdef CONFIG_X86_32
210 /*
211 * Address of entry point.
212 *
213 * The EFI stub entry point is +16 bytes from the start of
214 * the .text section.
215 */
216 put_unaligned_le32(text_start + 16, &buf[pe_header + 0x28]);
217#else
218 /*
219 * Address of entry point. startup_32 is at the beginning and
220 * the 64-bit entry point (startup_64) is always 512 bytes
221 * after. The EFI stub entry point is 16 bytes after that, as
222 * the first instruction allows legacy loaders to jump over
223 * the EFI stub initialisation
224 */
225 put_unaligned_le32(text_start + 528, &buf[pe_header + 0x28]);
226#endif /* CONFIG_X86_32 */
227
228 update_pecoff_section_header(".text", text_start, text_sz);
229}
230
231#endif /* CONFIG_EFI_STUB */
232
233int main(int argc, char ** argv)
234{
141 unsigned int i, sz, setup_sectors; 235 unsigned int i, sz, setup_sectors;
142 int c; 236 int c;
143 u32 sys_size; 237 u32 sys_size;
@@ -163,6 +257,12 @@ int main(int argc, char ** argv)
163 die("Boot block hasn't got boot flag (0xAA55)"); 257 die("Boot block hasn't got boot flag (0xAA55)");
164 fclose(file); 258 fclose(file);
165 259
260#ifdef CONFIG_EFI_STUB
261 /* Reserve 0x20 bytes for .reloc section */
262 memset(buf+c, 0, PECOFF_RELOC_RESERVE);
263 c += PECOFF_RELOC_RESERVE;
264#endif
265
166 /* Pad unused space with zeros */ 266 /* Pad unused space with zeros */
167 setup_sectors = (c + 511) / 512; 267 setup_sectors = (c + 511) / 512;
168 if (setup_sectors < SETUP_SECT_MIN) 268 if (setup_sectors < SETUP_SECT_MIN)
@@ -170,6 +270,10 @@ int main(int argc, char ** argv)
170 i = setup_sectors*512; 270 i = setup_sectors*512;
171 memset(buf+c, 0, i-c); 271 memset(buf+c, 0, i-c);
172 272
273#ifdef CONFIG_EFI_STUB
274 update_pecoff_setup_and_reloc(i);
275#endif
276
173 /* Set the default root device */ 277 /* Set the default root device */
174 put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]); 278 put_unaligned_le16(DEFAULT_ROOT_DEV, &buf[508]);
175 279
@@ -194,66 +298,8 @@ int main(int argc, char ** argv)
194 put_unaligned_le32(sys_size, &buf[0x1f4]); 298 put_unaligned_le32(sys_size, &buf[0x1f4]);
195 299
196#ifdef CONFIG_EFI_STUB 300#ifdef CONFIG_EFI_STUB
197 file_sz = sz + i + ((sys_size * 16) - sz); 301 update_pecoff_text(setup_sectors * 512, sz + i + ((sys_size * 16) - sz));
198 302#endif
199 pe_header = get_unaligned_le32(&buf[0x3c]);
200
201 /* Size of image */
202 put_unaligned_le32(file_sz, &buf[pe_header + 0x50]);
203
204 /*
205 * Subtract the size of the first section (512 bytes) which
206 * includes the header and .reloc section. The remaining size
207 * is that of the .text section.
208 */
209 file_sz -= 512;
210
211 /* Size of code */
212 put_unaligned_le32(file_sz, &buf[pe_header + 0x1c]);
213
214#ifdef CONFIG_X86_32
215 /*
216 * Address of entry point.
217 *
218 * The EFI stub entry point is +16 bytes from the start of
219 * the .text section.
220 */
221 put_unaligned_le32(i + 16, &buf[pe_header + 0x28]);
222
223 /* .text size */
224 put_unaligned_le32(file_sz, &buf[pe_header + 0xb0]);
225
226 /* .text vma */
227 put_unaligned_le32(0x200, &buf[pe_header + 0xb4]);
228
229 /* .text size of initialised data */
230 put_unaligned_le32(file_sz, &buf[pe_header + 0xb8]);
231
232 /* .text file offset */
233 put_unaligned_le32(0x200, &buf[pe_header + 0xbc]);
234#else
235 /*
236 * Address of entry point. startup_32 is at the beginning and
237 * the 64-bit entry point (startup_64) is always 512 bytes
238 * after. The EFI stub entry point is 16 bytes after that, as
239 * the first instruction allows legacy loaders to jump over
240 * the EFI stub initialisation
241 */
242 put_unaligned_le32(i + 528, &buf[pe_header + 0x28]);
243
244 /* .text size */
245 put_unaligned_le32(file_sz, &buf[pe_header + 0xc0]);
246
247 /* .text vma */
248 put_unaligned_le32(0x200, &buf[pe_header + 0xc4]);
249
250 /* .text size of initialised data */
251 put_unaligned_le32(file_sz, &buf[pe_header + 0xc8]);
252
253 /* .text file offset */
254 put_unaligned_le32(0x200, &buf[pe_header + 0xcc]);
255#endif /* CONFIG_X86_32 */
256#endif /* CONFIG_EFI_STUB */
257 303
258 crc = partial_crc32(buf, i, crc); 304 crc = partial_crc32(buf, i, crc);
259 if (fwrite(buf, 1, i, stdout) != i) 305 if (fwrite(buf, 1, i, stdout) != i)
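
The build-tool refactor above consolidates the PE/COFF header pokes into update_pecoff_section_header() and friends. As a rough stand-alone sketch of the same walk — assuming a flat buf[] that already holds the image, and treating the 0xa8/0xb8 section-table offsets from the patch as givens rather than something derived here — the section lookup could be written as:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

static uint32_t rd_le32(const uint8_t *p)
{
	return p[0] | (p[1] << 8) | (p[2] << 16) | ((uint32_t)p[3] << 24);
}

static uint16_t rd_le16(const uint8_t *p)
{
	return p[0] | (p[1] << 8);
}

static void list_sections(const uint8_t *buf, int is_64bit)
{
	uint32_t pe = rd_le32(buf + 0x3c);            /* e_lfanew: PE header offset */
	uint16_t nsec = rd_le16(buf + pe + 6);        /* NumberOfSections */
	const uint8_t *sec = buf + pe + (is_64bit ? 0xb8 : 0xa8);

	while (nsec--) {
		char name[9] = { 0 };

		memcpy(name, sec, 8);                 /* section names are at most 8 bytes */
		printf("%-8s vma=%#x size=%#x\n", name,
		       rd_le32(sec + 0xc), rd_le32(sec + 0x8));
		sec += 0x28;                          /* each section header is 40 bytes */
	}
}

Each 40-byte header carries the size, VMA, initialised-data size and file-offset fields at the same 0x8/0xc/0x10/0x14 offsets the helper patches.
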
diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
index be6d9e365a80..3470624d7835 100644
--- a/arch/x86/crypto/aesni-intel_asm.S
+++ b/arch/x86/crypto/aesni-intel_asm.S
@@ -2460,10 +2460,12 @@ ENTRY(aesni_cbc_dec)
2460 pxor IN3, STATE4 2460 pxor IN3, STATE4
2461 movaps IN4, IV 2461 movaps IN4, IV
2462#else 2462#else
2463 pxor (INP), STATE2
2464 pxor 0x10(INP), STATE3
2465 pxor IN1, STATE4 2463 pxor IN1, STATE4
2466 movaps IN2, IV 2464 movaps IN2, IV
2465 movups (INP), IN1
2466 pxor IN1, STATE2
2467 movups 0x10(INP), IN2
2468 pxor IN2, STATE3
2467#endif 2469#endif
2468 movups STATE1, (OUTP) 2470 movups STATE1, (OUTP)
2469 movups STATE2, 0x10(OUTP) 2471 movups STATE2, 0x10(OUTP)
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index 98bd70faccc5..daeca56211e3 100644
--- a/arch/x86/ia32/ia32_signal.c
+++ b/arch/x86/ia32/ia32_signal.c
@@ -273,7 +273,6 @@ asmlinkage long sys32_sigreturn(struct pt_regs *regs)
273 sizeof(frame->extramask)))) 273 sizeof(frame->extramask))))
274 goto badframe; 274 goto badframe;
275 275
276 sigdelsetmask(&set, ~_BLOCKABLE);
277 set_current_blocked(&set); 276 set_current_blocked(&set);
278 277
279 if (ia32_restore_sigcontext(regs, &frame->sc, &ax)) 278 if (ia32_restore_sigcontext(regs, &frame->sc, &ax))
@@ -299,7 +298,6 @@ asmlinkage long sys32_rt_sigreturn(struct pt_regs *regs)
299 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 298 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
300 goto badframe; 299 goto badframe;
301 300
302 sigdelsetmask(&set, ~_BLOCKABLE);
303 set_current_blocked(&set); 301 set_current_blocked(&set);
304 302
305 if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 303 if (ia32_restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 724aa441de7d..0c44630d1789 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,6 +29,7 @@
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/mmu.h> 30#include <asm/mmu.h>
31#include <asm/mpspec.h> 31#include <asm/mpspec.h>
32#include <asm/realmode.h>
32 33
33#define COMPILER_DEPENDENT_INT64 long long 34#define COMPILER_DEPENDENT_INT64 long long
34#define COMPILER_DEPENDENT_UINT64 unsigned long long 35#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -116,10 +117,8 @@ static inline void acpi_disable_pci(void)
116/* Low-level suspend routine. */ 117/* Low-level suspend routine. */
117extern int acpi_suspend_lowlevel(void); 118extern int acpi_suspend_lowlevel(void);
118 119
119extern const unsigned char acpi_wakeup_code[]; 120/* Physical address to resume after wakeup */
120 121#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
121/* early initialization routine */
122extern void acpi_reserve_wakeup_memory(void);
123 122
124/* 123/*
125 * Check if the CPU can handle C2 and deeper 124 * Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
index b97596e2b68c..a6983b277220 100644
--- a/arch/x86/include/asm/bitops.h
+++ b/arch/x86/include/asm/bitops.h
@@ -15,6 +15,8 @@
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <asm/alternative.h> 16#include <asm/alternative.h>
17 17
18#define BIT_64(n) (U64_C(1) << (n))
19
18/* 20/*
19 * These have to be done with inline assembly: that way the bit-setting 21 * These have to be done with inline assembly: that way the bit-setting
20 * is guaranteed to be atomic. All bit operations return 0 if the bit 22 * is guaranteed to be atomic. All bit operations return 0 if the bit
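
BIT_64() exists because the plain BIT() macro expands to a 1UL shift, which cannot reach bits 32..63 when unsigned long is only 32 bits wide. A small stand-alone illustration (MY_BIT_64 is a local stand-in for the new macro, and the MSR-style value is made up); the MCE quirk later in this series uses the real macro to clear bit 62:

#include <stdio.h>
#include <stdint.h>

#define MY_BIT_64(n)	(UINT64_C(1) << (n))

int main(void)
{
	uint64_t val = 0xc000000000000123ULL;

	if (val & MY_BIT_64(62))
		val &= ~MY_BIT_64(62);		/* clear the CntP-style bit safely */

	printf("%#llx\n", (unsigned long long)val);
	return 0;
}
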
diff --git a/arch/x86/include/asm/ftrace.h b/arch/x86/include/asm/ftrace.h
index 18d9005d9e4f..b0767bc08740 100644
--- a/arch/x86/include/asm/ftrace.h
+++ b/arch/x86/include/asm/ftrace.h
@@ -34,7 +34,7 @@
34 34
35#ifndef __ASSEMBLY__ 35#ifndef __ASSEMBLY__
36extern void mcount(void); 36extern void mcount(void);
37extern int modifying_ftrace_code; 37extern atomic_t modifying_ftrace_code;
38 38
39static inline unsigned long ftrace_call_adjust(unsigned long addr) 39static inline unsigned long ftrace_call_adjust(unsigned long addr)
40{ 40{
diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
index 0e3793b821ef..dc580c42851c 100644
--- a/arch/x86/include/asm/nmi.h
+++ b/arch/x86/include/asm/nmi.h
@@ -54,6 +54,20 @@ struct nmiaction {
54 __register_nmi_handler((t), &fn##_na); \ 54 __register_nmi_handler((t), &fn##_na); \
55}) 55})
56 56
57/*
58 * For special handlers that register/unregister in the
59 * init section only. This should be considered rare.
60 */
61#define register_nmi_handler_initonly(t, fn, fg, n) \
62({ \
63 static struct nmiaction fn##_na __initdata = { \
64 .handler = (fn), \
65 .name = (n), \
66 .flags = (fg), \
67 }; \
68 __register_nmi_handler((t), &fn##_na); \
69})
70
57int __register_nmi_handler(unsigned int, struct nmiaction *); 71int __register_nmi_handler(unsigned int, struct nmiaction *);
58 72
59void unregister_nmi_handler(unsigned int, const char *); 73void unregister_nmi_handler(unsigned int, const char *);
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index 43876f16caf1..cb00ccc7d571 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -47,16 +47,26 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
47 * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd 47 * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
48 * operations. 48 * operations.
49 * 49 *
50 * Without THP if the mmap_sem is hold for reading, the 50 * Without THP if the mmap_sem is hold for reading, the pmd can only
51 * pmd can only transition from null to not null while pmd_read_atomic runs. 51 * transition from null to not null while pmd_read_atomic runs. So
52 * So there's no need of literally reading it atomically. 52 * we can always return atomic pmd values with this function.
53 * 53 *
54 * With THP if the mmap_sem is hold for reading, the pmd can become 54 * With THP if the mmap_sem is hold for reading, the pmd can become
55 * THP or null or point to a pte (and in turn become "stable") at any 55 * trans_huge or none or point to a pte (and in turn become "stable")
56 * time under pmd_read_atomic, so it's mandatory to read it atomically 56 * at any time under pmd_read_atomic. We could read it really
57 * with cmpxchg8b. 57 * atomically here with a atomic64_read for the THP enabled case (and
58 * it would be a whole lot simpler), but to avoid using cmpxchg8b we
59 * only return an atomic pmdval if the low part of the pmdval is later
60 * found stable (i.e. pointing to a pte). And we're returning a none
61 * pmdval if the low part of the pmd is none. In some cases the high
62 * and low part of the pmdval returned may not be consistent if THP is
63 * enabled (the low part may point to previously mapped hugepage,
 63 * enabled (the low part may point to a previously mapped hugepage,
64 * while the high part may point to a more recently mapped hugepage),
65 * but pmd_none_or_trans_huge_or_clear_bad() only needs the low part
66 * of the pmd to be read atomically to decide if the pmd is unstable
67 * or not, with the only exception of when the low part of the pmd is
68 * zero in which case we return a none pmd.
58 */ 69 */
59#ifndef CONFIG_TRANSPARENT_HUGEPAGE
60static inline pmd_t pmd_read_atomic(pmd_t *pmdp) 70static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
61{ 71{
62 pmdval_t ret; 72 pmdval_t ret;
@@ -74,12 +84,6 @@ static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
74 84
75 return (pmd_t) { ret }; 85 return (pmd_t) { ret };
76} 86}
77#else /* CONFIG_TRANSPARENT_HUGEPAGE */
78static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
79{
80 return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
81}
82#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
83 87
84static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) 88static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
85{ 89{
diff --git a/arch/x86/include/asm/posix_types_32.h b/arch/x86/include/asm/posix_types_32.h
index 99f262e04b91..8e525059e7d8 100644
--- a/arch/x86/include/asm/posix_types_32.h
+++ b/arch/x86/include/asm/posix_types_32.h
@@ -10,9 +10,6 @@
10typedef unsigned short __kernel_mode_t; 10typedef unsigned short __kernel_mode_t;
11#define __kernel_mode_t __kernel_mode_t 11#define __kernel_mode_t __kernel_mode_t
12 12
13typedef unsigned short __kernel_nlink_t;
14#define __kernel_nlink_t __kernel_nlink_t
15
16typedef unsigned short __kernel_ipc_pid_t; 13typedef unsigned short __kernel_ipc_pid_t;
17#define __kernel_ipc_pid_t __kernel_ipc_pid_t 14#define __kernel_ipc_pid_t __kernel_ipc_pid_t
18 15
diff --git a/arch/x86/include/asm/sighandling.h b/arch/x86/include/asm/sighandling.h
index ada93b3b8c66..beff97f7df37 100644
--- a/arch/x86/include/asm/sighandling.h
+++ b/arch/x86/include/asm/sighandling.h
@@ -7,8 +7,6 @@
7 7
8#include <asm/processor-flags.h> 8#include <asm/processor-flags.h>
9 9
10#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
11
12#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \ 10#define __FIX_EFLAGS (X86_EFLAGS_AC | X86_EFLAGS_OF | \
13 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \ 11 X86_EFLAGS_DF | X86_EFLAGS_TF | X86_EFLAGS_SF | \
14 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \ 12 X86_EFLAGS_ZF | X86_EFLAGS_AF | X86_EFLAGS_PF | \
diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
index 5c25de07cba8..89f794f007ec 100644
--- a/arch/x86/include/asm/thread_info.h
+++ b/arch/x86/include/asm/thread_info.h
@@ -248,7 +248,23 @@ static inline void set_restore_sigmask(void)
248{ 248{
249 struct thread_info *ti = current_thread_info(); 249 struct thread_info *ti = current_thread_info();
250 ti->status |= TS_RESTORE_SIGMASK; 250 ti->status |= TS_RESTORE_SIGMASK;
251 set_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags); 251 WARN_ON(!test_bit(TIF_SIGPENDING, (unsigned long *)&ti->flags));
252}
253static inline void clear_restore_sigmask(void)
254{
255 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
256}
257static inline bool test_restore_sigmask(void)
258{
259 return current_thread_info()->status & TS_RESTORE_SIGMASK;
260}
261static inline bool test_and_clear_restore_sigmask(void)
262{
263 struct thread_info *ti = current_thread_info();
264 if (!(ti->status & TS_RESTORE_SIGMASK))
265 return false;
266 ti->status &= ~TS_RESTORE_SIGMASK;
267 return true;
252} 268}
253 269
254static inline bool is_ia32_task(void) 270static inline bool is_ia32_task(void)
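
The new helpers turn the TS_RESTORE_SIGMASK dance into a small API: a syscall flags that the old mask must be restored, and signal delivery consumes that flag exactly once. A self-contained sketch of the pattern, with a fake task structure standing in for thread_info:

#include <stdbool.h>
#include <signal.h>

/* stand-in for the thread_info status flag handled by the helpers above */
struct sketch_task {
	bool restore_sigmask;
	sigset_t blocked;
	sigset_t saved_sigmask;
};

static void sketch_set_restore_sigmask(struct sketch_task *t)
{
	t->restore_sigmask = true;	/* caller saved the old mask in saved_sigmask */
}

static bool sketch_test_and_clear_restore_sigmask(struct sketch_task *t)
{
	if (!t->restore_sigmask)
		return false;
	t->restore_sigmask = false;
	return true;
}

/* signal delivery picks the saved mask only when the flag was set */
static sigset_t *sketch_sigmask_to_save(struct sketch_task *t)
{
	return sketch_test_and_clear_restore_sigmask(t) ?
		&t->saved_sigmask : &t->blocked;
}
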
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 04cd6882308e..e1f3a17034fc 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -33,9 +33,8 @@
33#define segment_eq(a, b) ((a).seg == (b).seg) 33#define segment_eq(a, b) ((a).seg == (b).seg)
34 34
35#define user_addr_max() (current_thread_info()->addr_limit.seg) 35#define user_addr_max() (current_thread_info()->addr_limit.seg)
36#define __addr_ok(addr) \ 36#define __addr_ok(addr) \
37 ((unsigned long __force)(addr) < \ 37 ((unsigned long __force)(addr) < user_addr_max())
38 (current_thread_info()->addr_limit.seg))
39 38
40/* 39/*
41 * Test whether a block of memory is a valid user space address. 40 * Test whether a block of memory is a valid user space address.
@@ -47,14 +46,14 @@
47 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry... 46 * This needs 33-bit (65-bit for x86_64) arithmetic. We have a carry...
48 */ 47 */
49 48
50#define __range_not_ok(addr, size) \ 49#define __range_not_ok(addr, size, limit) \
51({ \ 50({ \
52 unsigned long flag, roksum; \ 51 unsigned long flag, roksum; \
53 __chk_user_ptr(addr); \ 52 __chk_user_ptr(addr); \
54 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \ 53 asm("add %3,%1 ; sbb %0,%0 ; cmp %1,%4 ; sbb $0,%0" \
55 : "=&r" (flag), "=r" (roksum) \ 54 : "=&r" (flag), "=r" (roksum) \
56 : "1" (addr), "g" ((long)(size)), \ 55 : "1" (addr), "g" ((long)(size)), \
57 "rm" (current_thread_info()->addr_limit.seg)); \ 56 "rm" (limit)); \
58 flag; \ 57 flag; \
59}) 58})
60 59
@@ -77,7 +76,8 @@
77 * checks that the pointer is in the user space range - after calling 76 * checks that the pointer is in the user space range - after calling
78 * this function, memory access functions may still return -EFAULT. 77 * this function, memory access functions may still return -EFAULT.
79 */ 78 */
80#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) 79#define access_ok(type, addr, size) \
80 (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
81 81
82/* 82/*
83 * The exception table consists of pairs of addresses relative to the 83 * The exception table consists of pairs of addresses relative to the
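
Passing the limit explicitly lets the same carry-checked range test back both access_ok() (with user_addr_max()) and the perf callchain code later in this series (with TASK_SIZE). In portable C the check the asm performs is roughly the following; this is only an illustration of the semantics, not a replacement for the asm version:

#include <stdbool.h>
#include <stdint.h>

static bool sketch_range_ok(uintptr_t addr, uintptr_t size, uintptr_t limit)
{
	uintptr_t end = addr + size;

	if (end < addr)		/* addition wrapped: the 33/65-bit carry is set */
		return false;
	return end <= limit;	/* whole range lies below the supplied limit */
}
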
diff --git a/arch/x86/include/asm/uv/uv_bau.h b/arch/x86/include/asm/uv/uv_bau.h
index becf47b81735..6149b476d9df 100644
--- a/arch/x86/include/asm/uv/uv_bau.h
+++ b/arch/x86/include/asm/uv/uv_bau.h
@@ -149,7 +149,6 @@
149/* 4 bits of software ack period */ 149/* 4 bits of software ack period */
150#define UV2_ACK_MASK 0x7UL 150#define UV2_ACK_MASK 0x7UL
151#define UV2_ACK_UNITS_SHFT 3 151#define UV2_ACK_UNITS_SHFT 3
152#define UV2_LEG_SHFT UV2H_LB_BAU_MISC_CONTROL_USE_LEGACY_DESCRIPTOR_FORMATS_SHFT
153#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT 152#define UV2_EXT_SHFT UV2H_LB_BAU_MISC_CONTROL_ENABLE_EXTENDED_SB_STATUS_SHFT
154 153
155/* 154/*
diff --git a/arch/x86/kernel/aperture_64.c b/arch/x86/kernel/aperture_64.c
index 6e76c191a835..d5fd66f0d4cd 100644
--- a/arch/x86/kernel/aperture_64.c
+++ b/arch/x86/kernel/aperture_64.c
@@ -20,7 +20,6 @@
20#include <linux/bitops.h> 20#include <linux/bitops.h>
21#include <linux/ioport.h> 21#include <linux/ioport.h>
22#include <linux/suspend.h> 22#include <linux/suspend.h>
23#include <linux/kmemleak.h>
24#include <asm/e820.h> 23#include <asm/e820.h>
25#include <asm/io.h> 24#include <asm/io.h>
26#include <asm/iommu.h> 25#include <asm/iommu.h>
@@ -95,11 +94,6 @@ static u32 __init allocate_aperture(void)
95 return 0; 94 return 0;
96 } 95 }
97 memblock_reserve(addr, aper_size); 96 memblock_reserve(addr, aper_size);
98 /*
99 * Kmemleak should not scan this block as it may not be mapped via the
100 * kernel direct mapping.
101 */
102 kmemleak_ignore(phys_to_virt(addr));
103 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n", 97 printk(KERN_INFO "Mapping aperture over %d KB of RAM @ %lx\n",
104 aper_size >> 10, addr); 98 aper_size >> 10, addr);
105 insert_aperture_resource((u32)addr, aper_size); 99 insert_aperture_resource((u32)addr, aper_size);
diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
index ac96561d1a99..5f0ff597437c 100644
--- a/arch/x86/kernel/apic/io_apic.c
+++ b/arch/x86/kernel/apic/io_apic.c
@@ -1195,7 +1195,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1195 BUG_ON(!cfg->vector); 1195 BUG_ON(!cfg->vector);
1196 1196
1197 vector = cfg->vector; 1197 vector = cfg->vector;
1198 for_each_cpu_and(cpu, cfg->domain, cpu_online_mask) 1198 for_each_cpu(cpu, cfg->domain)
1199 per_cpu(vector_irq, cpu)[vector] = -1; 1199 per_cpu(vector_irq, cpu)[vector] = -1;
1200 1200
1201 cfg->vector = 0; 1201 cfg->vector = 0;
@@ -1203,7 +1203,7 @@ static void __clear_irq_vector(int irq, struct irq_cfg *cfg)
1203 1203
1204 if (likely(!cfg->move_in_progress)) 1204 if (likely(!cfg->move_in_progress))
1205 return; 1205 return;
1206 for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) { 1206 for_each_cpu(cpu, cfg->old_domain) {
1207 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; 1207 for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
1208 vector++) { 1208 vector++) {
1209 if (per_cpu(vector_irq, cpu)[vector] != irq) 1209 if (per_cpu(vector_irq, cpu)[vector] != irq)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 82f29e70d058..6b9333b429ba 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1101,14 +1101,20 @@ int is_debug_stack(unsigned long addr)
1101 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ)); 1101 addr > (__get_cpu_var(debug_stack_addr) - DEBUG_STKSZ));
1102} 1102}
1103 1103
1104static DEFINE_PER_CPU(u32, debug_stack_use_ctr);
1105
1104void debug_stack_set_zero(void) 1106void debug_stack_set_zero(void)
1105{ 1107{
1108 this_cpu_inc(debug_stack_use_ctr);
1106 load_idt((const struct desc_ptr *)&nmi_idt_descr); 1109 load_idt((const struct desc_ptr *)&nmi_idt_descr);
1107} 1110}
1108 1111
1109void debug_stack_reset(void) 1112void debug_stack_reset(void)
1110{ 1113{
1111 load_idt((const struct desc_ptr *)&idt_descr); 1114 if (WARN_ON(!this_cpu_read(debug_stack_use_ctr)))
1115 return;
1116 if (this_cpu_dec_return(debug_stack_use_ctr) == 0)
1117 load_idt((const struct desc_ptr *)&idt_descr);
1112} 1118}
1113 1119
1114#else /* CONFIG_X86_64 */ 1120#else /* CONFIG_X86_64 */
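
debug_stack_use_ctr makes debug_stack_set_zero()/debug_stack_reset() safe to nest: only the outermost reset may switch back to the normal IDT. A toy model of that counting scheme, with printf standing in for load_idt():

#include <stdio.h>

static unsigned int use_ctr;

static void sketch_set_zero(void)
{
	use_ctr++;
	printf("load special IDT\n");	/* always switch, as the patch does */
}

static void sketch_reset(void)
{
	if (use_ctr == 0)
		return;			/* unbalanced reset; the kernel WARNs here */
	if (--use_ctr == 0)
		printf("restore normal IDT\n");
}

int main(void)
{
	sketch_set_zero();
	sketch_set_zero();		/* nested user, e.g. irq tracing inside the handler */
	sketch_reset();			/* inner reset: keep the special IDT */
	sketch_reset();			/* outer reset: restore the normal IDT */
	return 0;
}
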
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index b772dd6ad450..da27c5d2168a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1251,15 +1251,15 @@ void mce_log_therm_throt_event(__u64 status)
1251 * poller finds an MCE, poll 2x faster. When the poller finds no more 1251 * poller finds an MCE, poll 2x faster. When the poller finds no more
1252 * errors, poll 2x slower (up to check_interval seconds). 1252 * errors, poll 2x slower (up to check_interval seconds).
1253 */ 1253 */
1254static int check_interval = 5 * 60; /* 5 minutes */ 1254static unsigned long check_interval = 5 * 60; /* 5 minutes */
1255 1255
1256static DEFINE_PER_CPU(int, mce_next_interval); /* in jiffies */ 1256static DEFINE_PER_CPU(unsigned long, mce_next_interval); /* in jiffies */
1257static DEFINE_PER_CPU(struct timer_list, mce_timer); 1257static DEFINE_PER_CPU(struct timer_list, mce_timer);
1258 1258
1259static void mce_start_timer(unsigned long data) 1259static void mce_timer_fn(unsigned long data)
1260{ 1260{
1261 struct timer_list *t = &per_cpu(mce_timer, data); 1261 struct timer_list *t = &__get_cpu_var(mce_timer);
1262 int *n; 1262 unsigned long iv;
1263 1263
1264 WARN_ON(smp_processor_id() != data); 1264 WARN_ON(smp_processor_id() != data);
1265 1265
@@ -1272,13 +1272,14 @@ static void mce_start_timer(unsigned long data)
1272 * Alert userspace if needed. If we logged an MCE, reduce the 1272 * Alert userspace if needed. If we logged an MCE, reduce the
1273 * polling interval, otherwise increase the polling interval. 1273 * polling interval, otherwise increase the polling interval.
1274 */ 1274 */
1275 n = &__get_cpu_var(mce_next_interval); 1275 iv = __this_cpu_read(mce_next_interval);
1276 if (mce_notify_irq()) 1276 if (mce_notify_irq())
1277 *n = max(*n/2, HZ/100); 1277 iv = max(iv / 2, (unsigned long) HZ/100);
1278 else 1278 else
1279 *n = min(*n*2, (int)round_jiffies_relative(check_interval*HZ)); 1279 iv = min(iv * 2, round_jiffies_relative(check_interval * HZ));
1280 __this_cpu_write(mce_next_interval, iv);
1280 1281
1281 t->expires = jiffies + *n; 1282 t->expires = jiffies + iv;
1282 add_timer_on(t, smp_processor_id()); 1283 add_timer_on(t, smp_processor_id());
1283} 1284}
1284 1285
@@ -1472,9 +1473,9 @@ static int __cpuinit __mcheck_cpu_apply_quirks(struct cpuinfo_x86 *c)
1472 rdmsrl(msrs[i], val); 1473 rdmsrl(msrs[i], val);
1473 1474
1474 /* CntP bit set? */ 1475 /* CntP bit set? */
1475 if (val & BIT(62)) { 1476 if (val & BIT_64(62)) {
1476 val &= ~BIT(62); 1477 val &= ~BIT_64(62);
1477 wrmsrl(msrs[i], val); 1478 wrmsrl(msrs[i], val);
1478 } 1479 }
1479 } 1480 }
1480 1481
@@ -1556,17 +1557,17 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
1556static void __mcheck_cpu_init_timer(void) 1557static void __mcheck_cpu_init_timer(void)
1557{ 1558{
1558 struct timer_list *t = &__get_cpu_var(mce_timer); 1559 struct timer_list *t = &__get_cpu_var(mce_timer);
1559 int *n = &__get_cpu_var(mce_next_interval); 1560 unsigned long iv = check_interval * HZ;
1560 1561
1561 setup_timer(t, mce_start_timer, smp_processor_id()); 1562 setup_timer(t, mce_timer_fn, smp_processor_id());
1562 1563
1563 if (mce_ignore_ce) 1564 if (mce_ignore_ce)
1564 return; 1565 return;
1565 1566
1566 *n = check_interval * HZ; 1567 __this_cpu_write(mce_next_interval, iv);
1567 if (!*n) 1568 if (!iv)
1568 return; 1569 return;
1569 t->expires = round_jiffies(jiffies + *n); 1570 t->expires = round_jiffies(jiffies + iv);
1570 add_timer_on(t, smp_processor_id()); 1571 add_timer_on(t, smp_processor_id());
1571} 1572}
1572 1573
@@ -2276,7 +2277,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
2276 case CPU_DOWN_FAILED_FROZEN: 2277 case CPU_DOWN_FAILED_FROZEN:
2277 if (!mce_ignore_ce && check_interval) { 2278 if (!mce_ignore_ce && check_interval) {
2278 t->expires = round_jiffies(jiffies + 2279 t->expires = round_jiffies(jiffies +
2279 __get_cpu_var(mce_next_interval)); 2280 per_cpu(mce_next_interval, cpu));
2280 add_timer_on(t, cpu); 2281 add_timer_on(t, cpu);
2281 } 2282 }
2282 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1); 2283 smp_call_function_single(cpu, mce_reenable_cpu, &action, 1);
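
Switching mce_next_interval to unsigned long also drops the int cast around round_jiffies_relative() and removes any risk of signed overflow as the interval doubles. The back-off policy itself is simple; a stand-alone sketch with made-up HZ and check_interval values:

#include <stdio.h>
#include <stdbool.h>

#define HZ		1000UL
#define CHECK_INTERVAL	(5UL * 60UL)		/* seconds, as in the driver */

static unsigned long next_interval(unsigned long iv, bool found_event)
{
	if (found_event)
		return iv / 2 > HZ / 100 ? iv / 2 : HZ / 100;	/* poll 2x faster */
	return iv * 2 < CHECK_INTERVAL * HZ ? iv * 2 : CHECK_INTERVAL * HZ;
}

int main(void)
{
	unsigned long iv = CHECK_INTERVAL * HZ;

	iv = next_interval(iv, true);		/* error seen: poll faster */
	iv = next_interval(iv, false);		/* quiet again: back off */
	printf("%lu jiffies\n", iv);
	return 0;
}
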
diff --git a/arch/x86/kernel/cpu/mtrr/cleanup.c b/arch/x86/kernel/cpu/mtrr/cleanup.c
index ac140c7be396..bdda2e6c673b 100644
--- a/arch/x86/kernel/cpu/mtrr/cleanup.c
+++ b/arch/x86/kernel/cpu/mtrr/cleanup.c
@@ -266,7 +266,7 @@ range_to_mtrr(unsigned int reg, unsigned long range_startk,
266 if (align > max_align) 266 if (align > max_align)
267 align = max_align; 267 align = max_align;
268 268
269 sizek = 1 << align; 269 sizek = 1UL << align;
270 if (debug_print) { 270 if (debug_print) {
271 char start_factor = 'K', size_factor = 'K'; 271 char start_factor = 'K', size_factor = 'K';
272 unsigned long start_base, size_base; 272 unsigned long start_base, size_base;
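
The one-character fix matters because the literal 1 has type int, so the shifted value is wrong (or undefined) once align reaches 31 or more. A minimal illustration, assuming a 64-bit unsigned long as on the configurations this path cares about:

#include <stdio.h>

int main(void)
{
	unsigned long align = 32;

	/* "1 << align" would shift an int past its width: undefined behaviour */
	unsigned long sizek = 1UL << align;	/* correct: 4 GiB expressed in KiB units */

	printf("sizek=%lu\n", sizek);
	return 0;
}
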
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index e049d6da0183..c4706cf9c011 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -1496,6 +1496,7 @@ static struct cpu_hw_events *allocate_fake_cpuc(void)
1496 if (!cpuc->shared_regs) 1496 if (!cpuc->shared_regs)
1497 goto error; 1497 goto error;
1498 } 1498 }
1499 cpuc->is_fake = 1;
1499 return cpuc; 1500 return cpuc;
1500error: 1501error:
1501 free_fake_cpuc(cpuc); 1502 free_fake_cpuc(cpuc);
@@ -1756,6 +1757,12 @@ perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
1756 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry); 1757 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
1757} 1758}
1758 1759
1760static inline int
1761valid_user_frame(const void __user *fp, unsigned long size)
1762{
1763 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
1764}
1765
1759#ifdef CONFIG_COMPAT 1766#ifdef CONFIG_COMPAT
1760 1767
1761#include <asm/compat.h> 1768#include <asm/compat.h>
@@ -1780,7 +1787,7 @@ perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1780 if (bytes != sizeof(frame)) 1787 if (bytes != sizeof(frame))
1781 break; 1788 break;
1782 1789
1783 if (fp < compat_ptr(regs->sp)) 1790 if (!valid_user_frame(fp, sizeof(frame)))
1784 break; 1791 break;
1785 1792
1786 perf_callchain_store(entry, frame.return_address); 1793 perf_callchain_store(entry, frame.return_address);
@@ -1826,7 +1833,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
1826 if (bytes != sizeof(frame)) 1833 if (bytes != sizeof(frame))
1827 break; 1834 break;
1828 1835
1829 if ((unsigned long)fp < regs->sp) 1836 if (!valid_user_frame(fp, sizeof(frame)))
1830 break; 1837 break;
1831 1838
1832 perf_callchain_store(entry, frame.return_address); 1839 perf_callchain_store(entry, frame.return_address);
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 6638aaf54493..7241e2fc3c17 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -117,6 +117,7 @@ struct cpu_hw_events {
117 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */ 117 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
118 118
119 unsigned int group_flag; 119 unsigned int group_flag;
120 int is_fake;
120 121
121 /* 122 /*
122 * Intel DebugStore bits 123 * Intel DebugStore bits
@@ -364,6 +365,7 @@ struct x86_pmu {
364 int pebs_record_size; 365 int pebs_record_size;
365 void (*drain_pebs)(struct pt_regs *regs); 366 void (*drain_pebs)(struct pt_regs *regs);
366 struct event_constraint *pebs_constraints; 367 struct event_constraint *pebs_constraints;
368 void (*pebs_aliases)(struct perf_event *event);
367 369
368 /* 370 /*
369 * Intel LBR 371 * Intel LBR
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index 166546ec6aef..187c294bc658 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1119,27 +1119,33 @@ intel_bts_constraints(struct perf_event *event)
1119 return NULL; 1119 return NULL;
1120} 1120}
1121 1121
1122static bool intel_try_alt_er(struct perf_event *event, int orig_idx) 1122static int intel_alt_er(int idx)
1123{ 1123{
1124 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1)) 1124 if (!(x86_pmu.er_flags & ERF_HAS_RSP_1))
1125 return false; 1125 return idx;
1126 1126
1127 if (event->hw.extra_reg.idx == EXTRA_REG_RSP_0) { 1127 if (idx == EXTRA_REG_RSP_0)
1128 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1128 return EXTRA_REG_RSP_1;
1129 event->hw.config |= 0x01bb; 1129
1130 event->hw.extra_reg.idx = EXTRA_REG_RSP_1; 1130 if (idx == EXTRA_REG_RSP_1)
1131 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1; 1131 return EXTRA_REG_RSP_0;
1132 } else if (event->hw.extra_reg.idx == EXTRA_REG_RSP_1) { 1132
1133 return idx;
1134}
1135
1136static void intel_fixup_er(struct perf_event *event, int idx)
1137{
1138 event->hw.extra_reg.idx = idx;
1139
1140 if (idx == EXTRA_REG_RSP_0) {
1133 event->hw.config &= ~INTEL_ARCH_EVENT_MASK; 1141 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1134 event->hw.config |= 0x01b7; 1142 event->hw.config |= 0x01b7;
1135 event->hw.extra_reg.idx = EXTRA_REG_RSP_0;
1136 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0; 1143 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_0;
1144 } else if (idx == EXTRA_REG_RSP_1) {
1145 event->hw.config &= ~INTEL_ARCH_EVENT_MASK;
1146 event->hw.config |= 0x01bb;
1147 event->hw.extra_reg.reg = MSR_OFFCORE_RSP_1;
1137 } 1148 }
1138
1139 if (event->hw.extra_reg.idx == orig_idx)
1140 return false;
1141
1142 return true;
1143} 1149}
1144 1150
1145/* 1151/*
@@ -1157,14 +1163,18 @@ __intel_shared_reg_get_constraints(struct cpu_hw_events *cpuc,
1157 struct event_constraint *c = &emptyconstraint; 1163 struct event_constraint *c = &emptyconstraint;
1158 struct er_account *era; 1164 struct er_account *era;
1159 unsigned long flags; 1165 unsigned long flags;
1160 int orig_idx = reg->idx; 1166 int idx = reg->idx;
1161 1167
1162 /* already allocated shared msr */ 1168 /*
1163 if (reg->alloc) 1169 * reg->alloc can be set due to existing state, so for fake cpuc we
1170 * need to ignore this, otherwise we might fail to allocate proper fake
1171 * state for this extra reg constraint. Also see the comment below.
1172 */
1173 if (reg->alloc && !cpuc->is_fake)
1164 return NULL; /* call x86_get_event_constraint() */ 1174 return NULL; /* call x86_get_event_constraint() */
1165 1175
1166again: 1176again:
1167 era = &cpuc->shared_regs->regs[reg->idx]; 1177 era = &cpuc->shared_regs->regs[idx];
1168 /* 1178 /*
1169 * we use spin_lock_irqsave() to avoid lockdep issues when 1179 * we use spin_lock_irqsave() to avoid lockdep issues when
1170 * passing a fake cpuc 1180 * passing a fake cpuc
@@ -1173,6 +1183,29 @@ again:
1173 1183
1174 if (!atomic_read(&era->ref) || era->config == reg->config) { 1184 if (!atomic_read(&era->ref) || era->config == reg->config) {
1175 1185
1186 /*
1187 * If its a fake cpuc -- as per validate_{group,event}() we
 1187 * If it's a fake cpuc -- as per validate_{group,event}() we
1188 * shouldn't touch event state and we can avoid doing so
1189 * since both will only call get_event_constraints() once
1190 * on each event, this avoids the need for reg->alloc.
1191 *
1192 * Not doing the ER fixup will only result in era->reg being
1193 * wrong, but since we won't actually try and program hardware
1194 * this isn't a problem either.
1195 */
1196 if (!cpuc->is_fake) {
1197 if (idx != reg->idx)
1198 intel_fixup_er(event, idx);
1199
1200 /*
1201 * x86_schedule_events() can call get_event_constraints()
1202 * multiple times on events in the case of incremental
1203 * scheduling(). reg->alloc ensures we only do the ER
1204 * allocation once.
1205 */
1206 reg->alloc = 1;
1207 }
1208
1176 /* lock in msr value */ 1209 /* lock in msr value */
1177 era->config = reg->config; 1210 era->config = reg->config;
1178 era->reg = reg->reg; 1211 era->reg = reg->reg;
@@ -1180,17 +1213,17 @@ again:
1180 /* one more user */ 1213 /* one more user */
1181 atomic_inc(&era->ref); 1214 atomic_inc(&era->ref);
1182 1215
1183 /* no need to reallocate during incremental event scheduling */
1184 reg->alloc = 1;
1185
1186 /* 1216 /*
1187 * need to call x86_get_event_constraint() 1217 * need to call x86_get_event_constraint()
1188 * to check if associated event has constraints 1218 * to check if associated event has constraints
1189 */ 1219 */
1190 c = NULL; 1220 c = NULL;
1191 } else if (intel_try_alt_er(event, orig_idx)) { 1221 } else {
1192 raw_spin_unlock_irqrestore(&era->lock, flags); 1222 idx = intel_alt_er(idx);
1193 goto again; 1223 if (idx != reg->idx) {
1224 raw_spin_unlock_irqrestore(&era->lock, flags);
1225 goto again;
1226 }
1194 } 1227 }
1195 raw_spin_unlock_irqrestore(&era->lock, flags); 1228 raw_spin_unlock_irqrestore(&era->lock, flags);
1196 1229
@@ -1204,11 +1237,14 @@ __intel_shared_reg_put_constraints(struct cpu_hw_events *cpuc,
1204 struct er_account *era; 1237 struct er_account *era;
1205 1238
1206 /* 1239 /*
1207 * only put constraint if extra reg was actually 1240 * Only put constraint if extra reg was actually allocated. Also takes
1208 * allocated. Also takes care of event which do 1241 * care of event which do not use an extra shared reg.
1209 * not use an extra shared reg 1242 *
1243 * Also, if this is a fake cpuc we shouldn't touch any event state
1244 * (reg->alloc) and we don't care about leaving inconsistent cpuc state
1245 * either since it'll be thrown out.
1210 */ 1246 */
1211 if (!reg->alloc) 1247 if (!reg->alloc || cpuc->is_fake)
1212 return; 1248 return;
1213 1249
1214 era = &cpuc->shared_regs->regs[reg->idx]; 1250 era = &cpuc->shared_regs->regs[reg->idx];
@@ -1300,15 +1336,9 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
1300 intel_put_shared_regs_event_constraints(cpuc, event); 1336 intel_put_shared_regs_event_constraints(cpuc, event);
1301} 1337}
1302 1338
1303static int intel_pmu_hw_config(struct perf_event *event) 1339static void intel_pebs_aliases_core2(struct perf_event *event)
1304{ 1340{
1305 int ret = x86_pmu_hw_config(event); 1341 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1306
1307 if (ret)
1308 return ret;
1309
1310 if (event->attr.precise_ip &&
1311 (event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1312 /* 1342 /*
1313 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P 1343 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1314 * (0x003c) so that we can use it with PEBS. 1344 * (0x003c) so that we can use it with PEBS.
@@ -1329,10 +1359,48 @@ static int intel_pmu_hw_config(struct perf_event *event)
1329 */ 1359 */
1330 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16); 1360 u64 alt_config = X86_CONFIG(.event=0xc0, .inv=1, .cmask=16);
1331 1361
1362 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1363 event->hw.config = alt_config;
1364 }
1365}
1366
1367static void intel_pebs_aliases_snb(struct perf_event *event)
1368{
1369 if ((event->hw.config & X86_RAW_EVENT_MASK) == 0x003c) {
1370 /*
1371 * Use an alternative encoding for CPU_CLK_UNHALTED.THREAD_P
1372 * (0x003c) so that we can use it with PEBS.
1373 *
1374 * The regular CPU_CLK_UNHALTED.THREAD_P event (0x003c) isn't
1375 * PEBS capable. However we can use UOPS_RETIRED.ALL
1376 * (0x01c2), which is a PEBS capable event, to get the same
1377 * count.
1378 *
1379 * UOPS_RETIRED.ALL counts the number of cycles that retires
1380 * CNTMASK micro-ops. By setting CNTMASK to a value (16)
1381 * larger than the maximum number of micro-ops that can be
1382 * retired per cycle (4) and then inverting the condition, we
1383 * count all cycles that retire 16 or less micro-ops, which
1384 * is every cycle.
1385 *
1386 * Thereby we gain a PEBS capable cycle counter.
1387 */
1388 u64 alt_config = X86_CONFIG(.event=0xc2, .umask=0x01, .inv=1, .cmask=16);
1332 1389
1333 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK); 1390 alt_config |= (event->hw.config & ~X86_RAW_EVENT_MASK);
1334 event->hw.config = alt_config; 1391 event->hw.config = alt_config;
1335 } 1392 }
1393}
1394
1395static int intel_pmu_hw_config(struct perf_event *event)
1396{
1397 int ret = x86_pmu_hw_config(event);
1398
1399 if (ret)
1400 return ret;
1401
1402 if (event->attr.precise_ip && x86_pmu.pebs_aliases)
1403 x86_pmu.pebs_aliases(event);
1336 1404
1337 if (intel_pmu_needs_lbr_smpl(event)) { 1405 if (intel_pmu_needs_lbr_smpl(event)) {
1338 ret = intel_pmu_setup_lbr_filter(event); 1406 ret = intel_pmu_setup_lbr_filter(event);
@@ -1607,6 +1675,7 @@ static __initconst const struct x86_pmu intel_pmu = {
1607 .max_period = (1ULL << 31) - 1, 1675 .max_period = (1ULL << 31) - 1,
1608 .get_event_constraints = intel_get_event_constraints, 1676 .get_event_constraints = intel_get_event_constraints,
1609 .put_event_constraints = intel_put_event_constraints, 1677 .put_event_constraints = intel_put_event_constraints,
1678 .pebs_aliases = intel_pebs_aliases_core2,
1610 1679
1611 .format_attrs = intel_arch3_formats_attr, 1680 .format_attrs = intel_arch3_formats_attr,
1612 1681
@@ -1840,8 +1909,9 @@ __init int intel_pmu_init(void)
1840 break; 1909 break;
1841 1910
1842 case 42: /* SandyBridge */ 1911 case 42: /* SandyBridge */
1843 x86_add_quirk(intel_sandybridge_quirk);
1844 case 45: /* SandyBridge, "Romely-EP" */ 1912 case 45: /* SandyBridge, "Romely-EP" */
1913 x86_add_quirk(intel_sandybridge_quirk);
1914 case 58: /* IvyBridge */
1845 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids, 1915 memcpy(hw_cache_event_ids, snb_hw_cache_event_ids,
1846 sizeof(hw_cache_event_ids)); 1916 sizeof(hw_cache_event_ids));
1847 1917
@@ -1849,6 +1919,7 @@ __init int intel_pmu_init(void)
1849 1919
1850 x86_pmu.event_constraints = intel_snb_event_constraints; 1920 x86_pmu.event_constraints = intel_snb_event_constraints;
1851 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints; 1921 x86_pmu.pebs_constraints = intel_snb_pebs_event_constraints;
1922 x86_pmu.pebs_aliases = intel_pebs_aliases_snb;
1852 x86_pmu.extra_regs = intel_snb_extra_regs; 1923 x86_pmu.extra_regs = intel_snb_extra_regs;
1853 /* all extra regs are per-cpu when HT is on */ 1924 /* all extra regs are per-cpu when HT is on */
1854 x86_pmu.er_flags |= ERF_HAS_RSP_1; 1925 x86_pmu.er_flags |= ERF_HAS_RSP_1;
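
Splitting the PEBS alias into per-family callbacks keeps the Core2-style INST_RETIRED encoding and the SandyBridge UOPS_RETIRED.ALL encoding apart. The bit layout the aliases rely on can be shown stand-alone; the kernel's X86_CONFIG() helper is internal, so the field macros below are local stand-ins following the architectural perfmon event format:

#include <stdio.h>
#include <stdint.h>

#define EVSEL_EVENT(e)	((uint64_t)(e) & 0xff)
#define EVSEL_UMASK(u)	(((uint64_t)(u) & 0xff) << 8)
#define EVSEL_INV	(1ULL << 23)
#define EVSEL_CMASK(c)	(((uint64_t)(c) & 0xff) << 24)

int main(void)
{
	/* UOPS_RETIRED.ALL (0x01c2), inverted, counter mask 16: counts every cycle */
	uint64_t snb_alias = EVSEL_EVENT(0xc2) | EVSEL_UMASK(0x01) |
			     EVSEL_INV | EVSEL_CMASK(16);

	printf("SNB PEBS cycle alias: %#llx\n",
	       (unsigned long long)snb_alias);
	return 0;
}

Because no cycle can retire more than 4 micro-ops, "retired 16 or fewer micro-ops" is true on every cycle, which is what turns a retirement event into a PEBS-capable cycle counter.
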
diff --git a/arch/x86/kernel/cpu/perf_event_intel_ds.c b/arch/x86/kernel/cpu/perf_event_intel_ds.c
index 5a3edc27f6e5..35e2192df9f4 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_ds.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_ds.c
@@ -400,14 +400,7 @@ struct event_constraint intel_snb_pebs_event_constraints[] = {
400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */ 400 INTEL_EVENT_CONSTRAINT(0xc4, 0xf), /* BR_INST_RETIRED.* */
401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */ 401 INTEL_EVENT_CONSTRAINT(0xc5, 0xf), /* BR_MISP_RETIRED.* */
402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */ 402 INTEL_EVENT_CONSTRAINT(0xcd, 0x8), /* MEM_TRANS_RETIRED.* */
403 INTEL_UEVENT_CONSTRAINT(0x11d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_LOADS */ 403 INTEL_EVENT_CONSTRAINT(0xd0, 0xf), /* MEM_UOP_RETIRED.* */
404 INTEL_UEVENT_CONSTRAINT(0x12d0, 0xf), /* MEM_UOP_RETIRED.STLB_MISS_STORES */
405 INTEL_UEVENT_CONSTRAINT(0x21d0, 0xf), /* MEM_UOP_RETIRED.LOCK_LOADS */
406 INTEL_UEVENT_CONSTRAINT(0x22d0, 0xf), /* MEM_UOP_RETIRED.LOCK_STORES */
407 INTEL_UEVENT_CONSTRAINT(0x41d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_LOADS */
408 INTEL_UEVENT_CONSTRAINT(0x42d0, 0xf), /* MEM_UOP_RETIRED.SPLIT_STORES */
409 INTEL_UEVENT_CONSTRAINT(0x81d0, 0xf), /* MEM_UOP_RETIRED.ANY_LOADS */
410 INTEL_UEVENT_CONSTRAINT(0x82d0, 0xf), /* MEM_UOP_RETIRED.ANY_STORES */
411 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */ 404 INTEL_EVENT_CONSTRAINT(0xd1, 0xf), /* MEM_LOAD_UOPS_RETIRED.* */
412 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */ 405 INTEL_EVENT_CONSTRAINT(0xd2, 0xf), /* MEM_LOAD_UOPS_LLC_HIT_RETIRED.* */
413 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */ 406 INTEL_UEVENT_CONSTRAINT(0x02d4, 0xf), /* MEM_LOAD_UOPS_MISC_RETIRED.LLC_MISS */
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
index 01ccf9b71473..623f28837476 100644
--- a/arch/x86/kernel/entry_32.S
+++ b/arch/x86/kernel/entry_32.S
@@ -316,7 +316,6 @@ ret_from_exception:
316 preempt_stop(CLBR_ANY) 316 preempt_stop(CLBR_ANY)
317ret_from_intr: 317ret_from_intr:
318 GET_THREAD_INFO(%ebp) 318 GET_THREAD_INFO(%ebp)
319resume_userspace_sig:
320#ifdef CONFIG_VM86 319#ifdef CONFIG_VM86
321 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS 320 movl PT_EFLAGS(%esp), %eax # mix EFLAGS and CS
322 movb PT_CS(%esp), %al 321 movb PT_CS(%esp), %al
@@ -615,9 +614,13 @@ work_notifysig: # deal with pending signals and
615 # vm86-space 614 # vm86-space
616 TRACE_IRQS_ON 615 TRACE_IRQS_ON
617 ENABLE_INTERRUPTS(CLBR_NONE) 616 ENABLE_INTERRUPTS(CLBR_NONE)
617 movb PT_CS(%esp), %bl
618 andb $SEGMENT_RPL_MASK, %bl
619 cmpb $USER_RPL, %bl
620 jb resume_kernel
618 xorl %edx, %edx 621 xorl %edx, %edx
619 call do_notify_resume 622 call do_notify_resume
620 jmp resume_userspace_sig 623 jmp resume_userspace
621 624
622 ALIGN 625 ALIGN
623work_notifysig_v86: 626work_notifysig_v86:
@@ -630,9 +633,13 @@ work_notifysig_v86:
630#endif 633#endif
631 TRACE_IRQS_ON 634 TRACE_IRQS_ON
632 ENABLE_INTERRUPTS(CLBR_NONE) 635 ENABLE_INTERRUPTS(CLBR_NONE)
636 movb PT_CS(%esp), %bl
637 andb $SEGMENT_RPL_MASK, %bl
638 cmpb $USER_RPL, %bl
639 jb resume_kernel
633 xorl %edx, %edx 640 xorl %edx, %edx
634 call do_notify_resume 641 call do_notify_resume
635 jmp resume_userspace_sig 642 jmp resume_userspace
636END(work_pending) 643END(work_pending)
637 644
638 # perform syscall exit tracing 645 # perform syscall exit tracing
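
Both notify paths now bail out to resume_kernel when the interrupted CS does not carry the user RPL, so do_notify_resume() is never run on behalf of a kernel-mode frame. The equivalent check in C terms, with the constants as used in the assembly above:

#include <stdbool.h>

#define SEGMENT_RPL_MASK	0x3
#define USER_RPL		0x3

static bool returning_to_user(unsigned int cs)
{
	/* only frames that will return to ring 3 take the signal path */
	return (cs & SEGMENT_RPL_MASK) >= USER_RPL;
}
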
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 320852d02026..7d65133b51be 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -191,6 +191,44 @@ ENDPROC(native_usergs_sysret64)
191.endm 191.endm
192 192
193/* 193/*
194 * When dynamic function tracer is enabled it will add a breakpoint
195 * to all locations that it is about to modify, sync CPUs, update
196 * all the code, sync CPUs, then remove the breakpoints. In this time
197 * if lockdep is enabled, it might jump back into the debug handler
198 * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
199 *
200 * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
201 * make sure the stack pointer does not get reset back to the top
202 * of the debug stack, and instead just reuses the current stack.
203 */
204#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
205
206.macro TRACE_IRQS_OFF_DEBUG
207 call debug_stack_set_zero
208 TRACE_IRQS_OFF
209 call debug_stack_reset
210.endm
211
212.macro TRACE_IRQS_ON_DEBUG
213 call debug_stack_set_zero
214 TRACE_IRQS_ON
215 call debug_stack_reset
216.endm
217
218.macro TRACE_IRQS_IRETQ_DEBUG offset=ARGOFFSET
219 bt $9,EFLAGS-\offset(%rsp) /* interrupts off? */
220 jnc 1f
221 TRACE_IRQS_ON_DEBUG
2221:
223.endm
224
225#else
226# define TRACE_IRQS_OFF_DEBUG TRACE_IRQS_OFF
227# define TRACE_IRQS_ON_DEBUG TRACE_IRQS_ON
228# define TRACE_IRQS_IRETQ_DEBUG TRACE_IRQS_IRETQ
229#endif
230
231/*
194 * C code is not supposed to know about undefined top of stack. Every time 232 * C code is not supposed to know about undefined top of stack. Every time
195 * a C function with an pt_regs argument is called from the SYSCALL based 233 * a C function with an pt_regs argument is called from the SYSCALL based
196 * fast path FIXUP_TOP_OF_STACK is needed. 234 * fast path FIXUP_TOP_OF_STACK is needed.
@@ -1098,7 +1136,7 @@ ENTRY(\sym)
1098 subq $ORIG_RAX-R15, %rsp 1136 subq $ORIG_RAX-R15, %rsp
1099 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15 1137 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
1100 call save_paranoid 1138 call save_paranoid
1101 TRACE_IRQS_OFF 1139 TRACE_IRQS_OFF_DEBUG
1102 movq %rsp,%rdi /* pt_regs pointer */ 1140 movq %rsp,%rdi /* pt_regs pointer */
1103 xorl %esi,%esi /* no error code */ 1141 xorl %esi,%esi /* no error code */
1104 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist) 1142 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
@@ -1393,7 +1431,7 @@ paranoidzeroentry machine_check *machine_check_vector(%rip)
1393ENTRY(paranoid_exit) 1431ENTRY(paranoid_exit)
1394 DEFAULT_FRAME 1432 DEFAULT_FRAME
1395 DISABLE_INTERRUPTS(CLBR_NONE) 1433 DISABLE_INTERRUPTS(CLBR_NONE)
1396 TRACE_IRQS_OFF 1434 TRACE_IRQS_OFF_DEBUG
1397 testl %ebx,%ebx /* swapgs needed? */ 1435 testl %ebx,%ebx /* swapgs needed? */
1398 jnz paranoid_restore 1436 jnz paranoid_restore
1399 testl $3,CS(%rsp) 1437 testl $3,CS(%rsp)
@@ -1404,7 +1442,7 @@ paranoid_swapgs:
1404 RESTORE_ALL 8 1442 RESTORE_ALL 8
1405 jmp irq_return 1443 jmp irq_return
1406paranoid_restore: 1444paranoid_restore:
1407 TRACE_IRQS_IRETQ 0 1445 TRACE_IRQS_IRETQ_DEBUG 0
1408 RESTORE_ALL 8 1446 RESTORE_ALL 8
1409 jmp irq_return 1447 jmp irq_return
1410paranoid_userspace: 1448paranoid_userspace:
diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
index 32ff36596ab1..c3a7cb4bf6e6 100644
--- a/arch/x86/kernel/ftrace.c
+++ b/arch/x86/kernel/ftrace.c
@@ -100,7 +100,7 @@ static const unsigned char *ftrace_nop_replace(void)
100} 100}
101 101
102static int 102static int
103ftrace_modify_code(unsigned long ip, unsigned const char *old_code, 103ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
104 unsigned const char *new_code) 104 unsigned const char *new_code)
105{ 105{
106 unsigned char replaced[MCOUNT_INSN_SIZE]; 106 unsigned char replaced[MCOUNT_INSN_SIZE];
@@ -141,7 +141,20 @@ int ftrace_make_nop(struct module *mod,
141 old = ftrace_call_replace(ip, addr); 141 old = ftrace_call_replace(ip, addr);
142 new = ftrace_nop_replace(); 142 new = ftrace_nop_replace();
143 143
144 return ftrace_modify_code(rec->ip, old, new); 144 /*
145 * On boot up, and when modules are loaded, the MCOUNT_ADDR
146 * is converted to a nop, and will never become MCOUNT_ADDR
147 * again. This code is either running before SMP (on boot up)
148 * or before the code will ever be executed (module load).
149 * We do not want to use the breakpoint version in this case,
150 * just modify the code directly.
151 */
152 if (addr == MCOUNT_ADDR)
153 return ftrace_modify_code_direct(rec->ip, old, new);
154
155 /* Normal cases use add_brk_on_nop */
156 WARN_ONCE(1, "invalid use of ftrace_make_nop");
157 return -EINVAL;
145} 158}
146 159
147int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) 160int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
@@ -152,9 +165,47 @@ int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
152 old = ftrace_nop_replace(); 165 old = ftrace_nop_replace();
153 new = ftrace_call_replace(ip, addr); 166 new = ftrace_call_replace(ip, addr);
154 167
155 return ftrace_modify_code(rec->ip, old, new); 168 /* Should only be called when module is loaded */
169 return ftrace_modify_code_direct(rec->ip, old, new);
156} 170}
157 171
172/*
173 * The modifying_ftrace_code is used to tell the breakpoint
174 * handler to call ftrace_int3_handler(). If it fails to
175 * call this handler for a breakpoint added by ftrace, then
176 * the kernel may crash.
177 *
178 * As atomic_writes on x86 do not need a barrier, we do not
 178 * As atomic writes on x86 do not need a barrier, we do not
179 * need to add smp_mb()s for this to work. It is also considered
180 * that we can not read the modifying_ftrace_code before
181 * executing the breakpoint. That would be quite remarkable if
182 * it could do that. Here's the flow that is required:
183 *
184 * CPU-0 CPU-1
185 *
186 * atomic_inc(mfc);
187 * write int3s
188 * <trap-int3> // implicit (r)mb
189 * if (atomic_read(mfc))
190 * call ftrace_int3_handler()
191 *
192 * Then when we are finished:
193 *
194 * atomic_dec(mfc);
195 *
196 * If we hit a breakpoint that was not set by ftrace, it does not
197 * matter if ftrace_int3_handler() is called or not. It will
198 * simply be ignored. But it is crucial that a ftrace nop/caller
199 * breakpoint is handled. No other user should ever place a
200 * breakpoint on an ftrace nop/caller location. It must only
201 * be done by this code.
202 */
203atomic_t modifying_ftrace_code __read_mostly;
204
205static int
206ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
207 unsigned const char *new_code);
208
158int ftrace_update_ftrace_func(ftrace_func_t func) 209int ftrace_update_ftrace_func(ftrace_func_t func)
159{ 210{
160 unsigned long ip = (unsigned long)(&ftrace_call); 211 unsigned long ip = (unsigned long)(&ftrace_call);
@@ -163,13 +214,17 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
163 214
164 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); 215 memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
165 new = ftrace_call_replace(ip, (unsigned long)func); 216 new = ftrace_call_replace(ip, (unsigned long)func);
217
218 /* See comment above by declaration of modifying_ftrace_code */
219 atomic_inc(&modifying_ftrace_code);
220
166 ret = ftrace_modify_code(ip, old, new); 221 ret = ftrace_modify_code(ip, old, new);
167 222
223 atomic_dec(&modifying_ftrace_code);
224
168 return ret; 225 return ret;
169} 226}
170 227
171int modifying_ftrace_code __read_mostly;
172
173/* 228/*
174 * A breakpoint was added to the code address we are about to 229 * A breakpoint was added to the code address we are about to
175 * modify, and this is the handle that will just skip over it. 230 * modify, and this is the handle that will just skip over it.
@@ -489,13 +544,46 @@ void ftrace_replace_code(int enable)
489 } 544 }
490} 545}
491 546
547static int
548ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
549 unsigned const char *new_code)
550{
551 int ret;
552
553 ret = add_break(ip, old_code);
554 if (ret)
555 goto out;
556
557 run_sync();
558
559 ret = add_update_code(ip, new_code);
560 if (ret)
561 goto fail_update;
562
563 run_sync();
564
565 ret = ftrace_write(ip, new_code, 1);
566 if (ret) {
567 ret = -EPERM;
568 goto out;
569 }
570 run_sync();
571 out:
572 return ret;
573
574 fail_update:
575 probe_kernel_write((void *)ip, &old_code[0], 1);
576 goto out;
577}
578
492void arch_ftrace_update_code(int command) 579void arch_ftrace_update_code(int command)
493{ 580{
494 modifying_ftrace_code++; 581 /* See comment above by declaration of modifying_ftrace_code */
582 atomic_inc(&modifying_ftrace_code);
495 583
496 ftrace_modify_all_code(command); 584 ftrace_modify_all_code(command);
497 585
498 modifying_ftrace_code--; 586 atomic_dec(&modifying_ftrace_code);
499} 587}
500 588
501int __init ftrace_dyn_arch_init(void *data) 589int __init ftrace_dyn_arch_init(void *data)
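
With modifying_ftrace_code now an atomic_t, the breakpoint handler can use a plain atomic read and rely on the trap itself as the barrier, as the long comment above explains. A sketch of the consumer side (illustrative names; in the kernel the real check lives in the int3 trap handler):

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int modifying_code;		/* stands in for modifying_ftrace_code */

static bool ftrace_int3_handler_sketch(unsigned long ip)
{
	/* the kernel version checks ip against the ftrace record table */
	(void)ip;
	return true;
}

static bool handle_int3_sketch(unsigned long ip)
{
	/*
	 * The publisher does atomic_inc(), writes the int3s, and the trap
	 * acts as the needed barrier, so a plain read is sufficient here.
	 */
	if (atomic_load(&modifying_code) &&
	    ftrace_int3_handler_sketch(ip))
		return true;			/* skipped over an ftrace patch site */

	return false;				/* some other breakpoint user */
}
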
diff --git a/arch/x86/kernel/hpet.c b/arch/x86/kernel/hpet.c
index 9cc7b4392f7c..1460a5df92f7 100644
--- a/arch/x86/kernel/hpet.c
+++ b/arch/x86/kernel/hpet.c
@@ -870,7 +870,7 @@ int __init hpet_enable(void)
870 else 870 else
871 pr_warn("HPET initial state will not be saved\n"); 871 pr_warn("HPET initial state will not be saved\n");
872 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY); 872 cfg &= ~(HPET_CFG_ENABLE | HPET_CFG_LEGACY);
873 hpet_writel(cfg, HPET_Tn_CFG(i)); 873 hpet_writel(cfg, HPET_CFG);
874 if (cfg) 874 if (cfg)
875 pr_warn("HPET: Unrecognized bits %#x set in global cfg\n", 875 pr_warn("HPET: Unrecognized bits %#x set in global cfg\n",
876 cfg); 876 cfg);
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index 086eb58c6e80..f1b42b3a186c 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -120,11 +120,6 @@ bool kvm_check_and_clear_guest_paused(void)
120 bool ret = false; 120 bool ret = false;
121 struct pvclock_vcpu_time_info *src; 121 struct pvclock_vcpu_time_info *src;
122 122
123 /*
124 * per_cpu() is safe here because this function is only called from
125 * timer functions where preemption is already disabled.
126 */
127 WARN_ON(!in_atomic());
128 src = &__get_cpu_var(hv_clock); 123 src = &__get_cpu_var(hv_clock);
129 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) { 124 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
130 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED); 125 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
index 90875279ef3d..a0b2f84457be 100644
--- a/arch/x86/kernel/nmi.c
+++ b/arch/x86/kernel/nmi.c
@@ -444,14 +444,16 @@ static inline void nmi_nesting_preprocess(struct pt_regs *regs)
444 */ 444 */
445 if (unlikely(is_debug_stack(regs->sp))) { 445 if (unlikely(is_debug_stack(regs->sp))) {
446 debug_stack_set_zero(); 446 debug_stack_set_zero();
447 __get_cpu_var(update_debug_stack) = 1; 447 this_cpu_write(update_debug_stack, 1);
448 } 448 }
449} 449}
450 450
451static inline void nmi_nesting_postprocess(void) 451static inline void nmi_nesting_postprocess(void)
452{ 452{
453 if (unlikely(__get_cpu_var(update_debug_stack))) 453 if (unlikely(this_cpu_read(update_debug_stack))) {
454 debug_stack_reset(); 454 debug_stack_reset();
455 this_cpu_write(update_debug_stack, 0);
456 }
455} 457}
456#endif 458#endif
457 459
diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
index e31bf8d5c4d2..149b8d9c6ad4 100644
--- a/arch/x86/kernel/nmi_selftest.c
+++ b/arch/x86/kernel/nmi_selftest.c
@@ -42,7 +42,7 @@ static int __init nmi_unk_cb(unsigned int val, struct pt_regs *regs)
42static void __init init_nmi_testsuite(void) 42static void __init init_nmi_testsuite(void)
43{ 43{
44 /* trap all the unknown NMIs we may generate */ 44 /* trap all the unknown NMIs we may generate */
45 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk"); 45 register_nmi_handler_initonly(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk");
46} 46}
47 47
48static void __init cleanup_nmi_testsuite(void) 48static void __init cleanup_nmi_testsuite(void)
@@ -64,7 +64,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
64{ 64{
65 unsigned long timeout; 65 unsigned long timeout;
66 66
67 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback, 67 if (register_nmi_handler_initonly(NMI_LOCAL, test_nmi_ipi_callback,
68 NMI_FLAG_FIRST, "nmi_selftest")) { 68 NMI_FLAG_FIRST, "nmi_selftest")) {
69 nmi_fail = FAILURE; 69 nmi_fail = FAILURE;
70 return; 70 return;
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 62c9457ccd2f..c0f420f76cd3 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,7 +100,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
100 struct dma_attrs *attrs) 100 struct dma_attrs *attrs)
101{ 101{
102 unsigned long dma_mask; 102 unsigned long dma_mask;
103 struct page *page = NULL; 103 struct page *page;
104 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT; 104 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
105 dma_addr_t addr; 105 dma_addr_t addr;
106 106
@@ -108,6 +108,7 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
108 108
109 flag |= __GFP_ZERO; 109 flag |= __GFP_ZERO;
110again: 110again:
111 page = NULL;
111 if (!(flag & GFP_ATOMIC)) 112 if (!(flag & GFP_ATOMIC))
112 page = dma_alloc_from_contiguous(dev, count, get_order(size)); 113 page = dma_alloc_from_contiguous(dev, count, get_order(size));
113 if (!page) 114 if (!page)
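
Resetting page to NULL at the top of the retry loop keeps a pointer left over from a previous pass (for example a CMA page that failed the dma_mask check) from being returned by mistake. A stand-alone model of the fixed loop shape, with placeholder allocators in place of dma_alloc_from_contiguous() and alloc_pages():

#include <stdio.h>
#include <stdbool.h>
#include <stdlib.h>

/* placeholder allocators standing in for CMA and the page allocator */
static void *try_fast_alloc(void) { return NULL; }	/* pretend CMA has nothing */
static void *try_slow_alloc(void) { return malloc(64); }
static bool addr_usable(void *p) { return p != NULL; }	/* stands in for the mask check */

static void *alloc_with_retry(bool allow_fast)
{
	void *page;
again:
	page = NULL;			/* the added line: drop any stale pointer */
	if (allow_fast)
		page = try_fast_alloc();
	if (!page)
		page = try_slow_alloc();
	if (page && !addr_usable(page)) {
		free(page);
		allow_fast = false;	/* retry without the fast path */
		goto again;
	}
	return page;
}

int main(void)
{
	printf("%p\n", alloc_with_retry(true));
	return 0;
}
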
diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
index 13b1990c7c58..c4c6a5c2bf0f 100644
--- a/arch/x86/kernel/ptrace.c
+++ b/arch/x86/kernel/ptrace.c
@@ -1211,12 +1211,6 @@ static long x32_arch_ptrace(struct task_struct *child,
1211 0, sizeof(struct user_i387_struct), 1211 0, sizeof(struct user_i387_struct),
1212 datap); 1212 datap);
1213 1213
1214 /* normal 64bit interface to access TLS data.
1215 Works just like arch_prctl, except that the arguments
1216 are reversed. */
1217 case PTRACE_ARCH_PRCTL:
1218 return do_arch_prctl(child, data, addr);
1219
1220 default: 1214 default:
1221 return compat_ptrace_request(child, request, addr, data); 1215 return compat_ptrace_request(child, request, addr, data);
1222 } 1216 }
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 79c45af81604..25b48edb847c 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -639,9 +639,11 @@ void native_machine_shutdown(void)
639 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id)); 639 set_cpus_allowed_ptr(current, cpumask_of(reboot_cpu_id));
640 640
641 /* 641 /*
642 * O.K Now that I'm on the appropriate processor, 642 * O.K Now that I'm on the appropriate processor, stop all of the
643 * stop all of the others. 643 * others. Also disable the local irq to not receive the per-cpu
644 * timer interrupt which may trigger scheduler's load balance.
644 */ 645 */
646 local_irq_disable();
645 stop_other_cpus(); 647 stop_other_cpus();
646#endif 648#endif
647 649
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 965dfda0fd5e..21af737053aa 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -555,7 +555,6 @@ unsigned long sys_sigreturn(struct pt_regs *regs)
555 sizeof(frame->extramask)))) 555 sizeof(frame->extramask))))
556 goto badframe; 556 goto badframe;
557 557
558 sigdelsetmask(&set, ~_BLOCKABLE);
559 set_current_blocked(&set); 558 set_current_blocked(&set);
560 559
561 if (restore_sigcontext(regs, &frame->sc, &ax)) 560 if (restore_sigcontext(regs, &frame->sc, &ax))
@@ -581,7 +580,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
581 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 580 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
582 goto badframe; 581 goto badframe;
583 582
584 sigdelsetmask(&set, ~_BLOCKABLE);
585 set_current_blocked(&set); 583 set_current_blocked(&set);
586 584
587 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 585 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
@@ -647,42 +645,28 @@ setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
647 struct pt_regs *regs) 645 struct pt_regs *regs)
648{ 646{
649 int usig = signr_convert(sig); 647 int usig = signr_convert(sig);
650 sigset_t *set = &current->blocked; 648 sigset_t *set = sigmask_to_save();
651 int ret;
652
653 if (current_thread_info()->status & TS_RESTORE_SIGMASK)
654 set = &current->saved_sigmask;
655 649
656 /* Set up the stack frame */ 650 /* Set up the stack frame */
657 if (is_ia32) { 651 if (is_ia32) {
658 if (ka->sa.sa_flags & SA_SIGINFO) 652 if (ka->sa.sa_flags & SA_SIGINFO)
659 ret = ia32_setup_rt_frame(usig, ka, info, set, regs); 653 return ia32_setup_rt_frame(usig, ka, info, set, regs);
660 else 654 else
661 ret = ia32_setup_frame(usig, ka, set, regs); 655 return ia32_setup_frame(usig, ka, set, regs);
662#ifdef CONFIG_X86_X32_ABI 656#ifdef CONFIG_X86_X32_ABI
663 } else if (is_x32) { 657 } else if (is_x32) {
664 ret = x32_setup_rt_frame(usig, ka, info, 658 return x32_setup_rt_frame(usig, ka, info,
665 (compat_sigset_t *)set, regs); 659 (compat_sigset_t *)set, regs);
666#endif 660#endif
667 } else { 661 } else {
668 ret = __setup_rt_frame(sig, ka, info, set, regs); 662 return __setup_rt_frame(sig, ka, info, set, regs);
669 }
670
671 if (ret) {
672 force_sigsegv(sig, current);
673 return -EFAULT;
674 } 663 }
675
676 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
677 return ret;
678} 664}
679 665
680static int 666static void
681handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka, 667handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
682 struct pt_regs *regs) 668 struct pt_regs *regs)
683{ 669{
684 int ret;
685
686 /* Are we from a system call? */ 670 /* Are we from a system call? */
687 if (syscall_get_nr(current, regs) >= 0) { 671 if (syscall_get_nr(current, regs) >= 0) {
688 /* If so, check system call restarting.. */ 672 /* If so, check system call restarting.. */
@@ -713,10 +697,10 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
713 likely(test_and_clear_thread_flag(TIF_FORCED_TF))) 697 likely(test_and_clear_thread_flag(TIF_FORCED_TF)))
714 regs->flags &= ~X86_EFLAGS_TF; 698 regs->flags &= ~X86_EFLAGS_TF;
715 699
716 ret = setup_rt_frame(sig, ka, info, regs); 700 if (setup_rt_frame(sig, ka, info, regs) < 0) {
717 701 force_sigsegv(sig, current);
718 if (ret) 702 return;
719 return ret; 703 }
720 704
721 /* 705 /*
722 * Clear the direction flag as per the ABI for function entry. 706 * Clear the direction flag as per the ABI for function entry.
@@ -731,12 +715,8 @@ handle_signal(unsigned long sig, siginfo_t *info, struct k_sigaction *ka,
731 */ 715 */
732 regs->flags &= ~X86_EFLAGS_TF; 716 regs->flags &= ~X86_EFLAGS_TF;
733 717
734 block_sigmask(ka, sig); 718 signal_delivered(sig, info, ka, regs,
735 719 test_thread_flag(TIF_SINGLESTEP));
736 tracehook_signal_handler(sig, info, ka, regs,
737 test_thread_flag(TIF_SINGLESTEP));
738
739 return 0;
740} 720}
741 721
742#ifdef CONFIG_X86_32 722#ifdef CONFIG_X86_32
@@ -757,16 +737,6 @@ static void do_signal(struct pt_regs *regs)
757 siginfo_t info; 737 siginfo_t info;
758 int signr; 738 int signr;
759 739
760 /*
761 * We want the common case to go fast, which is why we may in certain
762 * cases get here from kernel mode. Just return without doing anything
763 * if so.
764 * X86_32: vm86 regs switched out by assembly code before reaching
765 * here, so testing against kernel CS suffices.
766 */
767 if (!user_mode(regs))
768 return;
769
770 signr = get_signal_to_deliver(&info, &ka, regs, NULL); 740 signr = get_signal_to_deliver(&info, &ka, regs, NULL);
771 if (signr > 0) { 741 if (signr > 0) {
772 /* Whee! Actually deliver the signal. */ 742 /* Whee! Actually deliver the signal. */
@@ -796,10 +766,7 @@ static void do_signal(struct pt_regs *regs)
796 * If there's no signal to deliver, we just put the saved sigmask 766 * If there's no signal to deliver, we just put the saved sigmask
797 * back. 767 * back.
798 */ 768 */
799 if (current_thread_info()->status & TS_RESTORE_SIGMASK) { 769 restore_saved_sigmask();
800 current_thread_info()->status &= ~TS_RESTORE_SIGMASK;
801 set_current_blocked(&current->saved_sigmask);
802 }
803} 770}
804 771
805/* 772/*
@@ -827,8 +794,6 @@ do_notify_resume(struct pt_regs *regs, void *unused, __u32 thread_info_flags)
827 if (thread_info_flags & _TIF_NOTIFY_RESUME) { 794 if (thread_info_flags & _TIF_NOTIFY_RESUME) {
828 clear_thread_flag(TIF_NOTIFY_RESUME); 795 clear_thread_flag(TIF_NOTIFY_RESUME);
829 tracehook_notify_resume(regs); 796 tracehook_notify_resume(regs);
830 if (current->replacement_session_keyring)
831 key_replace_session_keyring();
832 } 797 }
833 if (thread_info_flags & _TIF_USER_RETURN_NOTIFY) 798 if (thread_info_flags & _TIF_USER_RETURN_NOTIFY)
834 fire_user_return_notifiers(); 799 fire_user_return_notifiers();
@@ -936,7 +901,6 @@ asmlinkage long sys32_x32_rt_sigreturn(struct pt_regs *regs)
936 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 901 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
937 goto badframe; 902 goto badframe;
938 903
939 sigdelsetmask(&set, ~_BLOCKABLE);
940 set_current_blocked(&set); 904 set_current_blocked(&set);
941 905
942 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax)) 906 if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &ax))
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index f56f96da77f5..7bd8a0823654 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -349,9 +349,12 @@ static bool __cpuinit match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
349 349
350static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) 350static bool __cpuinit match_mc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
351{ 351{
352 if (c->phys_proc_id == o->phys_proc_id) 352 if (c->phys_proc_id == o->phys_proc_id) {
353 return topology_sane(c, o, "mc"); 353 if (cpu_has(c, X86_FEATURE_AMD_DCM))
354 return true;
354 355
356 return topology_sane(c, o, "mc");
357 }
355 return false; 358 return false;
356} 359}
357 360
@@ -382,6 +385,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
382 if ((i == cpu) || (has_mc && match_llc(c, o))) 385 if ((i == cpu) || (has_mc && match_llc(c, o)))
383 link_mask(llc_shared, cpu, i); 386 link_mask(llc_shared, cpu, i);
384 387
388 }
389
390 /*
391 * This needs a separate iteration over the cpus because we rely on all
392 * cpu_sibling_mask links to be set-up.
393 */
394 for_each_cpu(i, cpu_sibling_setup_mask) {
395 o = &cpu_data(i);
396
385 if ((i == cpu) || (has_mc && match_mc(c, o))) { 397 if ((i == cpu) || (has_mc && match_mc(c, o))) {
386 link_mask(core, cpu, i); 398 link_mask(core, cpu, i);
387 399
@@ -410,15 +422,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
410/* maps the cpu to the sched domain representing multi-core */ 422/* maps the cpu to the sched domain representing multi-core */
411const struct cpumask *cpu_coregroup_mask(int cpu) 423const struct cpumask *cpu_coregroup_mask(int cpu)
412{ 424{
413 struct cpuinfo_x86 *c = &cpu_data(cpu); 425 return cpu_llc_shared_mask(cpu);
414 /*
415 * For perf, we return last level cache shared map.
416 * And for power savings, we return cpu_core_map
417 */
418 if (!(cpu_has(c, X86_FEATURE_AMD_DCM)))
419 return cpu_core_mask(cpu);
420 else
421 return cpu_llc_shared_mask(cpu);
422} 426}
423 427
424static void impress_friends(void) 428static void impress_friends(void)
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index ff08457a025d..05b31d92f69c 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -303,8 +303,12 @@ gp_in_kernel:
303dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code) 303dotraplinkage void __kprobes notrace do_int3(struct pt_regs *regs, long error_code)
304{ 304{
305#ifdef CONFIG_DYNAMIC_FTRACE 305#ifdef CONFIG_DYNAMIC_FTRACE
306 /* ftrace must be first, everything else may cause a recursive crash */ 306 /*
307 if (unlikely(modifying_ftrace_code) && ftrace_int3_handler(regs)) 307 * ftrace must be first, everything else may cause a recursive crash.
308 * See note by declaration of modifying_ftrace_code in ftrace.c
309 */
310 if (unlikely(atomic_read(&modifying_ftrace_code)) &&
311 ftrace_int3_handler(regs))
308 return; 312 return;
309#endif 313#endif
310#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP 314#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 72102e0ab7cb..be3cea4407ff 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2595,8 +2595,7 @@ static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
2595 *gfnp = gfn; 2595 *gfnp = gfn;
2596 kvm_release_pfn_clean(pfn); 2596 kvm_release_pfn_clean(pfn);
2597 pfn &= ~mask; 2597 pfn &= ~mask;
2598 if (!get_page_unless_zero(pfn_to_page(pfn))) 2598 kvm_get_pfn(pfn);
2599 BUG();
2600 *pfnp = pfn; 2599 *pfnp = pfn;
2601 } 2600 }
2602 } 2601 }
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index f61ee67ec00f..4f74d94c8d97 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -8,6 +8,7 @@
8#include <linux/module.h> 8#include <linux/module.h>
9 9
10#include <asm/word-at-a-time.h> 10#include <asm/word-at-a-time.h>
11#include <linux/sched.h>
11 12
12/* 13/*
13 * best effort, GUP based copy_from_user() that is NMI-safe 14 * best effort, GUP based copy_from_user() that is NMI-safe
@@ -21,6 +22,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
21 void *map; 22 void *map;
22 int ret; 23 int ret;
23 24
25 if (__range_not_ok(from, n, TASK_SIZE))
26 return len;
27
24 do { 28 do {
25 ret = __get_user_pages_fast(addr, 1, 0, &page); 29 ret = __get_user_pages_fast(addr, 1, 0, &page);
26 if (!ret) 30 if (!ret)
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index 819137904428..5d7e51f3fd28 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -28,7 +28,7 @@
28# - (66): the last prefix is 0x66 28# - (66): the last prefix is 0x66
29# - (F3): the last prefix is 0xF3 29# - (F3): the last prefix is 0xF3
30# - (F2): the last prefix is 0xF2 30# - (F2): the last prefix is 0xF2
31# 31# - (!F3) : the last prefix is not 0xF3 (including non-last prefix case)
32 32
33Table: one byte opcode 33Table: one byte opcode
34Referrer: 34Referrer:
@@ -515,12 +515,12 @@ b4: LFS Gv,Mp
515b5: LGS Gv,Mp 515b5: LGS Gv,Mp
516b6: MOVZX Gv,Eb 516b6: MOVZX Gv,Eb
517b7: MOVZX Gv,Ew 517b7: MOVZX Gv,Ew
518b8: JMPE | POPCNT Gv,Ev (F3) 518b8: JMPE (!F3) | POPCNT Gv,Ev (F3)
519b9: Grp10 (1A) 519b9: Grp10 (1A)
520ba: Grp8 Ev,Ib (1A) 520ba: Grp8 Ev,Ib (1A)
521bb: BTC Ev,Gv 521bb: BTC Ev,Gv
522bc: BSF Gv,Ev | TZCNT Gv,Ev (F3) 522bc: BSF Gv,Ev (!F3) | TZCNT Gv,Ev (F3)
523bd: BSR Gv,Ev | LZCNT Gv,Ev (F3) 523bd: BSR Gv,Ev (!F3) | LZCNT Gv,Ev (F3)
524be: MOVSX Gv,Eb 524be: MOVSX Gv,Eb
525bf: MOVSX Gv,Ew 525bf: MOVSX Gv,Ew
526# 0x0f 0xc0-0xcf 526# 0x0f 0xc0-0xcf
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 97141c26a13a..bc4e9d84157f 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -62,7 +62,8 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
62 extra += PMD_SIZE; 62 extra += PMD_SIZE;
63#endif 63#endif
64 /* The first 2/4M doesn't use large pages. */ 64 /* The first 2/4M doesn't use large pages. */
65 extra += mr->end - mr->start; 65 if (mr->start < PMD_SIZE)
66 extra += mr->end - mr->start;
66 67
67 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; 68 ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT;
68 } else 69 } else
diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
index be1ef574ce9a..78fe3f1ac49f 100644
--- a/arch/x86/mm/ioremap.c
+++ b/arch/x86/mm/ioremap.c
@@ -180,7 +180,7 @@ err_free_memtype:
180 180
181/** 181/**
182 * ioremap_nocache - map bus memory into CPU space 182 * ioremap_nocache - map bus memory into CPU space
183 * @offset: bus address of the memory 183 * @phys_addr: bus address of the memory
184 * @size: size of the resource to map 184 * @size: size of the resource to map
185 * 185 *
186 * ioremap_nocache performs a platform specific sequence of operations to 186 * ioremap_nocache performs a platform specific sequence of operations to
@@ -217,7 +217,7 @@ EXPORT_SYMBOL(ioremap_nocache);
217 217
218/** 218/**
219 * ioremap_wc - map memory into CPU space write combined 219 * ioremap_wc - map memory into CPU space write combined
220 * @offset: bus address of the memory 220 * @phys_addr: bus address of the memory
221 * @size: size of the resource to map 221 * @size: size of the resource to map
222 * 222 *
223 * This version of ioremap ensures that the memory is marked write combining. 223 * This version of ioremap ensures that the memory is marked write combining.
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index e1ebde315210..a718e0d23503 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -122,7 +122,7 @@ within(unsigned long addr, unsigned long start, unsigned long end)
122 122
123/** 123/**
124 * clflush_cache_range - flush a cache range with clflush 124 * clflush_cache_range - flush a cache range with clflush
125 * @addr: virtual start address 125 * @vaddr: virtual start address
126 * @size: number of bytes to flush 126 * @size: number of bytes to flush
127 * 127 *
128 * clflush is an unordered instruction which needs fencing with mfence 128 * clflush is an unordered instruction which needs fencing with mfence
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index f11729fd019c..3d68ef6d2266 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -158,31 +158,47 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end, unsigned long req_type)
158 return req_type; 158 return req_type;
159} 159}
160 160
161struct pagerange_state {
162 unsigned long cur_pfn;
163 int ram;
164 int not_ram;
165};
166
167static int
168pagerange_is_ram_callback(unsigned long initial_pfn, unsigned long total_nr_pages, void *arg)
169{
170 struct pagerange_state *state = arg;
171
172 state->not_ram |= initial_pfn > state->cur_pfn;
173 state->ram |= total_nr_pages > 0;
174 state->cur_pfn = initial_pfn + total_nr_pages;
175
176 return state->ram && state->not_ram;
177}
178
161static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end) 179static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
162{ 180{
163 int ram_page = 0, not_rampage = 0; 181 int ret = 0;
164 unsigned long page_nr; 182 unsigned long start_pfn = start >> PAGE_SHIFT;
183 unsigned long end_pfn = (end + PAGE_SIZE - 1) >> PAGE_SHIFT;
184 struct pagerange_state state = {start_pfn, 0, 0};
165 185
166 for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT); 186 /*
167 ++page_nr) { 187 * For legacy reasons, physical address range in the legacy ISA
168 /* 188 * region is tracked as non-RAM. This will allow users of
169 * For legacy reasons, physical address range in the legacy ISA 189 * /dev/mem to map portions of legacy ISA region, even when
170 * region is tracked as non-RAM. This will allow users of 190 * some of those portions are listed(or not even listed) with
171 * /dev/mem to map portions of legacy ISA region, even when 191 * different e820 types(RAM/reserved/..)
172 * some of those portions are listed(or not even listed) with 192 */
173 * different e820 types(RAM/reserved/..) 193 if (start_pfn < ISA_END_ADDRESS >> PAGE_SHIFT)
174 */ 194 start_pfn = ISA_END_ADDRESS >> PAGE_SHIFT;
175 if (page_nr >= (ISA_END_ADDRESS >> PAGE_SHIFT) && 195
176 page_is_ram(page_nr)) 196 if (start_pfn < end_pfn) {
177 ram_page = 1; 197 ret = walk_system_ram_range(start_pfn, end_pfn - start_pfn,
178 else 198 &state, pagerange_is_ram_callback);
179 not_rampage = 1;
180
181 if (ram_page == not_rampage)
182 return -1;
183 } 199 }
184 200
185 return ram_page; 201 return (ret > 0) ? -1 : (state.ram ? 1 : 0);
186} 202}
187 203
188/* 204/*
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index 732af3a96183..4599c3e8bcb6 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,6 +176,8 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
176 return; 176 return;
177 } 177 }
178 178
179 node_set(node, numa_nodes_parsed);
180
179 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n", 181 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
180 node, pxm, 182 node, pxm,
181 (unsigned long long) start, (unsigned long long) end - 1); 183 (unsigned long long) start, (unsigned long long) end - 1);
diff --git a/arch/x86/platform/mrst/early_printk_mrst.c b/arch/x86/platform/mrst/early_printk_mrst.c
index 3c6e328483c7..028454f0c3a5 100644
--- a/arch/x86/platform/mrst/early_printk_mrst.c
+++ b/arch/x86/platform/mrst/early_printk_mrst.c
@@ -110,19 +110,16 @@ static struct kmsg_dumper dw_dumper;
110static int dumper_registered; 110static int dumper_registered;
111 111
112static void dw_kmsg_dump(struct kmsg_dumper *dumper, 112static void dw_kmsg_dump(struct kmsg_dumper *dumper,
113 enum kmsg_dump_reason reason, 113 enum kmsg_dump_reason reason)
114 const char *s1, unsigned long l1,
115 const char *s2, unsigned long l2)
116{ 114{
117 int i; 115 static char line[1024];
116 size_t len;
118 117
119 /* When run to this, we'd better re-init the HW */ 118 /* When run to this, we'd better re-init the HW */
120 mrst_early_console_init(); 119 mrst_early_console_init();
121 120
122 for (i = 0; i < l1; i++) 121 while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
123 early_mrst_console.write(&early_mrst_console, s1 + i, 1); 122 early_mrst_console.write(&early_mrst_console, line, len);
124 for (i = 0; i < l2; i++)
125 early_mrst_console.write(&early_mrst_console, s2 + i, 1);
126} 123}
127 124
128/* Set the ratio rate to 115200, 8n1, IRQ disabled */ 125/* Set the ratio rate to 115200, 8n1, IRQ disabled */
diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
index e31bcd8f2eee..fd41a9262d65 100644
--- a/arch/x86/platform/mrst/mrst.c
+++ b/arch/x86/platform/mrst/mrst.c
@@ -782,7 +782,7 @@ BLOCKING_NOTIFIER_HEAD(intel_scu_notifier);
782EXPORT_SYMBOL_GPL(intel_scu_notifier); 782EXPORT_SYMBOL_GPL(intel_scu_notifier);
783 783
784/* Called by IPC driver */ 784/* Called by IPC driver */
785void intel_scu_devices_create(void) 785void __devinit intel_scu_devices_create(void)
786{ 786{
787 int i; 787 int i;
788 788
diff --git a/arch/x86/platform/uv/tlb_uv.c b/arch/x86/platform/uv/tlb_uv.c
index 3ae0e61abd23..59880afa851f 100644
--- a/arch/x86/platform/uv/tlb_uv.c
+++ b/arch/x86/platform/uv/tlb_uv.c
@@ -1295,7 +1295,6 @@ static void __init enable_timeouts(void)
1295 */ 1295 */
1296 mmr_image |= (1L << SOFTACK_MSHIFT); 1296 mmr_image |= (1L << SOFTACK_MSHIFT);
1297 if (is_uv2_hub()) { 1297 if (is_uv2_hub()) {
1298 mmr_image &= ~(1L << UV2_LEG_SHFT);
1299 mmr_image |= (1L << UV2_EXT_SHFT); 1298 mmr_image |= (1L << UV2_EXT_SHFT);
1300 } 1299 }
1301 write_mmr_misc_control(pnode, mmr_image); 1300 write_mmr_misc_control(pnode, mmr_image);
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
index 29f9f0554f7d..7a35a6e71d44 100644
--- a/arch/x86/syscalls/syscall_32.tbl
+++ b/arch/x86/syscalls/syscall_32.tbl
@@ -355,3 +355,4 @@
355346 i386 setns sys_setns 355346 i386 setns sys_setns
356347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv 356347 i386 process_vm_readv sys_process_vm_readv compat_sys_process_vm_readv
357348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev 357348 i386 process_vm_writev sys_process_vm_writev compat_sys_process_vm_writev
358349 i386 kcmp sys_kcmp
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
index dd29a9ea27c5..51171aeff0dc 100644
--- a/arch/x86/syscalls/syscall_64.tbl
+++ b/arch/x86/syscalls/syscall_64.tbl
@@ -318,6 +318,8 @@
318309 common getcpu sys_getcpu 318309 common getcpu sys_getcpu
319310 64 process_vm_readv sys_process_vm_readv 319310 64 process_vm_readv sys_process_vm_readv
320311 64 process_vm_writev sys_process_vm_writev 320311 64 process_vm_writev sys_process_vm_writev
321312 64 kcmp sys_kcmp
322
321# 323#
322# x32-specific system call numbers start at 512 to avoid cache impact 324# x32-specific system call numbers start at 512 to avoid cache impact
323# for native 64-bit operation. 325# for native 64-bit operation.
diff --git a/arch/x86/tools/gen-insn-attr-x86.awk b/arch/x86/tools/gen-insn-attr-x86.awk
index 5f6a5b6c3a15..ddcf39b1a18d 100644
--- a/arch/x86/tools/gen-insn-attr-x86.awk
+++ b/arch/x86/tools/gen-insn-attr-x86.awk
@@ -66,9 +66,10 @@ BEGIN {
66 rex_expr = "^REX(\\.[XRWB]+)*" 66 rex_expr = "^REX(\\.[XRWB]+)*"
67 fpu_expr = "^ESC" # TODO 67 fpu_expr = "^ESC" # TODO
68 68
69 lprefix1_expr = "\\(66\\)" 69 lprefix1_expr = "\\((66|!F3)\\)"
70 lprefix2_expr = "\\(F3\\)" 70 lprefix2_expr = "\\(F3\\)"
71 lprefix3_expr = "\\(F2\\)" 71 lprefix3_expr = "\\((F2|!F3)\\)"
72 lprefix_expr = "\\((66|F2|F3)\\)"
72 max_lprefix = 4 73 max_lprefix = 4
73 74
74 # All opcodes starting with lower-case 'v' or with (v1) superscript 75 # All opcodes starting with lower-case 'v' or with (v1) superscript
@@ -333,13 +334,16 @@ function convert_operands(count,opnd, i,j,imm,mod)
333 if (match(ext, lprefix1_expr)) { 334 if (match(ext, lprefix1_expr)) {
334 lptable1[idx] = add_flags(lptable1[idx],flags) 335 lptable1[idx] = add_flags(lptable1[idx],flags)
335 variant = "INAT_VARIANT" 336 variant = "INAT_VARIANT"
336 } else if (match(ext, lprefix2_expr)) { 337 }
338 if (match(ext, lprefix2_expr)) {
337 lptable2[idx] = add_flags(lptable2[idx],flags) 339 lptable2[idx] = add_flags(lptable2[idx],flags)
338 variant = "INAT_VARIANT" 340 variant = "INAT_VARIANT"
339 } else if (match(ext, lprefix3_expr)) { 341 }
342 if (match(ext, lprefix3_expr)) {
340 lptable3[idx] = add_flags(lptable3[idx],flags) 343 lptable3[idx] = add_flags(lptable3[idx],flags)
341 variant = "INAT_VARIANT" 344 variant = "INAT_VARIANT"
342 } else { 345 }
346 if (!match(ext, lprefix_expr)){
343 table[idx] = add_flags(table[idx],flags) 347 table[idx] = add_flags(table[idx],flags)
344 } 348 }
345 } 349 }
diff --git a/arch/x86/um/signal.c b/arch/x86/um/signal.c
index bb0fb03b9f85..a508cea13503 100644
--- a/arch/x86/um/signal.c
+++ b/arch/x86/um/signal.c
@@ -486,7 +486,6 @@ long sys_sigreturn(struct pt_regs *regs)
486 copy_from_user(&set.sig[1], extramask, sig_size)) 486 copy_from_user(&set.sig[1], extramask, sig_size))
487 goto segfault; 487 goto segfault;
488 488
489 sigdelsetmask(&set, ~_BLOCKABLE);
490 set_current_blocked(&set); 489 set_current_blocked(&set);
491 490
492 if (copy_sc_from_user(&current->thread.regs, sc)) 491 if (copy_sc_from_user(&current->thread.regs, sc))
@@ -600,7 +599,6 @@ long sys_rt_sigreturn(struct pt_regs *regs)
600 if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set))) 599 if (copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
601 goto segfault; 600 goto segfault;
602 601
603 sigdelsetmask(&set, ~_BLOCKABLE);
604 set_current_blocked(&set); 602 set_current_blocked(&set);
605 603
606 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext)) 604 if (copy_sc_from_user(&current->thread.regs, &uc->uc_mcontext))
diff --git a/arch/x86/um/sys_call_table_32.c b/arch/x86/um/sys_call_table_32.c
index 416bd40c0eba..68d1dc91b37b 100644
--- a/arch/x86/um/sys_call_table_32.c
+++ b/arch/x86/um/sys_call_table_32.c
@@ -39,9 +39,9 @@
39#undef __SYSCALL_I386 39#undef __SYSCALL_I386
40#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym, 40#define __SYSCALL_I386(nr, sym, compat) [ nr ] = sym,
41 41
42typedef void (*sys_call_ptr_t)(void); 42typedef asmlinkage void (*sys_call_ptr_t)(void);
43 43
44extern void sys_ni_syscall(void); 44extern asmlinkage void sys_ni_syscall(void);
45 45
46const sys_call_ptr_t sys_call_table[] __cacheline_aligned = { 46const sys_call_ptr_t sys_call_table[] __cacheline_aligned = {
47 /* 47 /*
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index 75f33b2a5933..ff962d4b821e 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -209,6 +209,9 @@ static void __init xen_banner(void)
209 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : ""); 209 xen_feature(XENFEAT_mmu_pt_update_preserve_ad) ? " (preserve-AD)" : "");
210} 210}
211 211
212#define CPUID_THERM_POWER_LEAF 6
213#define APERFMPERF_PRESENT 0
214
212static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0; 215static __read_mostly unsigned int cpuid_leaf1_edx_mask = ~0;
213static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0; 216static __read_mostly unsigned int cpuid_leaf1_ecx_mask = ~0;
214 217
@@ -242,6 +245,11 @@ static void xen_cpuid(unsigned int *ax, unsigned int *bx,
242 *dx = cpuid_leaf5_edx_val; 245 *dx = cpuid_leaf5_edx_val;
243 return; 246 return;
244 247
248 case CPUID_THERM_POWER_LEAF:
249 /* Disabling APERFMPERF for kernel usage */
250 maskecx = ~(1 << APERFMPERF_PRESENT);
251 break;
252
245 case 0xb: 253 case 0xb:
246 /* Suppress extended topology stuff */ 254 /* Suppress extended topology stuff */
247 maskebx = 0; 255 maskebx = 0;
@@ -1116,7 +1124,10 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
1116 .wbinvd = native_wbinvd, 1124 .wbinvd = native_wbinvd,
1117 1125
1118 .read_msr = native_read_msr_safe, 1126 .read_msr = native_read_msr_safe,
1127 .rdmsr_regs = native_rdmsr_safe_regs,
1119 .write_msr = xen_write_msr_safe, 1128 .write_msr = xen_write_msr_safe,
1129 .wrmsr_regs = native_wrmsr_safe_regs,
1130
1120 .read_tsc = native_read_tsc, 1131 .read_tsc = native_read_tsc,
1121 .read_pmc = native_read_pmc, 1132 .read_pmc = native_read_pmc,
1122 1133
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index ffd08c414e91..64effdc6da94 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -706,6 +706,7 @@ int m2p_add_override(unsigned long mfn, struct page *page,
706 unsigned long uninitialized_var(address); 706 unsigned long uninitialized_var(address);
707 unsigned level; 707 unsigned level;
708 pte_t *ptep = NULL; 708 pte_t *ptep = NULL;
709 int ret = 0;
709 710
710 pfn = page_to_pfn(page); 711 pfn = page_to_pfn(page);
711 if (!PageHighMem(page)) { 712 if (!PageHighMem(page)) {
@@ -741,6 +742,24 @@ int m2p_add_override(unsigned long mfn, struct page *page,
741 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]); 742 list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
742 spin_unlock_irqrestore(&m2p_override_lock, flags); 743 spin_unlock_irqrestore(&m2p_override_lock, flags);
743 744
745 /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
746 * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
747 * pfn so that the following mfn_to_pfn(mfn) calls will return the
748 * pfn from the m2p_override (the backend pfn) instead.
749 * We need to do this because the pages shared by the frontend
750 * (xen-blkfront) can be already locked (lock_page, called by
751 * do_read_cache_page); when the userspace backend tries to use them
752 * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
753 * do_blockdev_direct_IO is going to try to lock the same pages
754 * again resulting in a deadlock.
755 * As a side effect get_user_pages_fast might not be safe on the
756 * frontend pages while they are being shared with the backend,
757 * because mfn_to_pfn (that ends up being called by GUPF) will
758 * return the backend pfn rather than the frontend pfn. */
759 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
760 if (ret == 0 && get_phys_to_machine(pfn) == mfn)
761 set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
762
744 return 0; 763 return 0;
745} 764}
746EXPORT_SYMBOL_GPL(m2p_add_override); 765EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -752,6 +771,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
752 unsigned long uninitialized_var(address); 771 unsigned long uninitialized_var(address);
753 unsigned level; 772 unsigned level;
754 pte_t *ptep = NULL; 773 pte_t *ptep = NULL;
774 int ret = 0;
755 775
756 pfn = page_to_pfn(page); 776 pfn = page_to_pfn(page);
757 mfn = get_phys_to_machine(pfn); 777 mfn = get_phys_to_machine(pfn);
@@ -821,6 +841,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
821 } else 841 } else
822 set_phys_to_machine(pfn, page->index); 842 set_phys_to_machine(pfn, page->index);
823 843
844 /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
845 * somewhere in this domain, even before being added to the
846 * m2p_override (see comment above in m2p_add_override).
847 * If there are no other entries in the m2p_override corresponding
848 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
849 * the original pfn (the one shared by the frontend): the backend
850 * cannot do any IO on this page anymore because it has been
851 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
852 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
853 * pfn again. */
854 mfn &= ~FOREIGN_FRAME_BIT;
855 ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
856 if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
857 m2p_find_override(mfn) == NULL)
858 set_phys_to_machine(pfn, mfn);
859
824 return 0; 860 return 0;
825} 861}
826EXPORT_SYMBOL_GPL(m2p_remove_override); 862EXPORT_SYMBOL_GPL(m2p_remove_override);
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 3ebba0753d38..a4790bf22c59 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -371,7 +371,8 @@ char * __init xen_memory_setup(void)
371 populated = xen_populate_chunk(map, memmap.nr_entries, 371 populated = xen_populate_chunk(map, memmap.nr_entries,
372 max_pfn, &last_pfn, xen_released_pages); 372 max_pfn, &last_pfn, xen_released_pages);
373 373
374 extra_pages += (xen_released_pages - populated); 374 xen_released_pages -= populated;
375 extra_pages += xen_released_pages;
375 376
376 if (last_pfn > max_pfn) { 377 if (last_pfn > max_pfn) {
377 max_pfn = min(MAX_DOMAIN_PAGES, last_pfn); 378 max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
diff --git a/arch/xtensa/Makefile b/arch/xtensa/Makefile
index 7608559de93a..f973754ddf90 100644
--- a/arch/xtensa/Makefile
+++ b/arch/xtensa/Makefile
@@ -68,8 +68,8 @@ endif
68 68
69# Only build variant and/or platform if it includes a Makefile 69# Only build variant and/or platform if it includes a Makefile
70 70
71buildvar := $(shell test -a $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/) 71buildvar := $(shell test -e $(srctree)/arch/xtensa/variants/$(VARIANT)/Makefile && echo arch/xtensa/variants/$(VARIANT)/)
72buildplf := $(shell test -a $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/) 72buildplf := $(shell test -e $(srctree)/arch/xtensa/platforms/$(PLATFORM)/Makefile && echo arch/xtensa/platforms/$(PLATFORM)/)
73 73
74# Find libgcc.a 74# Find libgcc.a
75 75
diff --git a/arch/xtensa/include/asm/syscall.h b/arch/xtensa/include/asm/syscall.h
index 0b9f2e13c781..c1dacca312f3 100644
--- a/arch/xtensa/include/asm/syscall.h
+++ b/arch/xtensa/include/asm/syscall.h
@@ -31,5 +31,5 @@ asmlinkage long sys_pselect6(int n, fd_set __user *inp, fd_set __user *outp,
31asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds, 31asmlinkage long sys_ppoll(struct pollfd __user *ufds, unsigned int nfds,
32 struct timespec __user *tsp, const sigset_t __user *sigmask, 32 struct timespec __user *tsp, const sigset_t __user *sigmask,
33 size_t sigsetsize); 33 size_t sigsetsize);
34 34asmlinkage long sys_rt_sigsuspend(sigset_t __user *unewset,
35 35 size_t sigsetsize);
diff --git a/arch/xtensa/kernel/signal.c b/arch/xtensa/kernel/signal.c
index c5e4ec0598d2..efe4e854b3cd 100644
--- a/arch/xtensa/kernel/signal.c
+++ b/arch/xtensa/kernel/signal.c
@@ -30,8 +30,6 @@
30 30
31#define DEBUG_SIG 0 31#define DEBUG_SIG 0
32 32
33#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
34
35extern struct task_struct *coproc_owners[]; 33extern struct task_struct *coproc_owners[];
36 34
37struct rt_sigframe 35struct rt_sigframe
@@ -261,7 +259,6 @@ asmlinkage long xtensa_rt_sigreturn(long a0, long a1, long a2, long a3,
261 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) 259 if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
262 goto badframe; 260 goto badframe;
263 261
264 sigdelsetmask(&set, ~_BLOCKABLE);
265 set_current_blocked(&set); 262 set_current_blocked(&set);
266 263
267 if (restore_sigcontext(regs, frame)) 264 if (restore_sigcontext(regs, frame))
@@ -452,15 +449,6 @@ static void do_signal(struct pt_regs *regs)
452 siginfo_t info; 449 siginfo_t info;
453 int signr; 450 int signr;
454 struct k_sigaction ka; 451 struct k_sigaction ka;
455 sigset_t oldset;
456
457 if (try_to_freeze())
458 goto no_signal;
459
460 if (test_thread_flag(TIF_RESTORE_SIGMASK))
461 oldset = &current->saved_sigmask;
462 else
463 oldset = &current->blocked;
464 452
465 task_pt_regs(current)->icountlevel = 0; 453 task_pt_regs(current)->icountlevel = 0;
466 454
@@ -501,19 +489,17 @@ static void do_signal(struct pt_regs *regs)
501 489
502 /* Whee! Actually deliver the signal. */ 490 /* Whee! Actually deliver the signal. */
503 /* Set up the stack frame */ 491 /* Set up the stack frame */
504 ret = setup_frame(signr, &ka, &info, oldset, regs); 492 ret = setup_frame(signr, &ka, &info, sigmask_to_save(), regs);
505 if (ret) 493 if (ret)
506 return; 494 return;
507 495
508 clear_thread_flag(TIF_RESTORE_SIGMASK); 496 signal_delivered(signr, &info, &ka, regs, 0);
509 block_sigmask(&ka, signr);
510 if (current->ptrace & PT_SINGLESTEP) 497 if (current->ptrace & PT_SINGLESTEP)
511 task_pt_regs(current)->icountlevel = 1; 498 task_pt_regs(current)->icountlevel = 1;
512 499
513 return; 500 return;
514 } 501 }
515 502
516no_signal:
517 /* Did we come from a system call? */ 503 /* Did we come from a system call? */
518 if ((signed) regs->syscall >= 0) { 504 if ((signed) regs->syscall >= 0) {
519 /* Restart the system call - no handlers present */ 505 /* Restart the system call - no handlers present */
@@ -532,8 +518,7 @@ no_signal:
532 } 518 }
533 519
534 /* If there's no signal to deliver, we just restore the saved mask. */ 520 /* If there's no signal to deliver, we just restore the saved mask. */
535 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK)) 521 restore_saved_sigmask();
536 set_current_blocked(&current->saved_sigmask);
537 522
538 if (current->ptrace & PT_SINGLESTEP) 523 if (current->ptrace & PT_SINGLESTEP)
539 task_pt_regs(current)->icountlevel = 1; 524 task_pt_regs(current)->icountlevel = 1;
@@ -548,9 +533,6 @@ void do_notify_resume(struct pt_regs *regs)
548 if (test_thread_flag(TIF_SIGPENDING)) 533 if (test_thread_flag(TIF_SIGPENDING))
549 do_signal(regs); 534 do_signal(regs);
550 535
551 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME)) { 536 if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
552 tracehook_notify_resume(regs); 537 tracehook_notify_resume(regs);
553 if (current->replacement_session_keyring)
554 key_replace_session_keyring();
555 }
556} 538}
diff --git a/arch/xtensa/kernel/vmlinux.lds.S b/arch/xtensa/kernel/vmlinux.lds.S
index 88ecea3facb4..ee2e2089483d 100644
--- a/arch/xtensa/kernel/vmlinux.lds.S
+++ b/arch/xtensa/kernel/vmlinux.lds.S
@@ -83,7 +83,6 @@ SECTIONS
83 83
84 _text = .; 84 _text = .;
85 _stext = .; 85 _stext = .;
86 _ftext = .;
87 86
88 .text : 87 .text :
89 { 88 {
@@ -112,7 +111,7 @@ SECTIONS
112 EXCEPTION_TABLE(16) 111 EXCEPTION_TABLE(16)
113 /* Data section */ 112 /* Data section */
114 113
115 _fdata = .; 114 _sdata = .;
116 RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE) 115 RW_DATA_SECTION(XCHAL_ICACHE_LINESIZE, PAGE_SIZE, THREAD_SIZE)
117 _edata = .; 116 _edata = .;
118 117
diff --git a/arch/xtensa/mm/init.c b/arch/xtensa/mm/init.c
index ba150e5de2eb..db955179da2d 100644
--- a/arch/xtensa/mm/init.c
+++ b/arch/xtensa/mm/init.c
@@ -26,11 +26,7 @@
26 26
27#include <asm/bootparam.h> 27#include <asm/bootparam.h>
28#include <asm/page.h> 28#include <asm/page.h>
29 29#include <asm/sections.h>
30/* References to section boundaries */
31
32extern char _ftext, _etext, _fdata, _edata, _rodata_end;
33extern char __init_begin, __init_end;
34 30
35/* 31/*
36 * mem_reserve(start, end, must_exist) 32 * mem_reserve(start, end, must_exist)
@@ -197,9 +193,9 @@ void __init mem_init(void)
197 reservedpages++; 193 reservedpages++;
198 } 194 }
199 195
200 codesize = (unsigned long) &_etext - (unsigned long) &_ftext; 196 codesize = (unsigned long) _etext - (unsigned long) _stext;
201 datasize = (unsigned long) &_edata - (unsigned long) &_fdata; 197 datasize = (unsigned long) _edata - (unsigned long) _sdata;
202 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; 198 initsize = (unsigned long) __init_end - (unsigned long) __init_begin;
203 199
204 printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, " 200 printk("Memory: %luk/%luk available (%ldk kernel code, %ldk reserved, "
205 "%ldk data, %ldk init %ldk highmem)\n", 201 "%ldk data, %ldk init %ldk highmem)\n",
@@ -237,7 +233,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
237 233
238void free_initmem(void) 234void free_initmem(void)
239{ 235{
240 free_reserved_mem(&__init_begin, &__init_end); 236 free_reserved_mem(__init_begin, __init_end);
241 printk("Freeing unused kernel memory: %dk freed\n", 237 printk("Freeing unused kernel memory: %zuk freed\n",
242 (&__init_end - &__init_begin) >> 10); 238 (__init_end - __init_begin) >> 10);
243} 239}
diff --git a/block/blk-ioc.c b/block/blk-ioc.c
index 1e2d53b04858..893b8007c657 100644
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -235,6 +235,7 @@ void ioc_clear_queue(struct request_queue *q)
235int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node) 235int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
236{ 236{
237 struct io_context *ioc; 237 struct io_context *ioc;
238 int ret;
238 239
239 ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO, 240 ioc = kmem_cache_alloc_node(iocontext_cachep, gfp_flags | __GFP_ZERO,
240 node); 241 node);
@@ -262,9 +263,12 @@ int create_task_io_context(struct task_struct *task, gfp_t gfp_flags, int node)
262 task->io_context = ioc; 263 task->io_context = ioc;
263 else 264 else
264 kmem_cache_free(iocontext_cachep, ioc); 265 kmem_cache_free(iocontext_cachep, ioc);
266
267 ret = task->io_context ? 0 : -EBUSY;
268
265 task_unlock(task); 269 task_unlock(task);
266 270
267 return 0; 271 return ret;
268} 272}
269 273
270/** 274/**
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 47768ff87343..80998958cf45 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -208,7 +208,7 @@ config ACPI_IPMI
208 208
209config ACPI_HOTPLUG_CPU 209config ACPI_HOTPLUG_CPU
210 bool 210 bool
211 depends on ACPI_PROCESSOR && HOTPLUG_CPU 211 depends on EXPERIMENTAL && ACPI_PROCESSOR && HOTPLUG_CPU
212 select ACPI_CONTAINER 212 select ACPI_CONTAINER
213 default y 213 default y
214 214
diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c
index 86933ca8b472..7dd3f9fb9f3f 100644
--- a/drivers/acpi/battery.c
+++ b/drivers/acpi/battery.c
@@ -643,11 +643,19 @@ static int acpi_battery_update(struct acpi_battery *battery)
643 643
644static void acpi_battery_refresh(struct acpi_battery *battery) 644static void acpi_battery_refresh(struct acpi_battery *battery)
645{ 645{
646 int power_unit;
647
646 if (!battery->bat.dev) 648 if (!battery->bat.dev)
647 return; 649 return;
648 650
651 power_unit = battery->power_unit;
652
649 acpi_battery_get_info(battery); 653 acpi_battery_get_info(battery);
650 /* The battery may have changed its reporting units. */ 654
655 if (power_unit == battery->power_unit)
656 return;
657
658 /* The battery has changed its reporting units. */
651 sysfs_remove_battery(battery); 659 sysfs_remove_battery(battery);
652 sysfs_add_battery(battery); 660 sysfs_add_battery(battery);
653} 661}
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 3188da3df8da..adceafda9c17 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -182,41 +182,66 @@ EXPORT_SYMBOL(acpi_bus_get_private_data);
182 Power Management 182 Power Management
183 -------------------------------------------------------------------------- */ 183 -------------------------------------------------------------------------- */
184 184
185static const char *state_string(int state)
186{
187 switch (state) {
188 case ACPI_STATE_D0:
189 return "D0";
190 case ACPI_STATE_D1:
191 return "D1";
192 case ACPI_STATE_D2:
193 return "D2";
194 case ACPI_STATE_D3_HOT:
195 return "D3hot";
196 case ACPI_STATE_D3_COLD:
197 return "D3";
198 default:
199 return "(unknown)";
200 }
201}
202
185static int __acpi_bus_get_power(struct acpi_device *device, int *state) 203static int __acpi_bus_get_power(struct acpi_device *device, int *state)
186{ 204{
187 int result = 0; 205 int result = ACPI_STATE_UNKNOWN;
188 acpi_status status = 0;
189 unsigned long long psc = 0;
190 206
191 if (!device || !state) 207 if (!device || !state)
192 return -EINVAL; 208 return -EINVAL;
193 209
194 *state = ACPI_STATE_UNKNOWN; 210 if (!device->flags.power_manageable) {
195
196 if (device->flags.power_manageable) {
197 /*
198 * Get the device's power state either directly (via _PSC) or
199 * indirectly (via power resources).
200 */
201 if (device->power.flags.power_resources) {
202 result = acpi_power_get_inferred_state(device, state);
203 if (result)
204 return result;
205 } else if (device->power.flags.explicit_get) {
206 status = acpi_evaluate_integer(device->handle, "_PSC",
207 NULL, &psc);
208 if (ACPI_FAILURE(status))
209 return -ENODEV;
210 *state = (int)psc;
211 }
212 } else {
213 /* TBD: Non-recursive algorithm for walking up hierarchy. */ 211 /* TBD: Non-recursive algorithm for walking up hierarchy. */
214 *state = device->parent ? 212 *state = device->parent ?
215 device->parent->power.state : ACPI_STATE_D0; 213 device->parent->power.state : ACPI_STATE_D0;
214 goto out;
215 }
216
217 /*
218 * Get the device's power state either directly (via _PSC) or
219 * indirectly (via power resources).
220 */
221 if (device->power.flags.explicit_get) {
222 unsigned long long psc;
223 acpi_status status = acpi_evaluate_integer(device->handle,
224 "_PSC", NULL, &psc);
225 if (ACPI_FAILURE(status))
226 return -ENODEV;
227
228 result = psc;
229 }
230 /* The test below covers ACPI_STATE_UNKNOWN too. */
231 if (result <= ACPI_STATE_D2) {
232 ; /* Do nothing. */
233 } else if (device->power.flags.power_resources) {
234 int error = acpi_power_get_inferred_state(device, &result);
235 if (error)
236 return error;
237 } else if (result == ACPI_STATE_D3_HOT) {
238 result = ACPI_STATE_D3;
216 } 239 }
240 *state = result;
217 241
218 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is D%d\n", 242 out:
219 device->pnp.bus_id, *state)); 243 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] power state is %s\n",
244 device->pnp.bus_id, state_string(*state)));
220 245
221 return 0; 246 return 0;
222} 247}
@@ -234,13 +259,14 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
234 /* Make sure this is a valid target state */ 259 /* Make sure this is a valid target state */
235 260
236 if (state == device->power.state) { 261 if (state == device->power.state) {
237 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at D%d\n", 262 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device is already at %s\n",
238 state)); 263 state_string(state)));
239 return 0; 264 return 0;
240 } 265 }
241 266
242 if (!device->power.states[state].flags.valid) { 267 if (!device->power.states[state].flags.valid) {
243 printk(KERN_WARNING PREFIX "Device does not support D%d\n", state); 268 printk(KERN_WARNING PREFIX "Device does not support %s\n",
269 state_string(state));
244 return -ENODEV; 270 return -ENODEV;
245 } 271 }
246 if (device->parent && (state < device->parent->power.state)) { 272 if (device->parent && (state < device->parent->power.state)) {
@@ -294,13 +320,13 @@ static int __acpi_bus_set_power(struct acpi_device *device, int state)
294 end: 320 end:
295 if (result) 321 if (result)
296 printk(KERN_WARNING PREFIX 322 printk(KERN_WARNING PREFIX
297 "Device [%s] failed to transition to D%d\n", 323 "Device [%s] failed to transition to %s\n",
298 device->pnp.bus_id, state); 324 device->pnp.bus_id, state_string(state));
299 else { 325 else {
300 device->power.state = state; 326 device->power.state = state;
301 ACPI_DEBUG_PRINT((ACPI_DB_INFO, 327 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
302 "Device [%s] transitioned to D%d\n", 328 "Device [%s] transitioned to %s\n",
303 device->pnp.bus_id, state)); 329 device->pnp.bus_id, state_string(state)));
304 } 330 }
305 331
306 return result; 332 return result;
diff --git a/drivers/acpi/power.c b/drivers/acpi/power.c
index 0500f719f63e..dd6d6a3c6780 100644
--- a/drivers/acpi/power.c
+++ b/drivers/acpi/power.c
@@ -631,7 +631,7 @@ int acpi_power_get_inferred_state(struct acpi_device *device, int *state)
631 * We know a device's inferred power state when all the resources 631 * We know a device's inferred power state when all the resources
632 * required for a given D-state are 'on'. 632 * required for a given D-state are 'on'.
633 */ 633 */
634 for (i = ACPI_STATE_D0; i < ACPI_STATE_D3_HOT; i++) { 634 for (i = ACPI_STATE_D0; i <= ACPI_STATE_D3_HOT; i++) {
635 list = &device->power.states[i].resources; 635 list = &device->power.states[i].resources;
636 if (list->count < 1) 636 if (list->count < 1)
637 continue; 637 continue;
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index 0af48a8554cd..a093dc163a42 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -333,6 +333,7 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
333 struct acpi_buffer state = { 0, NULL }; 333 struct acpi_buffer state = { 0, NULL };
334 union acpi_object *pss = NULL; 334 union acpi_object *pss = NULL;
335 int i; 335 int i;
336 int last_invalid = -1;
336 337
337 338
338 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer); 339 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
@@ -394,14 +395,33 @@ static int acpi_processor_get_performance_states(struct acpi_processor *pr)
394 ((u32)(px->core_frequency * 1000) != 395 ((u32)(px->core_frequency * 1000) !=
395 (px->core_frequency * 1000))) { 396 (px->core_frequency * 1000))) {
396 printk(KERN_ERR FW_BUG PREFIX 397 printk(KERN_ERR FW_BUG PREFIX
397 "Invalid BIOS _PSS frequency: 0x%llx MHz\n", 398 "Invalid BIOS _PSS frequency found for processor %d: 0x%llx MHz\n",
398 px->core_frequency); 399 pr->id, px->core_frequency);
399 result = -EFAULT; 400 if (last_invalid == -1)
400 kfree(pr->performance->states); 401 last_invalid = i;
401 goto end; 402 } else {
403 if (last_invalid != -1) {
404 /*
405 * Copy this valid entry over last_invalid entry
406 */
407 memcpy(&(pr->performance->states[last_invalid]),
408 px, sizeof(struct acpi_processor_px));
409 ++last_invalid;
410 }
402 } 411 }
403 } 412 }
404 413
414 if (last_invalid == 0) {
415 printk(KERN_ERR FW_BUG PREFIX
416 "No valid BIOS _PSS frequency found for processor %d\n", pr->id);
417 result = -EFAULT;
418 kfree(pr->performance->states);
419 pr->performance->states = NULL;
420 }
421
422 if (last_invalid > 0)
423 pr->performance->state_count = last_invalid;
424
405 end: 425 end:
406 kfree(buffer.pointer); 426 kfree(buffer.pointer);
407 427
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 85cbfdccc97c..c8a1f3b68110 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -1567,6 +1567,7 @@ static int acpi_bus_scan_fixed(void)
1567 ACPI_BUS_TYPE_POWER_BUTTON, 1567 ACPI_BUS_TYPE_POWER_BUTTON,
1568 ACPI_STA_DEFAULT, 1568 ACPI_STA_DEFAULT,
1569 &ops); 1569 &ops);
1570 device_init_wakeup(&device->dev, true);
1570 } 1571 }
1571 1572
1572 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1573 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index ebaa04593236..88561029cca8 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -25,8 +25,6 @@
25#include <acpi/acpi_bus.h> 25#include <acpi/acpi_bus.h>
26#include <acpi/acpi_drivers.h> 26#include <acpi/acpi_drivers.h>
27 27
28#include <asm/realmode.h>
29
30#include "internal.h" 28#include "internal.h"
31#include "sleep.h" 29#include "sleep.h"
32 30
@@ -59,6 +57,7 @@ MODULE_PARM_DESC(gts, "Enable evaluation of _GTS on suspend.");
59MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".); 57MODULE_PARM_DESC(bfs, "Enable evaluation of _BFS on resume".);
60 58
61static u8 sleep_states[ACPI_S_STATE_COUNT]; 59static u8 sleep_states[ACPI_S_STATE_COUNT];
60static bool pwr_btn_event_pending;
62 61
63static void acpi_sleep_tts_switch(u32 acpi_state) 62static void acpi_sleep_tts_switch(u32 acpi_state)
64{ 63{
@@ -93,13 +92,11 @@ static struct notifier_block tts_notifier = {
93static int acpi_sleep_prepare(u32 acpi_state) 92static int acpi_sleep_prepare(u32 acpi_state)
94{ 93{
95#ifdef CONFIG_ACPI_SLEEP 94#ifdef CONFIG_ACPI_SLEEP
96 unsigned long wakeup_pa = real_mode_header->wakeup_start;
97 /* do we have a wakeup address for S2 and S3? */ 95 /* do we have a wakeup address for S2 and S3? */
98 if (acpi_state == ACPI_STATE_S3) { 96 if (acpi_state == ACPI_STATE_S3) {
99 if (!wakeup_pa) 97 if (!acpi_wakeup_address)
100 return -EFAULT; 98 return -EFAULT;
101 acpi_set_firmware_waking_vector( 99 acpi_set_firmware_waking_vector(acpi_wakeup_address);
102 (acpi_physical_address)wakeup_pa);
103 100
104 } 101 }
105 ACPI_FLUSH_CPU_CACHE(); 102 ACPI_FLUSH_CPU_CACHE();
@@ -188,6 +185,14 @@ static int acpi_pm_prepare(void)
188 return error; 185 return error;
189} 186}
190 187
188static int find_powerf_dev(struct device *dev, void *data)
189{
190 struct acpi_device *device = to_acpi_device(dev);
191 const char *hid = acpi_device_hid(device);
192
193 return !strcmp(hid, ACPI_BUTTON_HID_POWERF);
194}
195
191/** 196/**
192 * acpi_pm_finish - Instruct the platform to leave a sleep state. 197 * acpi_pm_finish - Instruct the platform to leave a sleep state.
193 * 198 *
@@ -196,6 +201,7 @@ static int acpi_pm_prepare(void)
196 */ 201 */
197static void acpi_pm_finish(void) 202static void acpi_pm_finish(void)
198{ 203{
204 struct device *pwr_btn_dev;
199 u32 acpi_state = acpi_target_sleep_state; 205 u32 acpi_state = acpi_target_sleep_state;
200 206
201 acpi_ec_unblock_transactions(); 207 acpi_ec_unblock_transactions();
@@ -213,6 +219,23 @@ static void acpi_pm_finish(void)
213 acpi_set_firmware_waking_vector((acpi_physical_address) 0); 219 acpi_set_firmware_waking_vector((acpi_physical_address) 0);
214 220
215 acpi_target_sleep_state = ACPI_STATE_S0; 221 acpi_target_sleep_state = ACPI_STATE_S0;
222
223 /* If we were woken with the fixed power button, provide a small
224 * hint to userspace in the form of a wakeup event on the fixed power
225 * button device (if it can be found).
226 *
227 * We delay the event generation til now, as the PM layer requires
228 * timekeeping to be running before we generate events. */
229 if (!pwr_btn_event_pending)
230 return;
231
232 pwr_btn_event_pending = false;
233 pwr_btn_dev = bus_find_device(&acpi_bus_type, NULL, NULL,
234 find_powerf_dev);
235 if (pwr_btn_dev) {
236 pm_wakeup_event(pwr_btn_dev, 0);
237 put_device(pwr_btn_dev);
238 }
216} 239}
217 240
218/** 241/**
@@ -302,9 +325,23 @@ static int acpi_suspend_enter(suspend_state_t pm_state)
302 /* ACPI 3.0 specs (P62) says that it's the responsibility 325 /* ACPI 3.0 specs (P62) says that it's the responsibility
303 * of the OSPM to clear the status bit [ implying that the 326 * of the OSPM to clear the status bit [ implying that the
304 * POWER_BUTTON event should not reach userspace ] 327 * POWER_BUTTON event should not reach userspace ]
328 *
329 * However, we do generate a small hint for userspace in the form of
330 * a wakeup event. We flag this condition for now and generate the
331 * event later, as we're currently too early in resume to be able to
332 * generate wakeup events.
305 */ 333 */
306 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) 334 if (ACPI_SUCCESS(status) && (acpi_state == ACPI_STATE_S3)) {
307 acpi_clear_event(ACPI_EVENT_POWER_BUTTON); 335 acpi_event_status pwr_btn_status;
336
337 acpi_get_event_status(ACPI_EVENT_POWER_BUTTON, &pwr_btn_status);
338
339 if (pwr_btn_status & ACPI_EVENT_FLAG_SET) {
340 acpi_clear_event(ACPI_EVENT_POWER_BUTTON);
341 /* Flag for later */
342 pwr_btn_event_pending = true;
343 }
344 }
308 345
309 /* 346 /*
310 * Disable and clear GPE status before interrupt is enabled. Some GPEs 347 * Disable and clear GPE status before interrupt is enabled. Some GPEs
@@ -734,8 +771,8 @@ int acpi_pm_device_sleep_state(struct device *dev, int *d_min_p)
734 * can wake the system. _S0W may be valid, too. 771 * can wake the system. _S0W may be valid, too.
735 */ 772 */
736 if (acpi_target_sleep_state == ACPI_STATE_S0 || 773 if (acpi_target_sleep_state == ACPI_STATE_S0 ||
737 (device_may_wakeup(dev) && 774 (device_may_wakeup(dev) && adev->wakeup.flags.valid &&
738 adev->wakeup.sleep_state <= acpi_target_sleep_state)) { 775 adev->wakeup.sleep_state >= acpi_target_sleep_state)) {
739 acpi_status status; 776 acpi_status status;
740 777
741 acpi_method[3] = 'W'; 778 acpi_method[3] = 'W';
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 9577b6fa2650..a576575617d7 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -1687,10 +1687,6 @@ static int acpi_video_bus_add(struct acpi_device *device)
1687 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit); 1687 set_bit(KEY_BRIGHTNESS_ZERO, input->keybit);
1688 set_bit(KEY_DISPLAY_OFF, input->keybit); 1688 set_bit(KEY_DISPLAY_OFF, input->keybit);
1689 1689
1690 error = input_register_device(input);
1691 if (error)
1692 goto err_stop_video;
1693
1694 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n", 1690 printk(KERN_INFO PREFIX "%s [%s] (multi-head: %s rom: %s post: %s)\n",
1695 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device), 1691 ACPI_VIDEO_DEVICE_NAME, acpi_device_bid(device),
1696 video->flags.multihead ? "yes" : "no", 1692 video->flags.multihead ? "yes" : "no",
@@ -1701,12 +1697,16 @@ static int acpi_video_bus_add(struct acpi_device *device)
1701 video->pm_nb.priority = 0; 1697 video->pm_nb.priority = 0;
1702 error = register_pm_notifier(&video->pm_nb); 1698 error = register_pm_notifier(&video->pm_nb);
1703 if (error) 1699 if (error)
1704 goto err_unregister_input_dev; 1700 goto err_stop_video;
1701
1702 error = input_register_device(input);
1703 if (error)
1704 goto err_unregister_pm_notifier;
1705 1705
1706 return 0; 1706 return 0;
1707 1707
1708 err_unregister_input_dev: 1708 err_unregister_pm_notifier:
1709 input_unregister_device(input); 1709 unregister_pm_notifier(&video->pm_nb);
1710 err_stop_video: 1710 err_stop_video:
1711 acpi_video_bus_stop_devices(video); 1711 acpi_video_bus_stop_devices(video);
1712 err_free_input_dev: 1712 err_free_input_dev:
@@ -1743,9 +1743,18 @@ static int acpi_video_bus_remove(struct acpi_device *device, int type)
1743 return 0; 1743 return 0;
1744} 1744}
1745 1745
1746static int __init is_i740(struct pci_dev *dev)
1747{
1748 if (dev->device == 0x00D1)
1749 return 1;
1750 if (dev->device == 0x7000)
1751 return 1;
1752 return 0;
1753}
1754
1746static int __init intel_opregion_present(void) 1755static int __init intel_opregion_present(void)
1747{ 1756{
1748#if defined(CONFIG_DRM_I915) || defined(CONFIG_DRM_I915_MODULE) 1757 int opregion = 0;
1749 struct pci_dev *dev = NULL; 1758 struct pci_dev *dev = NULL;
1750 u32 address; 1759 u32 address;
1751 1760
@@ -1754,13 +1763,15 @@ static int __init intel_opregion_present(void)
1754 continue; 1763 continue;
1755 if (dev->vendor != PCI_VENDOR_ID_INTEL) 1764 if (dev->vendor != PCI_VENDOR_ID_INTEL)
1756 continue; 1765 continue;
1766 /* We don't want to poke around undefined i740 registers */
1767 if (is_i740(dev))
1768 continue;
1757 pci_read_config_dword(dev, 0xfc, &address); 1769 pci_read_config_dword(dev, 0xfc, &address);
1758 if (!address) 1770 if (!address)
1759 continue; 1771 continue;
1760 return 1; 1772 opregion = 1;
1761 } 1773 }
1762#endif 1774 return opregion;
1763 return 0;
1764} 1775}
1765 1776
1766int acpi_video_register(void) 1777int acpi_video_register(void)
diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
index 3239517f4d90..ac6a5beb28f3 100644
--- a/drivers/ata/pata_arasan_cf.c
+++ b/drivers/ata/pata_arasan_cf.c
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller source file 4 * Arasan Compact Flash host controller source file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
@@ -959,7 +959,7 @@ static struct platform_driver arasan_cf_driver = {
959 959
960module_platform_driver(arasan_cf_driver); 960module_platform_driver(arasan_cf_driver);
961 961
962MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 962MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
963MODULE_DESCRIPTION("Arasan ATA Compact Flash driver"); 963MODULE_DESCRIPTION("Arasan ATA Compact Flash driver");
964MODULE_LICENSE("GPL"); 964MODULE_LICENSE("GPL");
965MODULE_ALIAS("platform:" DRIVER_NAME); 965MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index e8cd652d2017..98510931c815 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -984,6 +984,7 @@ static uint32_t fpga_tx(struct solos_card *card)
984 } else if (skb && card->using_dma) { 984 } else if (skb && card->using_dma) {
985 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data, 985 SKB_CB(skb)->dma_addr = pci_map_single(card->dev, skb->data,
986 skb->len, PCI_DMA_TODEVICE); 986 skb->len, PCI_DMA_TODEVICE);
987 card->tx_skb[port] = skb;
987 iowrite32(SKB_CB(skb)->dma_addr, 988 iowrite32(SKB_CB(skb)->dma_addr,
988 card->config_regs + TX_DMA_ADDR(port)); 989 card->config_regs + TX_DMA_ADDR(port));
989 } 990 }
@@ -1152,7 +1153,8 @@ static int fpga_probe(struct pci_dev *dev, const struct pci_device_id *id)
1152 db_fpga_upgrade = db_firmware_upgrade = 0; 1153 db_fpga_upgrade = db_firmware_upgrade = 0;
1153 } 1154 }
1154 1155
1155 if (card->fpga_version >= DMA_SUPPORTED){ 1156 if (card->fpga_version >= DMA_SUPPORTED) {
1157 pci_set_master(dev);
1156 card->using_dma = 1; 1158 card->using_dma = 1;
1157 } else { 1159 } else {
1158 card->using_dma = 0; 1160 card->using_dma = 0;
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 1b1cbb571d38..dcb8a6e48692 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -100,7 +100,7 @@ static void driver_deferred_probe_add(struct device *dev)
100 mutex_lock(&deferred_probe_mutex); 100 mutex_lock(&deferred_probe_mutex);
101 if (list_empty(&dev->p->deferred_probe)) { 101 if (list_empty(&dev->p->deferred_probe)) {
102 dev_dbg(dev, "Added to deferred list\n"); 102 dev_dbg(dev, "Added to deferred list\n");
103 list_add(&dev->p->deferred_probe, &deferred_probe_pending_list); 103 list_add_tail(&dev->p->deferred_probe, &deferred_probe_pending_list);
104 } 104 }
105 mutex_unlock(&deferred_probe_mutex); 105 mutex_unlock(&deferred_probe_mutex);
106} 106}
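The one-line dd.c change swaps list_add() (head insertion) for list_add_tail(), so deferred devices are re-probed in the order they were queued instead of in reverse. A small standalone illustration of the difference is sketched below; it uses a plain hand-rolled singly linked list rather than the kernel's <linux/list.h>, and the node ids are arbitrary.

/* Illustrative only: head insertion reverses arrival order, tail insertion
 * preserves it.  Plain C list, not the kernel list API. */
#include <stdio.h>
#include <stdlib.h>

struct node { int id; struct node *next; };

static struct node *push_head(struct node *head, int id)
{
	struct node *n = malloc(sizeof(*n));

	n->id = id;
	n->next = head;                 /* like list_add() */
	return n;
}

static struct node *push_tail(struct node *head, int id)
{
	struct node *n = malloc(sizeof(*n)), **pp = &head;

	n->id = id;
	n->next = NULL;
	while (*pp)
		pp = &(*pp)->next;      /* like list_add_tail() */
	*pp = n;
	return head;
}

static void dump(const char *tag, struct node *head)
{
	printf("%s:", tag);
	for (; head; head = head->next)
		printf(" %d", head->id);
	printf("\n");
}

int main(void)
{
	struct node *a = NULL, *b = NULL;
	int i;

	for (i = 1; i <= 3; i++) {
		a = push_head(a, i);    /* prints 3 2 1 */
		b = push_tail(b, i);    /* prints 1 2 3 */
	}
	dump("head insertion", a);
	dump("tail insertion", b);
	return 0;
}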
diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c
index 0bcda488f11c..c89aa01fb1de 100644
--- a/drivers/base/regmap/regmap.c
+++ b/drivers/base/regmap/regmap.c
@@ -246,11 +246,11 @@ struct regmap *regmap_init(struct device *dev,
246 map->lock = regmap_lock_mutex; 246 map->lock = regmap_lock_mutex;
247 map->unlock = regmap_unlock_mutex; 247 map->unlock = regmap_unlock_mutex;
248 } 248 }
249 map->format.buf_size = (config->reg_bits + config->val_bits) / 8;
250 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8); 249 map->format.reg_bytes = DIV_ROUND_UP(config->reg_bits, 8);
251 map->format.pad_bytes = config->pad_bits / 8; 250 map->format.pad_bytes = config->pad_bits / 8;
252 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8); 251 map->format.val_bytes = DIV_ROUND_UP(config->val_bits, 8);
253 map->format.buf_size += map->format.pad_bytes; 252 map->format.buf_size = DIV_ROUND_UP(config->reg_bits +
253 config->val_bits + config->pad_bits, 8);
254 map->reg_shift = config->pad_bits % 8; 254 map->reg_shift = config->pad_bits % 8;
255 if (config->reg_stride) 255 if (config->reg_stride)
256 map->reg_stride = config->reg_stride; 256 map->reg_stride = config->reg_stride;
@@ -368,7 +368,7 @@ struct regmap *regmap_init(struct device *dev,
368 368
369 ret = regcache_init(map, config); 369 ret = regcache_init(map, config);
370 if (ret < 0) 370 if (ret < 0)
371 goto err_free_workbuf; 371 goto err_debugfs;
372 372
373 /* Add a devres resource for dev_get_regmap() */ 373 /* Add a devres resource for dev_get_regmap() */
374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); 374 m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL);
@@ -383,7 +383,8 @@ struct regmap *regmap_init(struct device *dev,
383 383
384err_cache: 384err_cache:
385 regcache_exit(map); 385 regcache_exit(map);
386err_free_workbuf: 386err_debugfs:
387 regmap_debugfs_exit(map);
387 kfree(map->work_buf); 388 kfree(map->work_buf);
388err_map: 389err_map:
389 kfree(map); 390 kfree(map);
@@ -471,6 +472,7 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config)
471 472
472 return ret; 473 return ret;
473} 474}
475EXPORT_SYMBOL_GPL(regmap_reinit_cache);
474 476
475/** 477/**
476 * regmap_exit(): Free a previously allocated register map 478 * regmap_exit(): Free a previously allocated register map
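The regmap.c change computes the work-buffer size with one DIV_ROUND_UP over the total bit count instead of summing per-field byte counts that were each truncated. For widths that are not multiples of eight the two formulas can differ by a byte; the sketch below shows the arithmetic with hypothetical values reg_bits = 7, val_bits = 10, pad_bits = 0 (chosen only to expose the difference, not taken from any real device).

/* Illustrative arithmetic only; the bit widths are made-up examples. */
#include <stdio.h>

#define DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

int main(void)
{
	unsigned int reg_bits = 7, val_bits = 10, pad_bits = 0;

	/* old: truncating division per field, pad bytes added separately */
	unsigned int old_size = (reg_bits + val_bits) / 8 + pad_bits / 8;

	/* new: round the whole bit count up once */
	unsigned int new_size = DIV_ROUND_UP(reg_bits + val_bits + pad_bits, 8);

	/* prints "old buf_size = 2, new buf_size = 3" */
	printf("old buf_size = %u, new buf_size = %u\n", old_size, new_size);
	return 0;
}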
diff --git a/drivers/base/soc.c b/drivers/base/soc.c
index ba29b2e73d48..72b5e7280d14 100644
--- a/drivers/base/soc.c
+++ b/drivers/base/soc.c
@@ -42,7 +42,7 @@ struct device *soc_device_to_device(struct soc_device *soc_dev)
42 return &soc_dev->dev; 42 return &soc_dev->dev;
43} 43}
44 44
45static mode_t soc_attribute_mode(struct kobject *kobj, 45static umode_t soc_attribute_mode(struct kobject *kobj,
46 struct attribute *attr, 46 struct attribute *attr,
47 int index) 47 int index)
48{ 48{
diff --git a/drivers/bcma/driver_chipcommon_pmu.c b/drivers/bcma/driver_chipcommon_pmu.c
index a058842f14fd..61ce4054b3c3 100644
--- a/drivers/bcma/driver_chipcommon_pmu.c
+++ b/drivers/bcma/driver_chipcommon_pmu.c
@@ -139,7 +139,9 @@ void bcma_pmu_workarounds(struct bcma_drv_cc *cc)
139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7); 139 bcma_chipco_chipctl_maskset(cc, 0, ~0, 0x7);
140 break; 140 break;
141 case 0x4331: 141 case 0x4331:
142 /* BCM4331 workaround is SPROM-related, we put it in sprom.c */ 142 case 43431:
143 /* Ext PA lines must be enabled for tx on BCM4331 */
144 bcma_chipco_bcm4331_ext_pa_lines_ctl(cc, true);
143 break; 145 break;
144 case 43224: 146 case 43224:
145 if (bus->chipinfo.rev == 0) { 147 if (bus->chipinfo.rev == 0) {
diff --git a/drivers/bcma/driver_pci.c b/drivers/bcma/driver_pci.c
index 9a96f14c8f47..c32ebd537abe 100644
--- a/drivers/bcma/driver_pci.c
+++ b/drivers/bcma/driver_pci.c
@@ -232,17 +232,19 @@ void __devinit bcma_core_pci_init(struct bcma_drv_pci *pc)
232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core, 232int bcma_core_pci_irq_ctl(struct bcma_drv_pci *pc, struct bcma_device *core,
233 bool enable) 233 bool enable)
234{ 234{
235 struct pci_dev *pdev = pc->core->bus->host_pci; 235 struct pci_dev *pdev;
236 u32 coremask, tmp; 236 u32 coremask, tmp;
237 int err = 0; 237 int err = 0;
238 238
239 if (core->bus->hosttype != BCMA_HOSTTYPE_PCI) { 239 if (!pc || core->bus->hosttype != BCMA_HOSTTYPE_PCI) {
240 /* This bcma device is not on a PCI host-bus. So the IRQs are 240 /* This bcma device is not on a PCI host-bus. So the IRQs are
241 * not routed through the PCI core. 241 * not routed through the PCI core.
242 * So we must not enable routing through the PCI core. */ 242 * So we must not enable routing through the PCI core. */
243 goto out; 243 goto out;
244 } 244 }
245 245
246 pdev = pc->core->bus->host_pci;
247
246 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp); 248 err = pci_read_config_dword(pdev, BCMA_PCI_IRQMASK, &tmp);
247 if (err) 249 if (err)
248 goto out; 250 goto out;
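The driver_pci.c hunk defers the pc->core->bus->host_pci dereference until after the "!pc || not a PCI host" check, so an invalid controller is rejected before any pointer chasing happens. The general shape is sketched below with hypothetical types; it is not the bcma structure layout, only the validate-before-dereference pattern.

/* Illustrative only: validate the handle before dereferencing it.
 * struct host/struct ctrl and the field names are made up. */
#include <stdio.h>
#include <stddef.h>

struct host { int id; };
struct ctrl { struct host *host; int is_pci; };

static int irq_ctl(struct ctrl *pc)
{
	struct host *h;                 /* do NOT initialise from pc yet */

	if (!pc || !pc->is_pci)
		return -1;              /* safe: pc was never dereferenced */

	h = pc->host;                   /* only reached for a valid PCI host */
	return h ? h->id : -1;
}

int main(void)
{
	struct host hst = { 42 };
	struct ctrl ok = { &hst, 1 };

	printf("%d %d\n", irq_ctl(NULL), irq_ctl(&ok));  /* -1 42 */
	return 0;
}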
diff --git a/drivers/bcma/sprom.c b/drivers/bcma/sprom.c
index c7f93359acb0..f16f42d36071 100644
--- a/drivers/bcma/sprom.c
+++ b/drivers/bcma/sprom.c
@@ -579,13 +579,13 @@ int bcma_sprom_get(struct bcma_bus *bus)
579 if (!sprom) 579 if (!sprom)
580 return -ENOMEM; 580 return -ENOMEM;
581 581
582 if (bus->chipinfo.id == 0x4331) 582 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false); 583 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, false);
584 584
585 pr_debug("SPROM offset 0x%x\n", offset); 585 pr_debug("SPROM offset 0x%x\n", offset);
586 bcma_sprom_read(bus, offset, sprom); 586 bcma_sprom_read(bus, offset, sprom);
587 587
588 if (bus->chipinfo.id == 0x4331) 588 if (bus->chipinfo.id == 0x4331 || bus->chipinfo.id == 43431)
589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true); 589 bcma_chipco_bcm4331_ext_pa_lines_ctl(&bus->drv_cc, true);
590 590
591 err = bcma_sprom_valid(sprom); 591 err = bcma_sprom_valid(sprom);
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 304000c3d433..264bc77dcb91 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -294,18 +294,16 @@ static int hba_reset_nosleep(struct driver_data *dd)
294 */ 294 */
295static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag) 295static inline void mtip_issue_ncq_command(struct mtip_port *port, int tag)
296{ 296{
297 unsigned long flags = 0;
298
299 atomic_set(&port->commands[tag].active, 1); 297 atomic_set(&port->commands[tag].active, 1);
300 298
301 spin_lock_irqsave(&port->cmd_issue_lock, flags); 299 spin_lock(&port->cmd_issue_lock);
302 300
303 writel((1 << MTIP_TAG_BIT(tag)), 301 writel((1 << MTIP_TAG_BIT(tag)),
304 port->s_active[MTIP_TAG_INDEX(tag)]); 302 port->s_active[MTIP_TAG_INDEX(tag)]);
305 writel((1 << MTIP_TAG_BIT(tag)), 303 writel((1 << MTIP_TAG_BIT(tag)),
306 port->cmd_issue[MTIP_TAG_INDEX(tag)]); 304 port->cmd_issue[MTIP_TAG_INDEX(tag)]);
307 305
308 spin_unlock_irqrestore(&port->cmd_issue_lock, flags); 306 spin_unlock(&port->cmd_issue_lock);
309 307
310 /* Set the command's timeout value.*/ 308 /* Set the command's timeout value.*/
311 port->commands[tag].comp_time = jiffies + msecs_to_jiffies( 309 port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
@@ -436,8 +434,7 @@ static void mtip_init_port(struct mtip_port *port)
436 writel(0xFFFFFFFF, port->completed[i]); 434 writel(0xFFFFFFFF, port->completed[i]);
437 435
438 /* Clear any pending interrupts for this port */ 436 /* Clear any pending interrupts for this port */
439 writel(readl(port->dd->mmio + PORT_IRQ_STAT), 437 writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
440 port->dd->mmio + PORT_IRQ_STAT);
441 438
442 /* Clear any pending interrupts on the HBA. */ 439 /* Clear any pending interrupts on the HBA. */
443 writel(readl(port->dd->mmio + HOST_IRQ_STAT), 440 writel(readl(port->dd->mmio + HOST_IRQ_STAT),
@@ -782,13 +779,24 @@ static void mtip_handle_tfe(struct driver_data *dd)
782 779
783 /* Stop the timer to prevent command timeouts. */ 780 /* Stop the timer to prevent command timeouts. */
784 del_timer(&port->cmd_timer); 781 del_timer(&port->cmd_timer);
782 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
783
784 if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
785 test_bit(MTIP_TAG_INTERNAL, port->allocated)) {
786 cmd = &port->commands[MTIP_TAG_INTERNAL];
787 dbg_printk(MTIP_DRV_NAME " TFE for the internal command\n");
788
789 atomic_inc(&cmd->active); /* active > 1 indicates error */
790 if (cmd->comp_data && cmd->comp_func) {
791 cmd->comp_func(port, MTIP_TAG_INTERNAL,
792 cmd->comp_data, PORT_IRQ_TF_ERR);
793 }
794 goto handle_tfe_exit;
795 }
785 796
786 /* clear the tag accumulator */ 797 /* clear the tag accumulator */
787 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long)); 798 memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
788 799
789 /* Set eh_active */
790 set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
791
792 /* Loop through all the groups */ 800 /* Loop through all the groups */
793 for (group = 0; group < dd->slot_groups; group++) { 801 for (group = 0; group < dd->slot_groups; group++) {
794 completed = readl(port->completed[group]); 802 completed = readl(port->completed[group]);
@@ -940,6 +948,7 @@ static void mtip_handle_tfe(struct driver_data *dd)
940 } 948 }
941 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt); 949 print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
942 950
951handle_tfe_exit:
943 /* clear eh_active */ 952 /* clear eh_active */
944 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags); 953 clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
945 wake_up_interruptible(&port->svc_wait); 954 wake_up_interruptible(&port->svc_wait);
@@ -961,6 +970,8 @@ static inline void mtip_process_sdbf(struct driver_data *dd)
961 /* walk all bits in all slot groups */ 970 /* walk all bits in all slot groups */
962 for (group = 0; group < dd->slot_groups; group++) { 971 for (group = 0; group < dd->slot_groups; group++) {
963 completed = readl(port->completed[group]); 972 completed = readl(port->completed[group]);
973 if (!completed)
974 continue;
964 975
965 /* clear completed status register in the hardware.*/ 976 /* clear completed status register in the hardware.*/
966 writel(completed, port->completed[group]); 977 writel(completed, port->completed[group]);
@@ -1329,22 +1340,6 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1329 } 1340 }
1330 rv = -EAGAIN; 1341 rv = -EAGAIN;
1331 } 1342 }
1332
1333 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1334 & (1 << MTIP_TAG_INTERNAL)) {
1335 dev_warn(&port->dd->pdev->dev,
1336 "Retiring internal command but CI is 1.\n");
1337 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1338 &port->dd->dd_flag)) {
1339 hba_reset_nosleep(port->dd);
1340 rv = -ENXIO;
1341 } else {
1342 mtip_restart_port(port);
1343 rv = -EAGAIN;
1344 }
1345 goto exec_ic_exit;
1346 }
1347
1348 } else { 1343 } else {
1349 /* Spin for <timeout> checking if command still outstanding */ 1344 /* Spin for <timeout> checking if command still outstanding */
1350 timeout = jiffies + msecs_to_jiffies(timeout); 1345 timeout = jiffies + msecs_to_jiffies(timeout);
@@ -1361,21 +1356,25 @@ static int mtip_exec_internal_command(struct mtip_port *port,
1361 rv = -ENXIO; 1356 rv = -ENXIO;
1362 goto exec_ic_exit; 1357 goto exec_ic_exit;
1363 } 1358 }
1359 if (readl(port->mmio + PORT_IRQ_STAT) & PORT_IRQ_ERR) {
1360 atomic_inc(&int_cmd->active); /* error */
1361 break;
1362 }
1364 } 1363 }
1364 }
1365 1365
1366 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL]) 1366 if (atomic_read(&int_cmd->active) > 1) {
1367 dev_err(&port->dd->pdev->dev,
1368 "Internal command [%02X] failed\n", fis->command);
1369 rv = -EIO;
1370 }
1371 if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
1367 & (1 << MTIP_TAG_INTERNAL)) { 1372 & (1 << MTIP_TAG_INTERNAL)) {
1368 dev_err(&port->dd->pdev->dev, 1373 rv = -ENXIO;
1369 "Internal command did not complete [atomic]\n"); 1374 if (!test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1375 &port->dd->dd_flag)) {
1376 mtip_restart_port(port);
1370 rv = -EAGAIN; 1377 rv = -EAGAIN;
1371 if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
1372 &port->dd->dd_flag)) {
1373 hba_reset_nosleep(port->dd);
1374 rv = -ENXIO;
1375 } else {
1376 mtip_restart_port(port);
1377 rv = -EAGAIN;
1378 }
1379 } 1378 }
1380 } 1379 }
1381exec_ic_exit: 1380exec_ic_exit:
@@ -1893,13 +1892,33 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1893 void __user *user_buffer) 1892 void __user *user_buffer)
1894{ 1893{
1895 struct host_to_dev_fis fis; 1894 struct host_to_dev_fis fis;
1896 struct host_to_dev_fis *reply = (port->rxfis + RX_FIS_D2H_REG); 1895 struct host_to_dev_fis *reply;
1896 u8 *buf = NULL;
1897 dma_addr_t dma_addr = 0;
1898 int rv = 0, xfer_sz = command[3];
1899
1900 if (xfer_sz) {
1901 if (user_buffer)
1902 return -EFAULT;
1903
1904 buf = dmam_alloc_coherent(&port->dd->pdev->dev,
1905 ATA_SECT_SIZE * xfer_sz,
1906 &dma_addr,
1907 GFP_KERNEL);
1908 if (!buf) {
1909 dev_err(&port->dd->pdev->dev,
1910 "Memory allocation failed (%d bytes)\n",
1911 ATA_SECT_SIZE * xfer_sz);
1912 return -ENOMEM;
1913 }
1914 memset(buf, 0, ATA_SECT_SIZE * xfer_sz);
1915 }
1897 1916
1898 /* Build the FIS. */ 1917 /* Build the FIS. */
1899 memset(&fis, 0, sizeof(struct host_to_dev_fis)); 1918 memset(&fis, 0, sizeof(struct host_to_dev_fis));
1900 fis.type = 0x27; 1919 fis.type = 0x27;
1901 fis.opts = 1 << 7; 1920 fis.opts = 1 << 7;
1902 fis.command = command[0]; 1921 fis.command = command[0];
1903 fis.features = command[2]; 1922 fis.features = command[2];
1904 fis.sect_count = command[3]; 1923 fis.sect_count = command[3];
1905 if (fis.command == ATA_CMD_SMART) { 1924 if (fis.command == ATA_CMD_SMART) {
@@ -1908,6 +1927,11 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1908 fis.cyl_hi = 0xC2; 1927 fis.cyl_hi = 0xC2;
1909 } 1928 }
1910 1929
1930 if (xfer_sz)
1931 reply = (port->rxfis + RX_FIS_PIO_SETUP);
1932 else
1933 reply = (port->rxfis + RX_FIS_D2H_REG);
1934
1911 dbg_printk(MTIP_DRV_NAME 1935 dbg_printk(MTIP_DRV_NAME
1912 " %s: User Command: cmd %x, sect %x, " 1936 " %s: User Command: cmd %x, sect %x, "
1913 "feat %x, sectcnt %x\n", 1937 "feat %x, sectcnt %x\n",
@@ -1917,43 +1941,46 @@ static int exec_drive_command(struct mtip_port *port, u8 *command,
1917 command[2], 1941 command[2],
1918 command[3]); 1942 command[3]);
1919 1943
1920 memset(port->sector_buffer, 0x00, ATA_SECT_SIZE);
1921
1922 /* Execute the command. */ 1944 /* Execute the command. */
1923 if (mtip_exec_internal_command(port, 1945 if (mtip_exec_internal_command(port,
1924 &fis, 1946 &fis,
1925 5, 1947 5,
1926 port->sector_buffer_dma, 1948 (xfer_sz ? dma_addr : 0),
1927 (command[3] != 0) ? ATA_SECT_SIZE : 0, 1949 (xfer_sz ? ATA_SECT_SIZE * xfer_sz : 0),
1928 0, 1950 0,
1929 GFP_KERNEL, 1951 GFP_KERNEL,
1930 MTIP_IOCTL_COMMAND_TIMEOUT_MS) 1952 MTIP_IOCTL_COMMAND_TIMEOUT_MS)
1931 < 0) { 1953 < 0) {
1932 return -1; 1954 rv = -EFAULT;
1955 goto exit_drive_command;
1933 } 1956 }
1934 1957
1935 /* Collect the completion status. */ 1958 /* Collect the completion status. */
1936 command[0] = reply->command; /* Status*/ 1959 command[0] = reply->command; /* Status*/
1937 command[1] = reply->features; /* Error*/ 1960 command[1] = reply->features; /* Error*/
1938 command[2] = command[3]; 1961 command[2] = reply->sect_count;
1939 1962
1940 dbg_printk(MTIP_DRV_NAME 1963 dbg_printk(MTIP_DRV_NAME
1941 " %s: Completion Status: stat %x, " 1964 " %s: Completion Status: stat %x, "
1942 "err %x, cmd %x\n", 1965 "err %x, nsect %x\n",
1943 __func__, 1966 __func__,
1944 command[0], 1967 command[0],
1945 command[1], 1968 command[1],
1946 command[2]); 1969 command[2]);
1947 1970
1948 if (user_buffer && command[3]) { 1971 if (xfer_sz) {
1949 if (copy_to_user(user_buffer, 1972 if (copy_to_user(user_buffer,
1950 port->sector_buffer, 1973 buf,
1951 ATA_SECT_SIZE * command[3])) { 1974 ATA_SECT_SIZE * command[3])) {
1952 return -EFAULT; 1975 rv = -EFAULT;
1976 goto exit_drive_command;
1953 } 1977 }
1954 } 1978 }
1955 1979exit_drive_command:
1956 return 0; 1980 if (buf)
1981 dmam_free_coherent(&port->dd->pdev->dev,
1982 ATA_SECT_SIZE * xfer_sz, buf, dma_addr);
1983 return rv;
1957} 1984}
1958 1985
1959/* 1986/*
@@ -2003,6 +2030,32 @@ static unsigned int implicit_sector(unsigned char command,
2003 return rv; 2030 return rv;
2004} 2031}
2005 2032
2033static void mtip_set_timeout(struct host_to_dev_fis *fis, unsigned int *timeout)
2034{
2035 switch (fis->command) {
2036 case ATA_CMD_DOWNLOAD_MICRO:
2037 *timeout = 120000; /* 2 minutes */
2038 break;
2039 case ATA_CMD_SEC_ERASE_UNIT:
2040 case 0xFC:
2041 *timeout = 240000; /* 4 minutes */
2042 break;
2043 case ATA_CMD_STANDBYNOW1:
2044 *timeout = 10000; /* 10 seconds */
2045 break;
2046 case 0xF7:
2047 case 0xFA:
2048 *timeout = 60000; /* 60 seconds */
2049 break;
2050 case ATA_CMD_SMART:
2051 *timeout = 15000; /* 15 seconds */
2052 break;
2053 default:
2054 *timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
2055 break;
2056 }
2057}
2058
2006/* 2059/*
2007 * Executes a taskfile 2060 * Executes a taskfile
2008 * See ide_taskfile_ioctl() for derivation 2061 * See ide_taskfile_ioctl() for derivation
@@ -2023,7 +2076,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
2023 unsigned int taskin = 0; 2076 unsigned int taskin = 0;
2024 unsigned int taskout = 0; 2077 unsigned int taskout = 0;
2025 u8 nsect = 0; 2078 u8 nsect = 0;
2026 unsigned int timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS; 2079 unsigned int timeout;
2027 unsigned int force_single_sector; 2080 unsigned int force_single_sector;
2028 unsigned int transfer_size; 2081 unsigned int transfer_size;
2029 unsigned long task_file_data; 2082 unsigned long task_file_data;
@@ -2153,32 +2206,7 @@ static int exec_drive_taskfile(struct driver_data *dd,
2153 fis.lba_hi, 2206 fis.lba_hi,
2154 fis.device); 2207 fis.device);
2155 2208
2156 switch (fis.command) { 2209 mtip_set_timeout(&fis, &timeout);
2157 case ATA_CMD_DOWNLOAD_MICRO:
2158 /* Change timeout for Download Microcode to 2 minutes */
2159 timeout = 120000;
2160 break;
2161 case ATA_CMD_SEC_ERASE_UNIT:
2162 /* Change timeout for Security Erase Unit to 4 minutes.*/
2163 timeout = 240000;
2164 break;
2165 case ATA_CMD_STANDBYNOW1:
2166 /* Change timeout for standby immediate to 10 seconds.*/
2167 timeout = 10000;
2168 break;
2169 case 0xF7:
2170 case 0xFA:
2171 /* Change timeout for vendor unique command to 10 secs */
2172 timeout = 10000;
2173 break;
2174 case ATA_CMD_SMART:
2175 /* Change timeout for vendor unique command to 15 secs */
2176 timeout = 15000;
2177 break;
2178 default:
2179 timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
2180 break;
2181 }
2182 2210
2183 /* Determine the correct transfer size.*/ 2211 /* Determine the correct transfer size.*/
2184 if (force_single_sector) 2212 if (force_single_sector)
@@ -2295,13 +2323,12 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
2295{ 2323{
2296 switch (cmd) { 2324 switch (cmd) {
2297 case HDIO_GET_IDENTITY: 2325 case HDIO_GET_IDENTITY:
2298 if (mtip_get_identify(dd->port, (void __user *) arg) < 0) { 2326 {
2299 dev_warn(&dd->pdev->dev, 2327 if (copy_to_user((void __user *)arg, dd->port->identify,
2300 "Unable to read identity\n"); 2328 sizeof(u16) * ATA_ID_WORDS))
2301 return -EIO; 2329 return -EFAULT;
2302 }
2303
2304 break; 2330 break;
2331 }
2305 case HDIO_DRIVE_CMD: 2332 case HDIO_DRIVE_CMD:
2306 { 2333 {
2307 u8 drive_command[4]; 2334 u8 drive_command[4];
@@ -2537,40 +2564,58 @@ static ssize_t mtip_hw_show_registers(struct device *dev,
2537 int size = 0; 2564 int size = 0;
2538 int n; 2565 int n;
2539 2566
2540 size += sprintf(&buf[size], "S ACTive:\n"); 2567 size += sprintf(&buf[size], "Hardware\n--------\n");
2568 size += sprintf(&buf[size], "S ACTive : [ 0x");
2541 2569
2542 for (n = 0; n < dd->slot_groups; n++) 2570 for (n = dd->slot_groups-1; n >= 0; n--)
2543 size += sprintf(&buf[size], "0x%08x\n", 2571 size += sprintf(&buf[size], "%08X ",
2544 readl(dd->port->s_active[n])); 2572 readl(dd->port->s_active[n]));
2545 2573
2546 size += sprintf(&buf[size], "Command Issue:\n"); 2574 size += sprintf(&buf[size], "]\n");
2575 size += sprintf(&buf[size], "Command Issue : [ 0x");
2547 2576
2548 for (n = 0; n < dd->slot_groups; n++) 2577 for (n = dd->slot_groups-1; n >= 0; n--)
2549 size += sprintf(&buf[size], "0x%08x\n", 2578 size += sprintf(&buf[size], "%08X ",
2550 readl(dd->port->cmd_issue[n])); 2579 readl(dd->port->cmd_issue[n]));
2551 2580
2552 size += sprintf(&buf[size], "Allocated:\n"); 2581 size += sprintf(&buf[size], "]\n");
2582 size += sprintf(&buf[size], "Completed : [ 0x");
2583
2584 for (n = dd->slot_groups-1; n >= 0; n--)
2585 size += sprintf(&buf[size], "%08X ",
2586 readl(dd->port->completed[n]));
2587
2588 size += sprintf(&buf[size], "]\n");
2589 size += sprintf(&buf[size], "PORT IRQ STAT : [ 0x%08X ]\n",
2590 readl(dd->port->mmio + PORT_IRQ_STAT));
2591 size += sprintf(&buf[size], "HOST IRQ STAT : [ 0x%08X ]\n",
2592 readl(dd->mmio + HOST_IRQ_STAT));
2593 size += sprintf(&buf[size], "\n");
2553 2594
2554 for (n = 0; n < dd->slot_groups; n++) { 2595 size += sprintf(&buf[size], "Local\n-----\n");
2596 size += sprintf(&buf[size], "Allocated : [ 0x");
2597
2598 for (n = dd->slot_groups-1; n >= 0; n--) {
2555 if (sizeof(long) > sizeof(u32)) 2599 if (sizeof(long) > sizeof(u32))
2556 group_allocated = 2600 group_allocated =
2557 dd->port->allocated[n/2] >> (32*(n&1)); 2601 dd->port->allocated[n/2] >> (32*(n&1));
2558 else 2602 else
2559 group_allocated = dd->port->allocated[n]; 2603 group_allocated = dd->port->allocated[n];
2560 size += sprintf(&buf[size], "0x%08x\n", 2604 size += sprintf(&buf[size], "%08X ", group_allocated);
2561 group_allocated);
2562 } 2605 }
2606 size += sprintf(&buf[size], "]\n");
2563 2607
2564 size += sprintf(&buf[size], "Completed:\n"); 2608 size += sprintf(&buf[size], "Commands in Q: [ 0x");
2565
2566 for (n = 0; n < dd->slot_groups; n++)
2567 size += sprintf(&buf[size], "0x%08x\n",
2568 readl(dd->port->completed[n]));
2569 2609
2570 size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n", 2610 for (n = dd->slot_groups-1; n >= 0; n--) {
2571 readl(dd->port->mmio + PORT_IRQ_STAT)); 2611 if (sizeof(long) > sizeof(u32))
2572 size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n", 2612 group_allocated =
2573 readl(dd->mmio + HOST_IRQ_STAT)); 2613 dd->port->cmds_to_issue[n/2] >> (32*(n&1));
2614 else
2615 group_allocated = dd->port->cmds_to_issue[n];
2616 size += sprintf(&buf[size], "%08X ", group_allocated);
2617 }
2618 size += sprintf(&buf[size], "]\n");
2574 2619
2575 return size; 2620 return size;
2576} 2621}
@@ -2592,8 +2637,24 @@ static ssize_t mtip_hw_show_status(struct device *dev,
2592 return size; 2637 return size;
2593} 2638}
2594 2639
2640static ssize_t mtip_hw_show_flags(struct device *dev,
2641 struct device_attribute *attr,
2642 char *buf)
2643{
2644 struct driver_data *dd = dev_to_disk(dev)->private_data;
2645 int size = 0;
2646
2647 size += sprintf(&buf[size], "Flag in port struct : [ %08lX ]\n",
2648 dd->port->flags);
2649 size += sprintf(&buf[size], "Flag in dd struct : [ %08lX ]\n",
2650 dd->dd_flag);
2651
2652 return size;
2653}
2654
2595static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL); 2655static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
2596static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL); 2656static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
2657static DEVICE_ATTR(flags, S_IRUGO, mtip_hw_show_flags, NULL);
2597 2658
2598/* 2659/*
2599 * Create the sysfs related attributes. 2660 * Create the sysfs related attributes.
@@ -2616,6 +2677,9 @@ static int mtip_hw_sysfs_init(struct driver_data *dd, struct kobject *kobj)
2616 if (sysfs_create_file(kobj, &dev_attr_status.attr)) 2677 if (sysfs_create_file(kobj, &dev_attr_status.attr))
2617 dev_warn(&dd->pdev->dev, 2678 dev_warn(&dd->pdev->dev,
2618 "Error creating 'status' sysfs entry\n"); 2679 "Error creating 'status' sysfs entry\n");
2680 if (sysfs_create_file(kobj, &dev_attr_flags.attr))
2681 dev_warn(&dd->pdev->dev,
2682 "Error creating 'flags' sysfs entry\n");
2619 return 0; 2683 return 0;
2620} 2684}
2621 2685
@@ -2636,6 +2700,7 @@ static int mtip_hw_sysfs_exit(struct driver_data *dd, struct kobject *kobj)
2636 2700
2637 sysfs_remove_file(kobj, &dev_attr_registers.attr); 2701 sysfs_remove_file(kobj, &dev_attr_registers.attr);
2638 sysfs_remove_file(kobj, &dev_attr_status.attr); 2702 sysfs_remove_file(kobj, &dev_attr_status.attr);
2703 sysfs_remove_file(kobj, &dev_attr_flags.attr);
2639 2704
2640 return 0; 2705 return 0;
2641} 2706}
@@ -3634,7 +3699,10 @@ skip_create_disk:
3634 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags); 3699 set_bit(QUEUE_FLAG_NONROT, &dd->queue->queue_flags);
3635 blk_queue_max_segments(dd->queue, MTIP_MAX_SG); 3700 blk_queue_max_segments(dd->queue, MTIP_MAX_SG);
3636 blk_queue_physical_block_size(dd->queue, 4096); 3701 blk_queue_physical_block_size(dd->queue, 4096);
3702 blk_queue_max_hw_sectors(dd->queue, 0xffff);
3703 blk_queue_max_segment_size(dd->queue, 0x400000);
3637 blk_queue_io_min(dd->queue, 4096); 3704 blk_queue_io_min(dd->queue, 4096);
3705
3638 /* 3706 /*
3639 * write back cache is not supported in the device. FUA depends on 3707 * write back cache is not supported in the device. FUA depends on
3640 * write back cache support, hence setting flush support to zero. 3708 * write back cache support, hence setting flush support to zero.
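mtip_set_timeout() above folds the open-coded switch in exec_drive_taskfile() into one helper and raises the vendor-unique 0xF7/0xFA timeout from 10 s to 60 s. The sketch below shows the same mapping as a table-driven lookup; it is an alternative illustration, not the driver's code, the ATA opcodes in the comments are the standard values for the named commands, and DEFAULT_TIMEOUT_MS is a placeholder for MTIP_IOCTL_COMMAND_TIMEOUT_MS.

/* Illustrative only: per-command timeout lookup mirroring the hunk above. */
#include <stdio.h>

#define DEFAULT_TIMEOUT_MS 5000  /* placeholder default */

static const struct { unsigned char cmd; unsigned int ms; } timeouts[] = {
	{ 0x92, 120000 },   /* ATA_CMD_DOWNLOAD_MICRO: 2 minutes */
	{ 0xF4, 240000 },   /* ATA_CMD_SEC_ERASE_UNIT: 4 minutes */
	{ 0xFC, 240000 },
	{ 0xE0,  10000 },   /* ATA_CMD_STANDBYNOW1: 10 seconds */
	{ 0xF7,  60000 },   /* vendor unique: 60 seconds */
	{ 0xFA,  60000 },
	{ 0xB0,  15000 },   /* ATA_CMD_SMART: 15 seconds */
};

static unsigned int cmd_timeout(unsigned char cmd)
{
	size_t i;

	for (i = 0; i < sizeof(timeouts) / sizeof(timeouts[0]); i++)
		if (timeouts[i].cmd == cmd)
			return timeouts[i].ms;
	return DEFAULT_TIMEOUT_MS;
}

int main(void)
{
	printf("0xF7 -> %u ms, 0x25 -> %u ms\n",
	       cmd_timeout(0xF7), cmd_timeout(0x25));
	return 0;
}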
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index 4ef58336310a..b2c88da26b2a 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -113,33 +113,35 @@
113 113
114#define __force_bit2int (unsigned int __force) 114#define __force_bit2int (unsigned int __force)
115 115
116/* below are bit numbers in 'flags' defined in mtip_port */ 116enum {
117#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */ 117 /* below are bit numbers in 'flags' defined in mtip_port */
118#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */ 118 MTIP_PF_IC_ACTIVE_BIT = 0, /* pio/ioctl */
119#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */ 119 MTIP_PF_EH_ACTIVE_BIT = 1, /* error handling */
120#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcde */ 120 MTIP_PF_SE_ACTIVE_BIT = 2, /* secure erase */
121#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \ 121 MTIP_PF_DM_ACTIVE_BIT = 3, /* download microcde */
122 MTIP_PF_PAUSE_IO = ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
122 (1 << MTIP_PF_EH_ACTIVE_BIT) | \ 123 (1 << MTIP_PF_EH_ACTIVE_BIT) | \
123 (1 << MTIP_PF_SE_ACTIVE_BIT) | \ 124 (1 << MTIP_PF_SE_ACTIVE_BIT) | \
124 (1 << MTIP_PF_DM_ACTIVE_BIT)) 125 (1 << MTIP_PF_DM_ACTIVE_BIT)),
125 126
126#define MTIP_PF_SVC_THD_ACTIVE_BIT 4 127 MTIP_PF_SVC_THD_ACTIVE_BIT = 4,
127#define MTIP_PF_ISSUE_CMDS_BIT 5 128 MTIP_PF_ISSUE_CMDS_BIT = 5,
128#define MTIP_PF_REBUILD_BIT 6 129 MTIP_PF_REBUILD_BIT = 6,
129#define MTIP_PF_SVC_THD_STOP_BIT 8 130 MTIP_PF_SVC_THD_STOP_BIT = 8,
130 131
131/* below are bit numbers in 'dd_flag' defined in driver_data */ 132 /* below are bit numbers in 'dd_flag' defined in driver_data */
132#define MTIP_DDF_REMOVE_PENDING_BIT 1 133 MTIP_DDF_REMOVE_PENDING_BIT = 1,
133#define MTIP_DDF_OVER_TEMP_BIT 2 134 MTIP_DDF_OVER_TEMP_BIT = 2,
134#define MTIP_DDF_WRITE_PROTECT_BIT 3 135 MTIP_DDF_WRITE_PROTECT_BIT = 3,
135#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \ 136 MTIP_DDF_STOP_IO = ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
136 (1 << MTIP_DDF_OVER_TEMP_BIT) | \ 137 (1 << MTIP_DDF_OVER_TEMP_BIT) | \
137 (1 << MTIP_DDF_WRITE_PROTECT_BIT)) 138 (1 << MTIP_DDF_WRITE_PROTECT_BIT)),
138 139
139#define MTIP_DDF_CLEANUP_BIT 5 140 MTIP_DDF_CLEANUP_BIT = 5,
140#define MTIP_DDF_RESUME_BIT 6 141 MTIP_DDF_RESUME_BIT = 6,
141#define MTIP_DDF_INIT_DONE_BIT 7 142 MTIP_DDF_INIT_DONE_BIT = 7,
142#define MTIP_DDF_REBUILD_FAILED_BIT 8 143 MTIP_DDF_REBUILD_FAILED_BIT = 8,
144};
143 145
144__packed struct smart_attr{ 146__packed struct smart_attr{
145 u8 attr_id; 147 u8 attr_id;
diff --git a/drivers/char/agp/intel-agp.c b/drivers/char/agp/intel-agp.c
index c98c5689bb0b..92622d44e12d 100644
--- a/drivers/char/agp/intel-agp.c
+++ b/drivers/char/agp/intel-agp.c
@@ -899,6 +899,7 @@ static struct pci_device_id agp_intel_pci_table[] = {
899 ID(PCI_DEVICE_ID_INTEL_B43_HB), 899 ID(PCI_DEVICE_ID_INTEL_B43_HB),
900 ID(PCI_DEVICE_ID_INTEL_B43_1_HB), 900 ID(PCI_DEVICE_ID_INTEL_B43_1_HB),
901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB), 901 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB),
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB),
902 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB), 903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB),
903 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB), 904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB),
904 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), 905 ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB),
diff --git a/drivers/char/agp/intel-agp.h b/drivers/char/agp/intel-agp.h
index cf2e764b1760..57226424690c 100644
--- a/drivers/char/agp/intel-agp.h
+++ b/drivers/char/agp/intel-agp.h
@@ -212,6 +212,7 @@
212#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30 212#define PCI_DEVICE_ID_INTEL_G41_HB 0x2E30
213#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32 213#define PCI_DEVICE_ID_INTEL_G41_IG 0x2E32
214#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040 214#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_HB 0x0040
215#define PCI_DEVICE_ID_INTEL_IRONLAKE_D2_HB 0x0069
215#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042 216#define PCI_DEVICE_ID_INTEL_IRONLAKE_D_IG 0x0042
216#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044 217#define PCI_DEVICE_ID_INTEL_IRONLAKE_M_HB 0x0044
217#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062 218#define PCI_DEVICE_ID_INTEL_IRONLAKE_MA_HB 0x0062
diff --git a/drivers/char/hw_random/atmel-rng.c b/drivers/char/hw_random/atmel-rng.c
index f518b99f53f5..731c9046cf7b 100644
--- a/drivers/char/hw_random/atmel-rng.c
+++ b/drivers/char/hw_random/atmel-rng.c
@@ -34,8 +34,15 @@ static int atmel_trng_read(struct hwrng *rng, void *buf, size_t max,
34 u32 *data = buf; 34 u32 *data = buf;
35 35
36 /* data ready? */ 36 /* data ready? */
37 if (readl(trng->base + TRNG_ODATA) & 1) { 37 if (readl(trng->base + TRNG_ISR) & 1) {
38 *data = readl(trng->base + TRNG_ODATA); 38 *data = readl(trng->base + TRNG_ODATA);
39 /*
40 ensure data ready is only set again AFTER the next data
41 word is ready in case it got set between checking ISR
42 and reading ODATA, so we don't risk re-reading the
43 same word
44 */
45 readl(trng->base + TRNG_ISR);
39 return 4; 46 return 4;
40 } else 47 } else
41 return 0; 48 return 0;
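The atmel-rng fix polls TRNG_ISR for the data-ready bit instead of treating ODATA itself as a status, and re-reads ISR after consuming ODATA so a ready flag raised between the check and the read cannot cause the same word to be handed out twice. The access pattern is sketched below against fake in-memory "registers"; it is not the real MMIO layout and read_isr()/read_odata() are stand-ins for ioread/readl.

/* Illustrative only: poll status, consume data, then re-read status so a
 * stale "ready" bit is not seen twice.  Fake registers, no real MMIO. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_isr;    /* bit 0 = data ready */
static uint32_t fake_odata;  /* current random word */

static uint32_t read_isr(void)   { uint32_t v = fake_isr; fake_isr = 0; return v; }
static uint32_t read_odata(void) { return fake_odata; }

static int trng_read(uint32_t *out)
{
	if (!(read_isr() & 1))
		return 0;               /* nothing ready yet */

	*out = read_odata();            /* consume the current word */
	read_isr();                     /* refresh status so the next "ready"
					   refers to the next word */
	return 4;
}

int main(void)
{
	uint32_t word = 0;

	fake_isr = 1;
	fake_odata = 0xdeadbeef;
	printf("first read : %d bytes (0x%08x)\n", trng_read(&word), word);
	printf("second read: %d bytes\n", trng_read(&word));
	return 0;
}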
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
index af34074e702b..6756e7c3bc07 100644
--- a/drivers/clk/spear/clk-aux-synth.c
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
index 4dbdb3fe18e0..958aa3ad1d60 100644
--- a/drivers/clk/spear/clk-frac-synth.c
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
index b471c9762a97..1afc18c4effc 100644
--- a/drivers/clk/spear/clk-gpt-synth.c
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
index dcd4bdf4b0d9..5f1b6badeb15 100644
--- a/drivers/clk/spear/clk-vco-pll.c
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
index 376d4e5ff326..7cd63788d546 100644
--- a/drivers/clk/spear/clk.c
+++ b/drivers/clk/spear/clk.c
@@ -1,6 +1,6 @@
1/* 1/*
2 * Copyright (C) 2012 ST Microelectronics 2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com> 3 * Viresh Kumar <viresh.linux@gmail.com>
4 * 4 *
5 * This file is licensed under the terms of the GNU General Public 5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any 6 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
index 3321c46a071c..931737677dfa 100644
--- a/drivers/clk/spear/clk.h
+++ b/drivers/clk/spear/clk.h
@@ -2,7 +2,7 @@
2 * Clock framework definitions for SPEAr platform 2 * Clock framework definitions for SPEAr platform
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
index 42b68df9aeef..8f05652d53e6 100644
--- a/drivers/clk/spear/spear1310_clock.c
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1310 machine clock framework source file 4 * SPEAr1310 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
index f130919d5bf8..e3ea72162236 100644
--- a/drivers/clk/spear/spear1340_clock.c
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -4,7 +4,7 @@
4 * SPEAr1340 machine clock framework source file 4 * SPEAr1340 machine clock framework source file
5 * 5 *
6 * Copyright (C) 2012 ST Microelectronics 6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
index 440bb3e4c971..01dd6daff2a1 100644
--- a/drivers/clk/spear/spear3xx_clock.c
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr3xx machines clock framework source file 2 * SPEAr3xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
index f9a20b382304..554d64b062a1 100644
--- a/drivers/clk/spear/spear6xx_clock.c
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -2,7 +2,7 @@
2 * SPEAr6xx machines clock framework source file 2 * SPEAr6xx machines clock framework source file
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/clocksource/Makefile b/drivers/clocksource/Makefile
index 8d81a1d32653..dd3e661a124d 100644
--- a/drivers/clocksource/Makefile
+++ b/drivers/clocksource/Makefile
@@ -6,6 +6,7 @@ obj-$(CONFIG_CS5535_CLOCK_EVENT_SRC) += cs5535-clockevt.o
6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o 6obj-$(CONFIG_SH_TIMER_CMT) += sh_cmt.o
7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o 7obj-$(CONFIG_SH_TIMER_MTU2) += sh_mtu2.o
8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o 8obj-$(CONFIG_SH_TIMER_TMU) += sh_tmu.o
9obj-$(CONFIG_EM_TIMER_STI) += em_sti.o
9obj-$(CONFIG_CLKBLD_I8253) += i8253.o 10obj-$(CONFIG_CLKBLD_I8253) += i8253.o
10obj-$(CONFIG_CLKSRC_MMIO) += mmio.o 11obj-$(CONFIG_CLKSRC_MMIO) += mmio.o
11obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o 12obj-$(CONFIG_DW_APB_TIMER) += dw_apb_timer.o
diff --git a/drivers/clocksource/em_sti.c b/drivers/clocksource/em_sti.c
new file mode 100644
index 000000000000..372051d1bba8
--- /dev/null
+++ b/drivers/clocksource/em_sti.c
@@ -0,0 +1,406 @@
1/*
2 * Emma Mobile Timer Support - STI
3 *
4 * Copyright (C) 2012 Magnus Damm
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include <linux/init.h>
21#include <linux/platform_device.h>
22#include <linux/spinlock.h>
23#include <linux/interrupt.h>
24#include <linux/ioport.h>
25#include <linux/io.h>
26#include <linux/clk.h>
27#include <linux/irq.h>
28#include <linux/err.h>
29#include <linux/delay.h>
30#include <linux/clocksource.h>
31#include <linux/clockchips.h>
32#include <linux/slab.h>
33#include <linux/module.h>
34
35enum { USER_CLOCKSOURCE, USER_CLOCKEVENT, USER_NR };
36
37struct em_sti_priv {
38 void __iomem *base;
39 struct clk *clk;
40 struct platform_device *pdev;
41 unsigned int active[USER_NR];
42 unsigned long rate;
43 raw_spinlock_t lock;
44 struct clock_event_device ced;
45 struct clocksource cs;
46};
47
48#define STI_CONTROL 0x00
49#define STI_COMPA_H 0x10
50#define STI_COMPA_L 0x14
51#define STI_COMPB_H 0x18
52#define STI_COMPB_L 0x1c
53#define STI_COUNT_H 0x20
54#define STI_COUNT_L 0x24
55#define STI_COUNT_RAW_H 0x28
56#define STI_COUNT_RAW_L 0x2c
57#define STI_SET_H 0x30
58#define STI_SET_L 0x34
59#define STI_INTSTATUS 0x40
60#define STI_INTRAWSTATUS 0x44
61#define STI_INTENSET 0x48
62#define STI_INTENCLR 0x4c
63#define STI_INTFFCLR 0x50
64
65static inline unsigned long em_sti_read(struct em_sti_priv *p, int offs)
66{
67 return ioread32(p->base + offs);
68}
69
70static inline void em_sti_write(struct em_sti_priv *p, int offs,
71 unsigned long value)
72{
73 iowrite32(value, p->base + offs);
74}
75
76static int em_sti_enable(struct em_sti_priv *p)
77{
78 int ret;
79
80 /* enable clock */
81 ret = clk_enable(p->clk);
82 if (ret) {
83 dev_err(&p->pdev->dev, "cannot enable clock\n");
84 return ret;
85 }
86
87 /* configure channel, periodic mode and maximum timeout */
88 p->rate = clk_get_rate(p->clk);
89
90 /* reset the counter */
91 em_sti_write(p, STI_SET_H, 0x40000000);
92 em_sti_write(p, STI_SET_L, 0x00000000);
93
94 /* mask and clear pending interrupts */
95 em_sti_write(p, STI_INTENCLR, 3);
96 em_sti_write(p, STI_INTFFCLR, 3);
97
98 /* enable updates of counter registers */
99 em_sti_write(p, STI_CONTROL, 1);
100
101 return 0;
102}
103
104static void em_sti_disable(struct em_sti_priv *p)
105{
106 /* mask interrupts */
107 em_sti_write(p, STI_INTENCLR, 3);
108
109 /* stop clock */
110 clk_disable(p->clk);
111}
112
113static cycle_t em_sti_count(struct em_sti_priv *p)
114{
115 cycle_t ticks;
116 unsigned long flags;
117
118 /* the STI hardware buffers the 48-bit count, but to
119 * break it out into two 32-bit access the registers
120 * must be accessed in a certain order.
121 * Always read STI_COUNT_H before STI_COUNT_L.
122 */
123 raw_spin_lock_irqsave(&p->lock, flags);
124 ticks = (cycle_t)(em_sti_read(p, STI_COUNT_H) & 0xffff) << 32;
125 ticks |= em_sti_read(p, STI_COUNT_L);
126 raw_spin_unlock_irqrestore(&p->lock, flags);
127
128 return ticks;
129}
130
131static cycle_t em_sti_set_next(struct em_sti_priv *p, cycle_t next)
132{
133 unsigned long flags;
134
135 raw_spin_lock_irqsave(&p->lock, flags);
136
137 /* mask compare A interrupt */
138 em_sti_write(p, STI_INTENCLR, 1);
139
140 /* update compare A value */
141 em_sti_write(p, STI_COMPA_H, next >> 32);
142 em_sti_write(p, STI_COMPA_L, next & 0xffffffff);
143
144 /* clear compare A interrupt source */
145 em_sti_write(p, STI_INTFFCLR, 1);
146
147 /* unmask compare A interrupt */
148 em_sti_write(p, STI_INTENSET, 1);
149
150 raw_spin_unlock_irqrestore(&p->lock, flags);
151
152 return next;
153}
154
155static irqreturn_t em_sti_interrupt(int irq, void *dev_id)
156{
157 struct em_sti_priv *p = dev_id;
158
159 p->ced.event_handler(&p->ced);
160 return IRQ_HANDLED;
161}
162
163static int em_sti_start(struct em_sti_priv *p, unsigned int user)
164{
165 unsigned long flags;
166 int used_before;
167 int ret = 0;
168
169 raw_spin_lock_irqsave(&p->lock, flags);
170 used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
171 if (!used_before)
172 ret = em_sti_enable(p);
173
174 if (!ret)
175 p->active[user] = 1;
176 raw_spin_unlock_irqrestore(&p->lock, flags);
177
178 return ret;
179}
180
181static void em_sti_stop(struct em_sti_priv *p, unsigned int user)
182{
183 unsigned long flags;
184 int used_before, used_after;
185
186 raw_spin_lock_irqsave(&p->lock, flags);
187 used_before = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
188 p->active[user] = 0;
189 used_after = p->active[USER_CLOCKSOURCE] | p->active[USER_CLOCKEVENT];
190
191 if (used_before && !used_after)
192 em_sti_disable(p);
193 raw_spin_unlock_irqrestore(&p->lock, flags);
194}
195
196static struct em_sti_priv *cs_to_em_sti(struct clocksource *cs)
197{
198 return container_of(cs, struct em_sti_priv, cs);
199}
200
201static cycle_t em_sti_clocksource_read(struct clocksource *cs)
202{
203 return em_sti_count(cs_to_em_sti(cs));
204}
205
206static int em_sti_clocksource_enable(struct clocksource *cs)
207{
208 int ret;
209 struct em_sti_priv *p = cs_to_em_sti(cs);
210
211 ret = em_sti_start(p, USER_CLOCKSOURCE);
212 if (!ret)
213 __clocksource_updatefreq_hz(cs, p->rate);
214 return ret;
215}
216
217static void em_sti_clocksource_disable(struct clocksource *cs)
218{
219 em_sti_stop(cs_to_em_sti(cs), USER_CLOCKSOURCE);
220}
221
222static void em_sti_clocksource_resume(struct clocksource *cs)
223{
224 em_sti_clocksource_enable(cs);
225}
226
227static int em_sti_register_clocksource(struct em_sti_priv *p)
228{
229 struct clocksource *cs = &p->cs;
230
231 memset(cs, 0, sizeof(*cs));
232 cs->name = dev_name(&p->pdev->dev);
233 cs->rating = 200;
234 cs->read = em_sti_clocksource_read;
235 cs->enable = em_sti_clocksource_enable;
236 cs->disable = em_sti_clocksource_disable;
237 cs->suspend = em_sti_clocksource_disable;
238 cs->resume = em_sti_clocksource_resume;
239 cs->mask = CLOCKSOURCE_MASK(48);
240 cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
241
242 dev_info(&p->pdev->dev, "used as clock source\n");
243
244 /* Register with dummy 1 Hz value, gets updated in ->enable() */
245 clocksource_register_hz(cs, 1);
246 return 0;
247}
248
249static struct em_sti_priv *ced_to_em_sti(struct clock_event_device *ced)
250{
251 return container_of(ced, struct em_sti_priv, ced);
252}
253
254static void em_sti_clock_event_mode(enum clock_event_mode mode,
255 struct clock_event_device *ced)
256{
257 struct em_sti_priv *p = ced_to_em_sti(ced);
258
259 /* deal with old setting first */
260 switch (ced->mode) {
261 case CLOCK_EVT_MODE_ONESHOT:
262 em_sti_stop(p, USER_CLOCKEVENT);
263 break;
264 default:
265 break;
266 }
267
268 switch (mode) {
269 case CLOCK_EVT_MODE_ONESHOT:
270 dev_info(&p->pdev->dev, "used for oneshot clock events\n");
271 em_sti_start(p, USER_CLOCKEVENT);
272 clockevents_config(&p->ced, p->rate);
273 break;
274 case CLOCK_EVT_MODE_SHUTDOWN:
275 case CLOCK_EVT_MODE_UNUSED:
276 em_sti_stop(p, USER_CLOCKEVENT);
277 break;
278 default:
279 break;
280 }
281}
282
283static int em_sti_clock_event_next(unsigned long delta,
284 struct clock_event_device *ced)
285{
286 struct em_sti_priv *p = ced_to_em_sti(ced);
287 cycle_t next;
288 int safe;
289
290 next = em_sti_set_next(p, em_sti_count(p) + delta);
291 safe = em_sti_count(p) < (next - 1);
292
293 return !safe;
294}
295
296static void em_sti_register_clockevent(struct em_sti_priv *p)
297{
298 struct clock_event_device *ced = &p->ced;
299
300 memset(ced, 0, sizeof(*ced));
301 ced->name = dev_name(&p->pdev->dev);
302 ced->features = CLOCK_EVT_FEAT_ONESHOT;
303 ced->rating = 200;
304 ced->cpumask = cpumask_of(0);
305 ced->set_next_event = em_sti_clock_event_next;
306 ced->set_mode = em_sti_clock_event_mode;
307
308 dev_info(&p->pdev->dev, "used for clock events\n");
309
310 /* Register with dummy 1 Hz value, gets updated in ->set_mode() */
311 clockevents_config_and_register(ced, 1, 2, 0xffffffff);
312}
313
314static int __devinit em_sti_probe(struct platform_device *pdev)
315{
316 struct em_sti_priv *p;
317 struct resource *res;
318 int irq, ret;
319
320 p = kzalloc(sizeof(*p), GFP_KERNEL);
321 if (p == NULL) {
322 dev_err(&pdev->dev, "failed to allocate driver data\n");
323 ret = -ENOMEM;
324 goto err0;
325 }
326
327 p->pdev = pdev;
328 platform_set_drvdata(pdev, p);
329
330 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
331 if (!res) {
332 dev_err(&pdev->dev, "failed to get I/O memory\n");
333 ret = -EINVAL;
334 goto err0;
335 }
336
337 irq = platform_get_irq(pdev, 0);
338 if (irq < 0) {
339 dev_err(&pdev->dev, "failed to get irq\n");
340 ret = -EINVAL;
341 goto err0;
342 }
343
344 /* map memory, let base point to the STI instance */
345 p->base = ioremap_nocache(res->start, resource_size(res));
346 if (p->base == NULL) {
347 dev_err(&pdev->dev, "failed to remap I/O memory\n");
348 ret = -ENXIO;
349 goto err0;
350 }
351
352 /* get hold of clock */
353 p->clk = clk_get(&pdev->dev, "sclk");
354 if (IS_ERR(p->clk)) {
355 dev_err(&pdev->dev, "cannot get clock\n");
356 ret = PTR_ERR(p->clk);
357 goto err1;
358 }
359
360 if (request_irq(irq, em_sti_interrupt,
361 IRQF_TIMER | IRQF_IRQPOLL | IRQF_NOBALANCING,
362 dev_name(&pdev->dev), p)) {
363 dev_err(&pdev->dev, "failed to request low IRQ\n");
364 ret = -ENOENT;
365 goto err2;
366 }
367
368 raw_spin_lock_init(&p->lock);
369 em_sti_register_clockevent(p);
370 em_sti_register_clocksource(p);
371 return 0;
372
373err2:
374 clk_put(p->clk);
375err1:
376 iounmap(p->base);
377err0:
378 kfree(p);
379 return ret;
380}
381
382static int __devexit em_sti_remove(struct platform_device *pdev)
383{
384 return -EBUSY; /* cannot unregister clockevent and clocksource */
385}
386
387static const struct of_device_id em_sti_dt_ids[] __devinitconst = {
388 { .compatible = "renesas,em-sti", },
389 {},
390};
391MODULE_DEVICE_TABLE(of, em_sti_dt_ids);
392
393static struct platform_driver em_sti_device_driver = {
394 .probe = em_sti_probe,
395 .remove = __devexit_p(em_sti_remove),
396 .driver = {
397 .name = "em_sti",
398 .of_match_table = em_sti_dt_ids,
399 }
400};
401
402module_platform_driver(em_sti_device_driver);
403
404MODULE_AUTHOR("Magnus Damm");
405MODULE_DESCRIPTION("Renesas Emma Mobile STI Timer Driver");
406MODULE_LICENSE("GPL v2");
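em_sti_count() in the new driver above assembles a 48-bit tick count from two 32-bit registers, and the comment in the code requires STI_COUNT_H to be read before STI_COUNT_L, with the pair taken under one lock. The standalone sketch below only shows how the two halves combine into a 48-bit value in that order; the fake register reads stand in for ioread32() and the sample values are arbitrary.

/* Illustrative only: combine a 16-bit high half and a 32-bit low half,
 * reading "H" before "L" as the driver requires. */
#include <stdio.h>
#include <stdint.h>

static uint32_t fake_count_h = 0x1234;       /* only the low 16 bits are valid */
static uint32_t fake_count_l = 0x89abcdef;

static uint32_t read_count_h(void) { return fake_count_h; }
static uint32_t read_count_l(void) { return fake_count_l; }

static uint64_t sti_count(void)
{
	uint64_t ticks;

	/* order matters on the real hardware: high half first */
	ticks = (uint64_t)(read_count_h() & 0xffff) << 32;
	ticks |= read_count_l();
	return ticks;
}

int main(void)
{
	/* prints "ticks = 0x123489abcdef" */
	printf("ticks = 0x%012llx\n", (unsigned long long)sti_count());
	return 0;
}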
diff --git a/drivers/clocksource/sh_cmt.c b/drivers/clocksource/sh_cmt.c
index 32fe9ef5cc5c..98b06baafcc6 100644
--- a/drivers/clocksource/sh_cmt.c
+++ b/drivers/clocksource/sh_cmt.c
@@ -48,13 +48,13 @@ struct sh_cmt_priv {
48 unsigned long next_match_value; 48 unsigned long next_match_value;
49 unsigned long max_match_value; 49 unsigned long max_match_value;
50 unsigned long rate; 50 unsigned long rate;
51 spinlock_t lock; 51 raw_spinlock_t lock;
52 struct clock_event_device ced; 52 struct clock_event_device ced;
53 struct clocksource cs; 53 struct clocksource cs;
54 unsigned long total_cycles; 54 unsigned long total_cycles;
55}; 55};
56 56
57static DEFINE_SPINLOCK(sh_cmt_lock); 57static DEFINE_RAW_SPINLOCK(sh_cmt_lock);
58 58
59#define CMSTR -1 /* shared register */ 59#define CMSTR -1 /* shared register */
60#define CMCSR 0 /* channel register */ 60#define CMCSR 0 /* channel register */
@@ -139,7 +139,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
139 unsigned long flags, value; 139 unsigned long flags, value;
140 140
141 /* start stop register shared by multiple timer channels */ 141 /* start stop register shared by multiple timer channels */
142 spin_lock_irqsave(&sh_cmt_lock, flags); 142 raw_spin_lock_irqsave(&sh_cmt_lock, flags);
143 value = sh_cmt_read(p, CMSTR); 143 value = sh_cmt_read(p, CMSTR);
144 144
145 if (start) 145 if (start)
@@ -148,7 +148,7 @@ static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start)
148 value &= ~(1 << cfg->timer_bit); 148 value &= ~(1 << cfg->timer_bit);
149 149
150 sh_cmt_write(p, CMSTR, value); 150 sh_cmt_write(p, CMSTR, value);
151 spin_unlock_irqrestore(&sh_cmt_lock, flags); 151 raw_spin_unlock_irqrestore(&sh_cmt_lock, flags);
152} 152}
153 153
154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) 154static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate)
@@ -328,9 +328,9 @@ static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta)
328{ 328{
329 unsigned long flags; 329 unsigned long flags;
330 330
331 spin_lock_irqsave(&p->lock, flags); 331 raw_spin_lock_irqsave(&p->lock, flags);
332 __sh_cmt_set_next(p, delta); 332 __sh_cmt_set_next(p, delta);
333 spin_unlock_irqrestore(&p->lock, flags); 333 raw_spin_unlock_irqrestore(&p->lock, flags);
334} 334}
335 335
336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) 336static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id)
@@ -385,7 +385,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
385 int ret = 0; 385 int ret = 0;
386 unsigned long flags; 386 unsigned long flags;
387 387
388 spin_lock_irqsave(&p->lock, flags); 388 raw_spin_lock_irqsave(&p->lock, flags);
389 389
390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) 390 if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE)))
391 ret = sh_cmt_enable(p, &p->rate); 391 ret = sh_cmt_enable(p, &p->rate);
@@ -398,7 +398,7 @@ static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag)
398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) 398 if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT)))
399 __sh_cmt_set_next(p, p->max_match_value); 399 __sh_cmt_set_next(p, p->max_match_value);
400 out: 400 out:
401 spin_unlock_irqrestore(&p->lock, flags); 401 raw_spin_unlock_irqrestore(&p->lock, flags);
402 402
403 return ret; 403 return ret;
404} 404}
@@ -408,7 +408,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
408 unsigned long flags; 408 unsigned long flags;
409 unsigned long f; 409 unsigned long f;
410 410
411 spin_lock_irqsave(&p->lock, flags); 411 raw_spin_lock_irqsave(&p->lock, flags);
412 412
413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); 413 f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE);
414 p->flags &= ~flag; 414 p->flags &= ~flag;
@@ -420,7 +420,7 @@ static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag)
420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) 420 if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE))
421 __sh_cmt_set_next(p, p->max_match_value); 421 __sh_cmt_set_next(p, p->max_match_value);
422 422
423 spin_unlock_irqrestore(&p->lock, flags); 423 raw_spin_unlock_irqrestore(&p->lock, flags);
424} 424}
425 425
426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) 426static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs)
@@ -435,13 +435,13 @@ static cycle_t sh_cmt_clocksource_read(struct clocksource *cs)
435 unsigned long value; 435 unsigned long value;
436 int has_wrapped; 436 int has_wrapped;
437 437
438 spin_lock_irqsave(&p->lock, flags); 438 raw_spin_lock_irqsave(&p->lock, flags);
439 value = p->total_cycles; 439 value = p->total_cycles;
440 raw = sh_cmt_get_counter(p, &has_wrapped); 440 raw = sh_cmt_get_counter(p, &has_wrapped);
441 441
442 if (unlikely(has_wrapped)) 442 if (unlikely(has_wrapped))
443 raw += p->match_value + 1; 443 raw += p->match_value + 1;
444 spin_unlock_irqrestore(&p->lock, flags); 444 raw_spin_unlock_irqrestore(&p->lock, flags);
445 445
446 return value + raw; 446 return value + raw;
447} 447}
@@ -591,7 +591,7 @@ static int sh_cmt_register(struct sh_cmt_priv *p, char *name,
591 p->max_match_value = (1 << p->width) - 1; 591 p->max_match_value = (1 << p->width) - 1;
592 592
593 p->match_value = p->max_match_value; 593 p->match_value = p->max_match_value;
594 spin_lock_init(&p->lock); 594 raw_spin_lock_init(&p->lock);
595 595
596 if (clockevent_rating) 596 if (clockevent_rating)
597 sh_cmt_register_clockevent(p, name, clockevent_rating); 597 sh_cmt_register_clockevent(p, name, clockevent_rating);
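
The sh_cmt hunks above, and the sh_mtu2/sh_tmu hunks below, all make the same conversion: the locks protecting the shared start/stop registers and the per-channel state move from spinlock_t to raw_spinlock_t, so the critical sections stay truly atomic even on preempt-rt configurations where ordinary spinlocks can sleep. A minimal sketch of the pattern, with illustrative names rather than the drivers' own:

        /*
         * Guard a start/stop register shared by several timer channels with a
         * raw spinlock; raw_spin_lock_irqsave() never sleeps, which matters in
         * clockevent/clocksource paths that run with interrupts disabled.
         */
        #include <linux/io.h>
        #include <linux/spinlock.h>

        static DEFINE_RAW_SPINLOCK(example_timer_lock);

        static void example_start_stop(void __iomem *reg, int bit, int start)
        {
                unsigned long flags, value;

                raw_spin_lock_irqsave(&example_timer_lock, flags);
                value = readl(reg);
                if (start)
                        value |= 1 << bit;
                else
                        value &= ~(1 << bit);
                writel(value, reg);
                raw_spin_unlock_irqrestore(&example_timer_lock, flags);
        }
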
diff --git a/drivers/clocksource/sh_mtu2.c b/drivers/clocksource/sh_mtu2.c
index a2172f690418..d9b76ca64a61 100644
--- a/drivers/clocksource/sh_mtu2.c
+++ b/drivers/clocksource/sh_mtu2.c
@@ -43,7 +43,7 @@ struct sh_mtu2_priv {
43 struct clock_event_device ced; 43 struct clock_event_device ced;
44}; 44};
45 45
46static DEFINE_SPINLOCK(sh_mtu2_lock); 46static DEFINE_RAW_SPINLOCK(sh_mtu2_lock);
47 47
48#define TSTR -1 /* shared register */ 48#define TSTR -1 /* shared register */
49#define TCR 0 /* channel register */ 49#define TCR 0 /* channel register */
@@ -107,7 +107,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
107 unsigned long flags, value; 107 unsigned long flags, value;
108 108
109 /* start stop register shared by multiple timer channels */ 109 /* start stop register shared by multiple timer channels */
110 spin_lock_irqsave(&sh_mtu2_lock, flags); 110 raw_spin_lock_irqsave(&sh_mtu2_lock, flags);
111 value = sh_mtu2_read(p, TSTR); 111 value = sh_mtu2_read(p, TSTR);
112 112
113 if (start) 113 if (start)
@@ -116,7 +116,7 @@ static void sh_mtu2_start_stop_ch(struct sh_mtu2_priv *p, int start)
116 value &= ~(1 << cfg->timer_bit); 116 value &= ~(1 << cfg->timer_bit);
117 117
118 sh_mtu2_write(p, TSTR, value); 118 sh_mtu2_write(p, TSTR, value);
119 spin_unlock_irqrestore(&sh_mtu2_lock, flags); 119 raw_spin_unlock_irqrestore(&sh_mtu2_lock, flags);
120} 120}
121 121
122static int sh_mtu2_enable(struct sh_mtu2_priv *p) 122static int sh_mtu2_enable(struct sh_mtu2_priv *p)
diff --git a/drivers/clocksource/sh_tmu.c b/drivers/clocksource/sh_tmu.c
index 97f54b634be4..c1b51d49d106 100644
--- a/drivers/clocksource/sh_tmu.c
+++ b/drivers/clocksource/sh_tmu.c
@@ -45,7 +45,7 @@ struct sh_tmu_priv {
45 struct clocksource cs; 45 struct clocksource cs;
46}; 46};
47 47
48static DEFINE_SPINLOCK(sh_tmu_lock); 48static DEFINE_RAW_SPINLOCK(sh_tmu_lock);
49 49
50#define TSTR -1 /* shared register */ 50#define TSTR -1 /* shared register */
51#define TCOR 0 /* channel register */ 51#define TCOR 0 /* channel register */
@@ -95,7 +95,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
95 unsigned long flags, value; 95 unsigned long flags, value;
96 96
97 /* start stop register shared by multiple timer channels */ 97 /* start stop register shared by multiple timer channels */
98 spin_lock_irqsave(&sh_tmu_lock, flags); 98 raw_spin_lock_irqsave(&sh_tmu_lock, flags);
99 value = sh_tmu_read(p, TSTR); 99 value = sh_tmu_read(p, TSTR);
100 100
101 if (start) 101 if (start)
@@ -104,7 +104,7 @@ static void sh_tmu_start_stop_ch(struct sh_tmu_priv *p, int start)
104 value &= ~(1 << cfg->timer_bit); 104 value &= ~(1 << cfg->timer_bit);
105 105
106 sh_tmu_write(p, TSTR, value); 106 sh_tmu_write(p, TSTR, value);
107 spin_unlock_irqrestore(&sh_tmu_lock, flags); 107 raw_spin_unlock_irqrestore(&sh_tmu_lock, flags);
108} 108}
109 109
110static int sh_tmu_enable(struct sh_tmu_priv *p) 110static int sh_tmu_enable(struct sh_tmu_priv *p)
@@ -245,12 +245,7 @@ static void sh_tmu_clock_event_start(struct sh_tmu_priv *p, int periodic)
245 245
246 sh_tmu_enable(p); 246 sh_tmu_enable(p);
247 247
248 /* TODO: calculate good shift from rate and counter bit width */ 248 clockevents_config(ced, p->rate);
249
250 ced->shift = 32;
251 ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift);
252 ced->max_delta_ns = clockevent_delta2ns(0xffffffff, ced);
253 ced->min_delta_ns = 5000;
254 249
255 if (periodic) { 250 if (periodic) {
256 p->periodic = (p->rate + HZ/2) / HZ; 251 p->periodic = (p->rate + HZ/2) / HZ;
@@ -323,7 +318,8 @@ static void sh_tmu_register_clockevent(struct sh_tmu_priv *p,
323 ced->set_mode = sh_tmu_clock_event_mode; 318 ced->set_mode = sh_tmu_clock_event_mode;
324 319
325 dev_info(&p->pdev->dev, "used for clock events\n"); 320 dev_info(&p->pdev->dev, "used for clock events\n");
326 clockevents_register_device(ced); 321
322 clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
327 323
328 ret = setup_irq(p->irqaction.irq, &p->irqaction); 324 ret = setup_irq(p->irqaction.irq, &p->irqaction);
329 if (ret) { 325 if (ret) {
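
Besides the lock conversion, the sh_tmu hunks drop the hand-rolled shift/mult and delta computation in favour of the clockevents helpers: clockevents_config_and_register() registers the device with provisional limits, and clockevents_config() reprograms it once the real input clock rate is known at enable time. A minimal sketch of that call sequence; only the two helper calls are taken from the hunks above, the function names around them are illustrative:

        #include <linux/clockchips.h>

        static void example_register_ced(struct clock_event_device *ced)
        {
                /* Registered before the clock rate is known: dummy 1 Hz,
                 * min delta 0x300 ticks, max delta = full 32-bit counter. */
                clockevents_config_and_register(ced, 1, 0x300, 0xffffffff);
        }

        static void example_start_ced(struct clock_event_device *ced,
                                      unsigned long rate)
        {
                /* Once the timer is enabled and the rate measured,
                 * recompute mult/shift and the delta limits. */
                clockevents_config(ced, rate);
        }
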
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index e23dc82d43ac..721296157577 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -1626,4 +1626,4 @@ module_exit(dw_exit);
1626MODULE_LICENSE("GPL v2"); 1626MODULE_LICENSE("GPL v2");
1627MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver"); 1627MODULE_DESCRIPTION("Synopsys DesignWare DMA Controller driver");
1628MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); 1628MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
1629MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 1629MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index fb4f4990f5eb..1dc2a4ad0026 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -815,8 +815,6 @@ static int sdma_request_channel(struct sdma_channel *sdmac)
815 815
816 init_completion(&sdmac->done); 816 init_completion(&sdmac->done);
817 817
818 sdmac->buf_tail = 0;
819
820 return 0; 818 return 0;
821out: 819out:
822 820
@@ -927,6 +925,8 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
927 925
928 sdmac->flags = 0; 926 sdmac->flags = 0;
929 927
928 sdmac->buf_tail = 0;
929
930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n", 930 dev_dbg(sdma->dev, "setting up %d entries for channel %d.\n",
931 sg_len, channel); 931 sg_len, channel);
932 932
@@ -1027,6 +1027,8 @@ static struct dma_async_tx_descriptor *sdma_prep_dma_cyclic(
1027 1027
1028 sdmac->status = DMA_IN_PROGRESS; 1028 sdmac->status = DMA_IN_PROGRESS;
1029 1029
1030 sdmac->buf_tail = 0;
1031
1030 sdmac->flags |= IMX_DMA_SG_LOOP; 1032 sdmac->flags |= IMX_DMA_SG_LOOP;
1031 sdmac->direction = direction; 1033 sdmac->direction = direction;
1032 ret = sdma_load_context(sdmac); 1034 ret = sdma_load_context(sdmac);
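
The imx-sdma change moves the buf_tail reset out of sdma_request_channel() and into the two prep callbacks, so the buffer-descriptor cursor starts from zero for every transfer rather than only for the first one on a freshly requested channel. A trimmed sketch of the intent, with illustrative names:

        struct example_chan {
                unsigned int buf_tail;  /* next buffer descriptor to complete */
        };

        static void example_prep_transfer(struct example_chan *c)
        {
                c->buf_tail = 0;        /* per-transfer, not per-channel, state */
                /* ...fill and submit the buffer descriptors... */
        }
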
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index cbcc28e79be6..e4feba6b03c0 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -392,6 +392,8 @@ struct pl330_req {
392 struct pl330_reqcfg *cfg; 392 struct pl330_reqcfg *cfg;
393 /* Pointer to first xfer in the request. */ 393 /* Pointer to first xfer in the request. */
394 struct pl330_xfer *x; 394 struct pl330_xfer *x;
395 /* Hook to attach to DMAC's list of reqs with due callback */
396 struct list_head rqd;
395}; 397};
396 398
397/* 399/*
@@ -461,8 +463,6 @@ struct _pl330_req {
461 /* Number of bytes taken to setup MC for the req */ 463 /* Number of bytes taken to setup MC for the req */
462 u32 mc_len; 464 u32 mc_len;
463 struct pl330_req *r; 465 struct pl330_req *r;
464 /* Hook to attach to DMAC's list of reqs with due callback */
465 struct list_head rqd;
466}; 466};
467 467
468/* ToBeDone for tasklet */ 468/* ToBeDone for tasklet */
@@ -1683,7 +1683,7 @@ static void pl330_dotask(unsigned long data)
1683/* Returns 1 if state was updated, 0 otherwise */ 1683/* Returns 1 if state was updated, 0 otherwise */
1684static int pl330_update(const struct pl330_info *pi) 1684static int pl330_update(const struct pl330_info *pi)
1685{ 1685{
1686 struct _pl330_req *rqdone; 1686 struct pl330_req *rqdone, *tmp;
1687 struct pl330_dmac *pl330; 1687 struct pl330_dmac *pl330;
1688 unsigned long flags; 1688 unsigned long flags;
1689 void __iomem *regs; 1689 void __iomem *regs;
@@ -1750,7 +1750,10 @@ static int pl330_update(const struct pl330_info *pi)
1750 if (active == -1) /* Aborted */ 1750 if (active == -1) /* Aborted */
1751 continue; 1751 continue;
1752 1752
1753 rqdone = &thrd->req[active]; 1753 /* Detach the req */
1754 rqdone = thrd->req[active].r;
1755 thrd->req[active].r = NULL;
1756
1754 mark_free(thrd, active); 1757 mark_free(thrd, active);
1755 1758
1756 /* Get going again ASAP */ 1759 /* Get going again ASAP */
@@ -1762,20 +1765,11 @@ static int pl330_update(const struct pl330_info *pi)
1762 } 1765 }
1763 1766
1764 /* Now that we are in no hurry, do the callbacks */ 1767 /* Now that we are in no hurry, do the callbacks */
1765 while (!list_empty(&pl330->req_done)) { 1768 list_for_each_entry_safe(rqdone, tmp, &pl330->req_done, rqd) {
1766 struct pl330_req *r; 1769 list_del(&rqdone->rqd);
1767
1768 rqdone = container_of(pl330->req_done.next,
1769 struct _pl330_req, rqd);
1770
1771 list_del_init(&rqdone->rqd);
1772
1773 /* Detach the req */
1774 r = rqdone->r;
1775 rqdone->r = NULL;
1776 1770
1777 spin_unlock_irqrestore(&pl330->lock, flags); 1771 spin_unlock_irqrestore(&pl330->lock, flags);
1778 _callback(r, PL330_ERR_NONE); 1772 _callback(rqdone, PL330_ERR_NONE);
1779 spin_lock_irqsave(&pl330->lock, flags); 1773 spin_lock_irqsave(&pl330->lock, flags);
1780 } 1774 }
1781 1775
@@ -2321,7 +2315,7 @@ static void pl330_tasklet(unsigned long data)
2321 /* Pick up ripe tomatoes */ 2315 /* Pick up ripe tomatoes */
2322 list_for_each_entry_safe(desc, _dt, &pch->work_list, node) 2316 list_for_each_entry_safe(desc, _dt, &pch->work_list, node)
2323 if (desc->status == DONE) { 2317 if (desc->status == DONE) {
2324 if (pch->cyclic) 2318 if (!pch->cyclic)
2325 dma_cookie_complete(&desc->txd); 2319 dma_cookie_complete(&desc->txd);
2326 list_move_tail(&desc->node, &list); 2320 list_move_tail(&desc->node, &list);
2327 } 2321 }
@@ -2539,7 +2533,7 @@ static inline void _init_desc(struct dma_pl330_desc *desc)
2539} 2533}
2540 2534
2541/* Returns the number of descriptors added to the DMAC pool */ 2535/* Returns the number of descriptors added to the DMAC pool */
2542int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count) 2536static int add_desc(struct dma_pl330_dmac *pdmac, gfp_t flg, int count)
2543{ 2537{
2544 struct dma_pl330_desc *desc; 2538 struct dma_pl330_desc *desc;
2545 unsigned long flags; 2539 unsigned long flags;
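
In the pl330 rework, the "done" hook (struct list_head rqd) moves from the DMAC-internal struct _pl330_req into the client-visible struct pl330_req, so a completed request can be detached from its thread slot in the interrupt path and queued directly for its callback. The drain loop then becomes a list_for_each_entry_safe() walk, which tolerates deleting the current entry while iterating (the driver additionally drops its lock around each callback). A minimal sketch of that safe-drain idiom, with illustrative types:

        #include <linux/list.h>

        struct example_req {
                struct list_head rqd;           /* hook onto the done list */
        };

        static void example_drain(struct list_head *done,
                                  void (*callback)(struct example_req *))
        {
                struct example_req *r, *tmp;

                list_for_each_entry_safe(r, tmp, done, rqd) {
                        list_del(&r->rqd);      /* safe: tmp already points past r */
                        callback(r);
                }
        }
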
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index 10f375032e96..de5ba86e8b89 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -164,7 +164,7 @@ void *edac_align_ptr(void **p, unsigned size, int n_elems)
164 else 164 else
165 return (char *)ptr; 165 return (char *)ptr;
166 166
167 r = size % align; 167 r = (unsigned long)p % align;
168 168
169 if (r == 0) 169 if (r == 0)
170 return (char *)ptr; 170 return (char *)ptr;
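
The edac_align_ptr() fix changes what the remainder is computed from: the padding has to be derived from an address, not from the element size, otherwise the function "aligns" by an amount unrelated to where the pointer actually sits. A small illustrative helper showing the general round-up-to-alignment calculation (not the EDAC core's own code):

        static void *example_align_ptr(void *p, unsigned long align)
        {
                unsigned long r = (unsigned long)p % align;

                if (r == 0)
                        return p;               /* already aligned */
                return (char *)p + (align - r); /* pad up to the next boundary */
        }
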
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index d27778f65a5d..a499c7ed820a 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1814,12 +1814,6 @@ static int i7core_mce_check_error(struct notifier_block *nb, unsigned long val,
1814 if (mce->bank != 8) 1814 if (mce->bank != 8)
1815 return NOTIFY_DONE; 1815 return NOTIFY_DONE;
1816 1816
1817#ifdef CONFIG_SMP
1818 /* Only handle if it is the right mc controller */
1819 if (mce->socketid != pvt->i7core_dev->socket)
1820 return NOTIFY_DONE;
1821#endif
1822
1823 smp_rmb(); 1817 smp_rmb();
1824 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) { 1818 if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
1825 smp_wmb(); 1819 smp_wmb();
@@ -2116,8 +2110,6 @@ static void i7core_unregister_mci(struct i7core_dev *i7core_dev)
2116 if (pvt->enable_scrub) 2110 if (pvt->enable_scrub)
2117 disable_sdram_scrub_setting(mci); 2111 disable_sdram_scrub_setting(mci);
2118 2112
2119 mce_unregister_decode_chain(&i7_mce_dec);
2120
2121 /* Disable EDAC polling */ 2113 /* Disable EDAC polling */
2122 i7core_pci_ctl_release(pvt); 2114 i7core_pci_ctl_release(pvt);
2123 2115
@@ -2222,8 +2214,6 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2222 /* DCLK for scrub rate setting */ 2214 /* DCLK for scrub rate setting */
2223 pvt->dclk_freq = get_dclk_freq(); 2215 pvt->dclk_freq = get_dclk_freq();
2224 2216
2225 mce_register_decode_chain(&i7_mce_dec);
2226
2227 return 0; 2217 return 0;
2228 2218
2229fail0: 2219fail0:
@@ -2367,8 +2357,10 @@ static int __init i7core_init(void)
2367 2357
2368 pci_rc = pci_register_driver(&i7core_driver); 2358 pci_rc = pci_register_driver(&i7core_driver);
2369 2359
2370 if (pci_rc >= 0) 2360 if (pci_rc >= 0) {
2361 mce_register_decode_chain(&i7_mce_dec);
2371 return 0; 2362 return 0;
2363 }
2372 2364
2373 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n", 2365 i7core_printk(KERN_ERR, "Failed to register device with error %d.\n",
2374 pci_rc); 2366 pci_rc);
@@ -2384,6 +2376,7 @@ static void __exit i7core_exit(void)
2384{ 2376{
2385 debugf2("MC: " __FILE__ ": %s()\n", __func__); 2377 debugf2("MC: " __FILE__ ": %s()\n", __func__);
2386 pci_unregister_driver(&i7core_driver); 2378 pci_unregister_driver(&i7core_driver);
2379 mce_unregister_decode_chain(&i7_mce_dec);
2387} 2380}
2388 2381
2389module_init(i7core_init); 2382module_init(i7core_init);
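
Both the i7core hunks above and the sb_edac hunks below move mce_{register,unregister}_decode_chain() out of the per-controller register/unregister paths and into module init/exit, so the notifier is registered exactly once and only after pci_register_driver() has succeeded. A minimal sketch of that lifetime pairing; the example_* symbols are placeholders, not the drivers' own:

        #include <linux/pci.h>
        #include <asm/mce.h>

        static struct pci_driver example_driver;        /* placeholder */
        static struct notifier_block example_dec;       /* placeholder */

        static int __init example_init(void)
        {
                int rc = pci_register_driver(&example_driver);

                if (rc >= 0) {
                        mce_register_decode_chain(&example_dec);
                        return 0;
                }
                return rc;
        }

        static void __exit example_exit(void)
        {
                pci_unregister_driver(&example_driver);
                mce_unregister_decode_chain(&example_dec);
        }
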
diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
index c6074c5cd1ef..8c87a5e87057 100644
--- a/drivers/edac/mce_amd.h
+++ b/drivers/edac/mce_amd.h
@@ -5,8 +5,6 @@
5 5
6#include <asm/mce.h> 6#include <asm/mce.h>
7 7
8#define BIT_64(n) (U64_C(1) << (n))
9
10#define EC(x) ((x) & 0xffff) 8#define EC(x) ((x) & 0xffff)
11#define XEC(x, mask) (((x) >> 16) & mask) 9#define XEC(x, mask) (((x) >> 16) & mask)
12 10
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 4c402353ba98..0e374625f6f8 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -980,7 +980,8 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
980 layers[1].type = EDAC_MC_LAYER_CHANNEL; 980 layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 layers[1].size = 1; 981 layers[1].size = 1;
982 layers[1].is_virt_csrow = false; 982 layers[1].is_virt_csrow = false;
983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata)); 983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
984 sizeof(*pdata));
984 if (!mci) { 985 if (!mci) {
985 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 986 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
986 return -ENOMEM; 987 return -ENOMEM;
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 4adaf4b7da99..36ad17e79d61 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -555,7 +555,7 @@ static int get_dimm_config(struct mem_ctl_info *mci)
555 pvt->is_close_pg = false; 555 pvt->is_close_pg = false;
556 } 556 }
557 557
558 pci_read_config_dword(pvt->pci_ta, RANK_CFG_A, &reg); 558 pci_read_config_dword(pvt->pci_ddrio, RANK_CFG_A, &reg);
559 if (IS_RDIMM_ENABLED(reg)) { 559 if (IS_RDIMM_ENABLED(reg)) {
560 /* FIXME: Can also be LRDIMM */ 560 /* FIXME: Can also be LRDIMM */
561 debugf0("Memory is registered\n"); 561 debugf0("Memory is registered\n");
@@ -1604,8 +1604,6 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1604 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n", 1604 debugf0("MC: " __FILE__ ": %s(): mci = %p, dev = %p\n",
1605 __func__, mci, &sbridge_dev->pdev[0]->dev); 1605 __func__, mci, &sbridge_dev->pdev[0]->dev);
1606 1606
1607 mce_unregister_decode_chain(&sbridge_mce_dec);
1608
1609 /* Remove MC sysfs nodes */ 1607 /* Remove MC sysfs nodes */
1610 edac_mc_del_mc(mci->dev); 1608 edac_mc_del_mc(mci->dev);
1611 1609
@@ -1682,7 +1680,6 @@ static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1682 goto fail0; 1680 goto fail0;
1683 } 1681 }
1684 1682
1685 mce_register_decode_chain(&sbridge_mce_dec);
1686 return 0; 1683 return 0;
1687 1684
1688fail0: 1685fail0:
@@ -1811,8 +1808,10 @@ static int __init sbridge_init(void)
1811 1808
1812 pci_rc = pci_register_driver(&sbridge_driver); 1809 pci_rc = pci_register_driver(&sbridge_driver);
1813 1810
1814 if (pci_rc >= 0) 1811 if (pci_rc >= 0) {
1812 mce_register_decode_chain(&sbridge_mce_dec);
1815 return 0; 1813 return 0;
1814 }
1816 1815
1817 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n", 1816 sbridge_printk(KERN_ERR, "Failed to register device with error %d.\n",
1818 pci_rc); 1817 pci_rc);
@@ -1828,6 +1827,7 @@ static void __exit sbridge_exit(void)
1828{ 1827{
1829 debugf2("MC: " __FILE__ ": %s()\n", __func__); 1828 debugf2("MC: " __FILE__ ": %s()\n", __func__);
1830 pci_unregister_driver(&sbridge_driver); 1829 pci_unregister_driver(&sbridge_driver);
1830 mce_unregister_decode_chain(&sbridge_mce_dec);
1831} 1831}
1832 1832
1833module_init(sbridge_init); 1833module_init(sbridge_init);
diff --git a/drivers/extcon/extcon-max8997.c b/drivers/extcon/extcon-max8997.c
index 23416e443765..a4ed30bd9a41 100644
--- a/drivers/extcon/extcon-max8997.c
+++ b/drivers/extcon/extcon-max8997.c
@@ -116,8 +116,8 @@ const char *max8997_extcon_cable[] = {
116 [5] = "Charge-downstream", 116 [5] = "Charge-downstream",
117 [6] = "MHL", 117 [6] = "MHL",
118 [7] = "Dock-desk", 118 [7] = "Dock-desk",
119 [7] = "Dock-card", 119 [8] = "Dock-card",
120 [8] = "JIG", 120 [9] = "JIG",
121 121
122 NULL, 122 NULL,
123}; 123};
@@ -514,6 +514,7 @@ static int __devexit max8997_muic_remove(struct platform_device *pdev)
514 514
515 extcon_dev_unregister(info->edev); 515 extcon_dev_unregister(info->edev);
516 516
517 kfree(info->edev);
517 kfree(info); 518 kfree(info);
518 519
519 return 0; 520 return 0;
diff --git a/drivers/extcon/extcon_class.c b/drivers/extcon/extcon_class.c
index f598a700ec15..159aeb07b3ba 100644
--- a/drivers/extcon/extcon_class.c
+++ b/drivers/extcon/extcon_class.c
@@ -762,7 +762,7 @@ int extcon_dev_register(struct extcon_dev *edev, struct device *dev)
762#if defined(CONFIG_ANDROID) 762#if defined(CONFIG_ANDROID)
763 if (switch_class) 763 if (switch_class)
764 ret = class_compat_create_link(switch_class, edev->dev, 764 ret = class_compat_create_link(switch_class, edev->dev,
765 dev); 765 NULL);
766#endif /* CONFIG_ANDROID */ 766#endif /* CONFIG_ANDROID */
767 767
768 spin_lock_init(&edev->lock); 768 spin_lock_init(&edev->lock);
diff --git a/drivers/extcon/extcon_gpio.c b/drivers/extcon/extcon_gpio.c
index fe7a07b47336..8a0dcc11c7c7 100644
--- a/drivers/extcon/extcon_gpio.c
+++ b/drivers/extcon/extcon_gpio.c
@@ -125,6 +125,7 @@ static int __devinit gpio_extcon_probe(struct platform_device *pdev)
125 if (ret < 0) 125 if (ret < 0)
126 goto err_request_irq; 126 goto err_request_irq;
127 127
128 platform_set_drvdata(pdev, extcon_data);
128 /* Perform initial detection */ 129 /* Perform initial detection */
129 gpio_extcon_work(&extcon_data->work.work); 130 gpio_extcon_work(&extcon_data->work.work);
130 131
@@ -146,6 +147,7 @@ static int __devexit gpio_extcon_remove(struct platform_device *pdev)
146 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev); 147 struct gpio_extcon_data *extcon_data = platform_get_drvdata(pdev);
147 148
148 cancel_delayed_work_sync(&extcon_data->work); 149 cancel_delayed_work_sync(&extcon_data->work);
150 free_irq(extcon_data->irq, extcon_data);
149 gpio_free(extcon_data->gpio); 151 gpio_free(extcon_data->gpio);
150 extcon_dev_unregister(&extcon_data->edev); 152 extcon_dev_unregister(&extcon_data->edev);
151 devm_kfree(&pdev->dev, extcon_data); 153 devm_kfree(&pdev->dev, extcon_data);
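
The extcon_gpio hunks are a pair of resource-handling corrections: probe() now calls platform_set_drvdata() before the initial detection work runs, so anything that later calls platform_get_drvdata() sees valid data, and remove() frees the requested IRQ before tearing the rest down. A minimal sketch of that ordering, with illustrative names and fields:

        #include <linux/interrupt.h>
        #include <linux/platform_device.h>
        #include <linux/slab.h>

        struct example_data {
                int irq;
        };

        static int example_probe(struct platform_device *pdev)
        {
                struct example_data *data;

                data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL);
                if (!data)
                        return -ENOMEM;

                platform_set_drvdata(pdev, data);       /* before any user of drvdata */
                /* ...request the gpio/irq, run the initial detection... */
                return 0;
        }

        static int example_remove(struct platform_device *pdev)
        {
                struct example_data *data = platform_get_drvdata(pdev);

                free_irq(data->irq, data);              /* release the IRQ first */
                /* ...then free the gpio and unregister the device... */
                return 0;
        }
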
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 7bb00448e13d..b6453d0e44ad 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2833,7 +2833,7 @@ static __init void exynos5_gpiolib_init(void)
2833 } 2833 }
2834 2834
2835 /* need to set base address for gpc4 */ 2835 /* need to set base address for gpc4 */
2836 exonys5_gpios_1[11].base = gpio_base1 + 0x2E0; 2836 exynos5_gpios_1[11].base = gpio_base1 + 0x2E0;
2837 2837
2838 /* need to set base address for gpx */ 2838 /* need to set base address for gpx */
2839 chip = &exynos5_gpios_1[21]; 2839 chip = &exynos5_gpios_1[21];
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index d7038230b71e..7053140c6596 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -35,9 +35,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
35 {0,} 35 {0,}
36}; 36};
37 37
38
39static void cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
40{
41 struct apertures_struct *ap;
42 bool primary = false;
43
44 ap = alloc_apertures(1);
45 ap->ranges[0].base = pci_resource_start(pdev, 0);
46 ap->ranges[0].size = pci_resource_len(pdev, 0);
47
48#ifdef CONFIG_X86
49 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
50#endif
51 remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
52 kfree(ap);
53}
54
38static int __devinit 55static int __devinit
39cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 56cirrus_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40{ 57{
58 cirrus_kick_out_firmware_fb(pdev);
59
41 return drm_get_pci_dev(pdev, ent, &driver); 60 return drm_get_pci_dev(pdev, ent, &driver);
42} 61}
43 62
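
cirrus here (and mgag200 further down) gain the same probe-time helper: before the KMS driver binds, any firmware framebuffer (efifb, vesafb, ...) sitting on the device's BAR 0 aperture is evicted with remove_conflicting_framebuffers(). A sketch mirroring the helper added above; only the "exampledrmfb" name and the NULL check are additions:

        #include <linux/fb.h>
        #include <linux/pci.h>
        #include <linux/slab.h>

        static void example_kick_out_firmware_fb(struct pci_dev *pdev)
        {
                struct apertures_struct *ap;
                bool primary = false;

                ap = alloc_apertures(1);
                if (!ap)
                        return;
                ap->ranges[0].base = pci_resource_start(pdev, 0);
                ap->ranges[0].size = pci_resource_len(pdev, 0);
        #ifdef CONFIG_X86
                primary = pdev->resource[PCI_ROM_RESOURCE].flags &
                          IORESOURCE_ROM_SHADOW;
        #endif
                remove_conflicting_framebuffers(ap, "exampledrmfb", primary);
                kfree(ap);
        }
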
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index 21bdfa8836f7..64ea597cb6d3 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -145,7 +145,7 @@ struct cirrus_device {
145 struct ttm_bo_device bdev; 145 struct ttm_bo_device bdev;
146 atomic_t validate_sequence; 146 atomic_t validate_sequence;
147 } ttm; 147 } ttm;
148 148 bool mm_inited;
149}; 149};
150 150
151 151
diff --git a/drivers/gpu/drm/cirrus/cirrus_ttm.c b/drivers/gpu/drm/cirrus/cirrus_ttm.c
index 2ebcd11a5023..50e170f879de 100644
--- a/drivers/gpu/drm/cirrus/cirrus_ttm.c
+++ b/drivers/gpu/drm/cirrus/cirrus_ttm.c
@@ -275,12 +275,17 @@ int cirrus_mm_init(struct cirrus_device *cirrus)
275 pci_resource_len(dev->pdev, 0), 275 pci_resource_len(dev->pdev, 0),
276 DRM_MTRR_WC); 276 DRM_MTRR_WC);
277 277
278 cirrus->mm_inited = true;
278 return 0; 279 return 0;
279} 280}
280 281
281void cirrus_mm_fini(struct cirrus_device *cirrus) 282void cirrus_mm_fini(struct cirrus_device *cirrus)
282{ 283{
283 struct drm_device *dev = cirrus->dev; 284 struct drm_device *dev = cirrus->dev;
285
286 if (!cirrus->mm_inited)
287 return;
288
284 ttm_bo_device_release(&cirrus->ttm.bdev); 289 ttm_bo_device_release(&cirrus->ttm.bdev);
285 290
286 cirrus_ttm_global_release(cirrus); 291 cirrus_ttm_global_release(cirrus);
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index c3b5139eba7f..5873e481e5d2 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -30,7 +30,7 @@
30#include <linux/kernel.h> 30#include <linux/kernel.h>
31#include <linux/slab.h> 31#include <linux/slab.h>
32#include <linux/i2c.h> 32#include <linux/i2c.h>
33#include <linux/export.h> 33#include <linux/module.h>
34#include "drmP.h" 34#include "drmP.h"
35#include "drm_edid.h" 35#include "drm_edid.h"
36#include "drm_edid_modes.h" 36#include "drm_edid_modes.h"
@@ -149,6 +149,10 @@ int drm_edid_header_is_valid(const u8 *raw_edid)
149} 149}
150EXPORT_SYMBOL(drm_edid_header_is_valid); 150EXPORT_SYMBOL(drm_edid_header_is_valid);
151 151
152static int edid_fixup __read_mostly = 6;
153module_param_named(edid_fixup, edid_fixup, int, 0400);
154MODULE_PARM_DESC(edid_fixup,
155 "Minimum number of valid EDID header bytes (0-8, default 6)");
152 156
153/* 157/*
154 * Sanity check the EDID block (base or extension). Return 0 if the block 158 * Sanity check the EDID block (base or extension). Return 0 if the block
@@ -160,10 +164,13 @@ bool drm_edid_block_valid(u8 *raw_edid, int block)
160 u8 csum = 0; 164 u8 csum = 0;
161 struct edid *edid = (struct edid *)raw_edid; 165 struct edid *edid = (struct edid *)raw_edid;
162 166
167 if (edid_fixup > 8 || edid_fixup < 0)
168 edid_fixup = 6;
169
163 if (block == 0) { 170 if (block == 0) {
164 int score = drm_edid_header_is_valid(raw_edid); 171 int score = drm_edid_header_is_valid(raw_edid);
165 if (score == 8) ; 172 if (score == 8) ;
166 else if (score >= 6) { 173 else if (score >= edid_fixup) {
167 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n"); 174 DRM_DEBUG("Fixing EDID header, your hardware may be failing\n");
168 memcpy(raw_edid, edid_header, sizeof(edid_header)); 175 memcpy(raw_edid, edid_header, sizeof(edid_header));
169 } else { 176 } else {
@@ -603,7 +610,7 @@ static bool
603drm_monitor_supports_rb(struct edid *edid) 610drm_monitor_supports_rb(struct edid *edid)
604{ 611{
605 if (edid->revision >= 4) { 612 if (edid->revision >= 4) {
606 bool ret; 613 bool ret = false;
607 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret); 614 drm_for_each_detailed_block((u8 *)edid, is_rb, &ret);
608 return ret; 615 return ret;
609 } 616 }
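
The drm_edid.c change turns the previously hard-coded "at least 6 valid header bytes" fixup threshold into a module parameter (edid_fixup, read-only after load) and clamps out-of-range values back to the default at the point of use; this is also why the include switches from export.h to module.h. A minimal sketch of that parameter-plus-clamp pattern; example_threshold is an illustrative name:

        #include <linux/module.h>
        #include <linux/moduleparam.h>

        static int example_threshold __read_mostly = 6;
        module_param_named(threshold, example_threshold, int, 0400);
        MODULE_PARM_DESC(threshold,
                "Minimum number of valid header bytes (0-8, default 6)");

        static int example_get_threshold(void)
        {
                if (example_threshold < 0 || example_threshold > 8)
                        example_threshold = 6;  /* fall back to the default */
                return example_threshold;
        }
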
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index 420953197d0a..d6de2e07fa03 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -244,8 +244,8 @@ static const struct file_operations exynos_drm_driver_fops = {
244}; 244};
245 245
246static struct drm_driver exynos_drm_driver = { 246static struct drm_driver exynos_drm_driver = {
247 .driver_features = DRIVER_HAVE_IRQ | DRIVER_BUS_PLATFORM | 247 .driver_features = DRIVER_HAVE_IRQ | DRIVER_MODESET |
248 DRIVER_MODESET | DRIVER_GEM | DRIVER_PRIME, 248 DRIVER_GEM | DRIVER_PRIME,
249 .load = exynos_drm_load, 249 .load = exynos_drm_load,
250 .unload = exynos_drm_unload, 250 .unload = exynos_drm_unload,
251 .open = exynos_drm_open, 251 .open = exynos_drm_open,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_encoder.c b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
index 6e9ac7bd1dcf..23d5ad379f86 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_encoder.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_encoder.c
@@ -172,19 +172,12 @@ static void exynos_drm_encoder_commit(struct drm_encoder *encoder)
172 manager_ops->commit(manager->dev); 172 manager_ops->commit(manager->dev);
173} 173}
174 174
175static struct drm_crtc *
176exynos_drm_encoder_get_crtc(struct drm_encoder *encoder)
177{
178 return encoder->crtc;
179}
180
181static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = { 175static struct drm_encoder_helper_funcs exynos_encoder_helper_funcs = {
182 .dpms = exynos_drm_encoder_dpms, 176 .dpms = exynos_drm_encoder_dpms,
183 .mode_fixup = exynos_drm_encoder_mode_fixup, 177 .mode_fixup = exynos_drm_encoder_mode_fixup,
184 .mode_set = exynos_drm_encoder_mode_set, 178 .mode_set = exynos_drm_encoder_mode_set,
185 .prepare = exynos_drm_encoder_prepare, 179 .prepare = exynos_drm_encoder_prepare,
186 .commit = exynos_drm_encoder_commit, 180 .commit = exynos_drm_encoder_commit,
187 .get_crtc = exynos_drm_encoder_get_crtc,
188}; 181};
189 182
190static void exynos_drm_encoder_destroy(struct drm_encoder *encoder) 183static void exynos_drm_encoder_destroy(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index f82a299553fb..4ccfe4328fab 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -51,11 +51,22 @@ struct exynos_drm_fb {
51static void exynos_drm_fb_destroy(struct drm_framebuffer *fb) 51static void exynos_drm_fb_destroy(struct drm_framebuffer *fb)
52{ 52{
53 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb); 53 struct exynos_drm_fb *exynos_fb = to_exynos_fb(fb);
54 unsigned int i;
54 55
55 DRM_DEBUG_KMS("%s\n", __FILE__); 56 DRM_DEBUG_KMS("%s\n", __FILE__);
56 57
57 drm_framebuffer_cleanup(fb); 58 drm_framebuffer_cleanup(fb);
58 59
60 for (i = 0; i < ARRAY_SIZE(exynos_fb->exynos_gem_obj); i++) {
61 struct drm_gem_object *obj;
62
63 if (exynos_fb->exynos_gem_obj[i] == NULL)
64 continue;
65
66 obj = &exynos_fb->exynos_gem_obj[i]->base;
67 drm_gem_object_unreference_unlocked(obj);
68 }
69
59 kfree(exynos_fb); 70 kfree(exynos_fb);
60 exynos_fb = NULL; 71 exynos_fb = NULL;
61} 72}
@@ -134,11 +145,11 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
134 return ERR_PTR(-ENOENT); 145 return ERR_PTR(-ENOENT);
135 } 146 }
136 147
137 drm_gem_object_unreference_unlocked(obj);
138
139 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj); 148 fb = exynos_drm_framebuffer_init(dev, mode_cmd, obj);
140 if (IS_ERR(fb)) 149 if (IS_ERR(fb)) {
150 drm_gem_object_unreference_unlocked(obj);
141 return fb; 151 return fb;
152 }
142 153
143 exynos_fb = to_exynos_fb(fb); 154 exynos_fb = to_exynos_fb(fb);
144 nr = exynos_drm_format_num_buffers(fb->pixel_format); 155 nr = exynos_drm_format_num_buffers(fb->pixel_format);
@@ -152,8 +163,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
152 return ERR_PTR(-ENOENT); 163 return ERR_PTR(-ENOENT);
153 } 164 }
154 165
155 drm_gem_object_unreference_unlocked(obj);
156
157 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj); 166 exynos_fb->exynos_gem_obj[i] = to_exynos_gem_obj(obj);
158 } 167 }
159 168
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.h b/drivers/gpu/drm/exynos/exynos_drm_fb.h
index 3ecb30d93552..50823756cdea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.h
@@ -31,10 +31,10 @@
31static inline int exynos_drm_format_num_buffers(uint32_t format) 31static inline int exynos_drm_format_num_buffers(uint32_t format)
32{ 32{
33 switch (format) { 33 switch (format) {
34 case DRM_FORMAT_NV12M: 34 case DRM_FORMAT_NV12:
35 case DRM_FORMAT_NV12MT: 35 case DRM_FORMAT_NV12MT:
36 return 2; 36 return 2;
37 case DRM_FORMAT_YUV420M: 37 case DRM_FORMAT_YUV420:
38 return 3; 38 return 3;
39 default: 39 default:
40 return 1; 40 return 1;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index fc91293c4560..5c8b683029ea 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -689,7 +689,6 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
689 struct drm_device *dev, uint32_t handle, 689 struct drm_device *dev, uint32_t handle,
690 uint64_t *offset) 690 uint64_t *offset)
691{ 691{
692 struct exynos_drm_gem_obj *exynos_gem_obj;
693 struct drm_gem_object *obj; 692 struct drm_gem_object *obj;
694 int ret = 0; 693 int ret = 0;
695 694
@@ -710,15 +709,13 @@ int exynos_drm_gem_dumb_map_offset(struct drm_file *file_priv,
710 goto unlock; 709 goto unlock;
711 } 710 }
712 711
713 exynos_gem_obj = to_exynos_gem_obj(obj); 712 if (!obj->map_list.map) {
714 713 ret = drm_gem_create_mmap_offset(obj);
715 if (!exynos_gem_obj->base.map_list.map) {
716 ret = drm_gem_create_mmap_offset(&exynos_gem_obj->base);
717 if (ret) 714 if (ret)
718 goto out; 715 goto out;
719 } 716 }
720 717
721 *offset = (u64)exynos_gem_obj->base.map_list.hash.key << PAGE_SHIFT; 718 *offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;
722 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset); 719 DRM_DEBUG_KMS("offset = 0x%lx\n", (unsigned long)*offset);
723 720
724out: 721out:
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 68ef01028375..e2147a2ddcec 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -365,7 +365,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
365 switch (win_data->pixel_format) { 365 switch (win_data->pixel_format) {
366 case DRM_FORMAT_NV12MT: 366 case DRM_FORMAT_NV12MT:
367 tiled_mode = true; 367 tiled_mode = true;
368 case DRM_FORMAT_NV12M: 368 case DRM_FORMAT_NV12:
369 crcb_mode = false; 369 crcb_mode = false;
370 buf_num = 2; 370 buf_num = 2;
371 break; 371 break;
@@ -601,18 +601,20 @@ static void mixer_win_reset(struct mixer_context *ctx)
601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080); 601 mixer_reg_write(res, MXR_BG_COLOR2, 0x008080);
602 602
603 /* setting graphical layers */ 603 /* setting graphical layers */
604
605 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */ 604 val = MXR_GRP_CFG_COLOR_KEY_DISABLE; /* no blank key */
606 val |= MXR_GRP_CFG_WIN_BLEND_EN; 605 val |= MXR_GRP_CFG_WIN_BLEND_EN;
606 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
607 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
607 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */ 608 val |= MXR_GRP_CFG_ALPHA_VAL(0xff); /* non-transparent alpha */
608 609
609 /* the same configuration for both layers */ 610 /* the same configuration for both layers */
610 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val); 611 mixer_reg_write(res, MXR_GRAPHIC_CFG(0), val);
611
612 val |= MXR_GRP_CFG_BLEND_PRE_MUL;
613 val |= MXR_GRP_CFG_PIXEL_BLEND_EN;
614 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val); 612 mixer_reg_write(res, MXR_GRAPHIC_CFG(1), val);
615 613
614 /* setting video layers */
615 val = MXR_GRP_CFG_ALPHA_VAL(0);
616 mixer_reg_write(res, MXR_VIDEO_CFG, val);
617
616 /* configuration of Video Processor Registers */ 618 /* configuration of Video Processor Registers */
617 vp_win_reset(ctx); 619 vp_win_reset(ctx);
618 vp_default_filter(res); 620 vp_default_filter(res);
diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
index f920fb5e42b6..fa9439159ebd 100644
--- a/drivers/gpu/drm/i810/i810_dma.c
+++ b/drivers/gpu/drm/i810/i810_dma.c
@@ -130,11 +130,10 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
130 return -EINVAL; 130 return -EINVAL;
131 131
132 /* This is all entirely broken */ 132 /* This is all entirely broken */
133 down_write(&current->mm->mmap_sem);
134 old_fops = file_priv->filp->f_op; 133 old_fops = file_priv->filp->f_op;
135 file_priv->filp->f_op = &i810_buffer_fops; 134 file_priv->filp->f_op = &i810_buffer_fops;
136 dev_priv->mmap_buffer = buf; 135 dev_priv->mmap_buffer = buf;
137 buf_priv->virtual = (void *)do_mmap(file_priv->filp, 0, buf->total, 136 buf_priv->virtual = (void *)vm_mmap(file_priv->filp, 0, buf->total,
138 PROT_READ | PROT_WRITE, 137 PROT_READ | PROT_WRITE,
139 MAP_SHARED, buf->bus_address); 138 MAP_SHARED, buf->bus_address);
140 dev_priv->mmap_buffer = NULL; 139 dev_priv->mmap_buffer = NULL;
@@ -145,7 +144,6 @@ static int i810_map_buffer(struct drm_buf *buf, struct drm_file *file_priv)
145 retcode = PTR_ERR(buf_priv->virtual); 144 retcode = PTR_ERR(buf_priv->virtual);
146 buf_priv->virtual = NULL; 145 buf_priv->virtual = NULL;
147 } 146 }
148 up_write(&current->mm->mmap_sem);
149 147
150 return retcode; 148 return retcode;
151} 149}
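
The i810 hunk replaces do_mmap(), which required the caller to take current->mm->mmap_sem itself, with vm_mmap(), which handles the semaphore internally, so the explicit down_write()/up_write() pair goes away. A minimal sketch of a kernel-side mapping through vm_mmap(); the arguments are illustrative:

        #include <linux/fs.h>
        #include <linux/mm.h>
        #include <linux/mman.h>

        static unsigned long example_map(struct file *filp, size_t len,
                                         unsigned long offset)
        {
                /* vm_mmap() takes mmap_sem internally; no locking here. */
                return vm_mmap(filp, 0, len, PROT_READ | PROT_WRITE,
                               MAP_SHARED, offset);
        }
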
diff --git a/drivers/gpu/drm/i915/i915_drv.c b/drivers/gpu/drm/i915/i915_drv.c
index 05adbf23951a..a378c0800304 100644
--- a/drivers/gpu/drm/i915/i915_drv.c
+++ b/drivers/gpu/drm/i915/i915_drv.c
@@ -233,6 +233,7 @@ static const struct intel_device_info intel_sandybridge_d_info = {
233 .has_blt_ring = 1, 233 .has_blt_ring = 1,
234 .has_llc = 1, 234 .has_llc = 1,
235 .has_pch_split = 1, 235 .has_pch_split = 1,
236 .has_force_wake = 1,
236}; 237};
237 238
238static const struct intel_device_info intel_sandybridge_m_info = { 239static const struct intel_device_info intel_sandybridge_m_info = {
@@ -243,6 +244,7 @@ static const struct intel_device_info intel_sandybridge_m_info = {
243 .has_blt_ring = 1, 244 .has_blt_ring = 1,
244 .has_llc = 1, 245 .has_llc = 1,
245 .has_pch_split = 1, 246 .has_pch_split = 1,
247 .has_force_wake = 1,
246}; 248};
247 249
248static const struct intel_device_info intel_ivybridge_d_info = { 250static const struct intel_device_info intel_ivybridge_d_info = {
@@ -252,6 +254,7 @@ static const struct intel_device_info intel_ivybridge_d_info = {
252 .has_blt_ring = 1, 254 .has_blt_ring = 1,
253 .has_llc = 1, 255 .has_llc = 1,
254 .has_pch_split = 1, 256 .has_pch_split = 1,
257 .has_force_wake = 1,
255}; 258};
256 259
257static const struct intel_device_info intel_ivybridge_m_info = { 260static const struct intel_device_info intel_ivybridge_m_info = {
@@ -262,6 +265,7 @@ static const struct intel_device_info intel_ivybridge_m_info = {
262 .has_blt_ring = 1, 265 .has_blt_ring = 1,
263 .has_llc = 1, 266 .has_llc = 1,
264 .has_pch_split = 1, 267 .has_pch_split = 1,
268 .has_force_wake = 1,
265}; 269};
266 270
267static const struct intel_device_info intel_valleyview_m_info = { 271static const struct intel_device_info intel_valleyview_m_info = {
@@ -289,6 +293,7 @@ static const struct intel_device_info intel_haswell_d_info = {
289 .has_blt_ring = 1, 293 .has_blt_ring = 1,
290 .has_llc = 1, 294 .has_llc = 1,
291 .has_pch_split = 1, 295 .has_pch_split = 1,
296 .has_force_wake = 1,
292}; 297};
293 298
294static const struct intel_device_info intel_haswell_m_info = { 299static const struct intel_device_info intel_haswell_m_info = {
@@ -298,6 +303,7 @@ static const struct intel_device_info intel_haswell_m_info = {
298 .has_blt_ring = 1, 303 .has_blt_ring = 1,
299 .has_llc = 1, 304 .has_llc = 1,
300 .has_pch_split = 1, 305 .has_pch_split = 1,
306 .has_force_wake = 1,
301}; 307};
302 308
303static const struct pci_device_id pciidlist[] = { /* aka */ 309static const struct pci_device_id pciidlist[] = { /* aka */
@@ -1144,10 +1150,9 @@ MODULE_LICENSE("GPL and additional rights");
1144 1150
1145/* We give fast paths for the really cool registers */ 1151/* We give fast paths for the really cool registers */
1146#define NEEDS_FORCE_WAKE(dev_priv, reg) \ 1152#define NEEDS_FORCE_WAKE(dev_priv, reg) \
1147 (((dev_priv)->info->gen >= 6) && \ 1153 ((HAS_FORCE_WAKE((dev_priv)->dev)) && \
1148 ((reg) < 0x40000) && \ 1154 ((reg) < 0x40000) && \
1149 ((reg) != FORCEWAKE)) && \ 1155 ((reg) != FORCEWAKE))
1150 (!IS_VALLEYVIEW((dev_priv)->dev))
1151 1156
1152static bool IS_DISPLAYREG(u32 reg) 1157static bool IS_DISPLAYREG(u32 reg)
1153{ 1158{
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 24ef5d77927f..a0c15abbdcef 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -286,6 +286,7 @@ struct intel_device_info {
286 u8 is_ivybridge:1; 286 u8 is_ivybridge:1;
287 u8 is_valleyview:1; 287 u8 is_valleyview:1;
288 u8 has_pch_split:1; 288 u8 has_pch_split:1;
289 u8 has_force_wake:1;
289 u8 is_haswell:1; 290 u8 is_haswell:1;
290 u8 has_fbc:1; 291 u8 has_fbc:1;
291 u8 has_pipe_cxsr:1; 292 u8 has_pipe_cxsr:1;
@@ -1122,6 +1123,8 @@ struct drm_i915_file_private {
1122#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT) 1123#define HAS_PCH_CPT(dev) (INTEL_PCH_TYPE(dev) == PCH_CPT)
1123#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX) 1124#define HAS_PCH_IBX(dev) (INTEL_PCH_TYPE(dev) == PCH_IBX)
1124 1125
1126#define HAS_FORCE_WAKE(dev) (INTEL_INFO(dev)->has_force_wake)
1127
1125#include "i915_trace.h" 1128#include "i915_trace.h"
1126 1129
1127/** 1130/**
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index 84975e1e1f05..23f2ea0f0651 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -585,7 +585,7 @@ out:
585 return ret; 585 return ret;
586} 586}
587 587
588static void pch_irq_handler(struct drm_device *dev, u32 pch_iir) 588static void ibx_irq_handler(struct drm_device *dev, u32 pch_iir)
589{ 589{
590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private; 590 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
591 int pipe; 591 int pipe;
@@ -625,6 +625,35 @@ static void pch_irq_handler(struct drm_device *dev, u32 pch_iir)
625 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n"); 625 DRM_DEBUG_DRIVER("PCH transcoder A underrun interrupt\n");
626} 626}
627 627
628static void cpt_irq_handler(struct drm_device *dev, u32 pch_iir)
629{
630 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
631 int pipe;
632
633 if (pch_iir & SDE_AUDIO_POWER_MASK_CPT)
634 DRM_DEBUG_DRIVER("PCH audio power change on port %d\n",
635 (pch_iir & SDE_AUDIO_POWER_MASK_CPT) >>
636 SDE_AUDIO_POWER_SHIFT_CPT);
637
638 if (pch_iir & SDE_AUX_MASK_CPT)
639 DRM_DEBUG_DRIVER("AUX channel interrupt\n");
640
641 if (pch_iir & SDE_GMBUS_CPT)
642 DRM_DEBUG_DRIVER("PCH GMBUS interrupt\n");
643
644 if (pch_iir & SDE_AUDIO_CP_REQ_CPT)
645 DRM_DEBUG_DRIVER("Audio CP request interrupt\n");
646
647 if (pch_iir & SDE_AUDIO_CP_CHG_CPT)
648 DRM_DEBUG_DRIVER("Audio CP change interrupt\n");
649
650 if (pch_iir & SDE_FDI_MASK_CPT)
651 for_each_pipe(pipe)
652 DRM_DEBUG_DRIVER(" pipe %c FDI IIR: 0x%08x\n",
653 pipe_name(pipe),
654 I915_READ(FDI_RX_IIR(pipe)));
655}
656
628static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS) 657static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
629{ 658{
630 struct drm_device *dev = (struct drm_device *) arg; 659 struct drm_device *dev = (struct drm_device *) arg;
@@ -666,7 +695,7 @@ static irqreturn_t ivybridge_irq_handler(DRM_IRQ_ARGS)
666 695
667 if (pch_iir & SDE_HOTPLUG_MASK_CPT) 696 if (pch_iir & SDE_HOTPLUG_MASK_CPT)
668 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 697 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
669 pch_irq_handler(dev, pch_iir); 698 cpt_irq_handler(dev, pch_iir);
670 699
671 /* clear PCH hotplug event before clear CPU irq */ 700 /* clear PCH hotplug event before clear CPU irq */
672 I915_WRITE(SDEIIR, pch_iir); 701 I915_WRITE(SDEIIR, pch_iir);
@@ -759,7 +788,10 @@ static irqreturn_t ironlake_irq_handler(DRM_IRQ_ARGS)
759 if (de_iir & DE_PCH_EVENT) { 788 if (de_iir & DE_PCH_EVENT) {
760 if (pch_iir & hotplug_mask) 789 if (pch_iir & hotplug_mask)
761 queue_work(dev_priv->wq, &dev_priv->hotplug_work); 790 queue_work(dev_priv->wq, &dev_priv->hotplug_work);
762 pch_irq_handler(dev, pch_iir); 791 if (HAS_PCH_CPT(dev))
792 cpt_irq_handler(dev, pch_iir);
793 else
794 ibx_irq_handler(dev, pch_iir);
763 } 795 }
764 796
765 if (de_iir & DE_PCU_EVENT) { 797 if (de_iir & DE_PCU_EVENT) {
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 0a61481cd2c2..9dfc4c5ff31e 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -210,9 +210,17 @@
210#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2) 210#define MI_DISPLAY_FLIP MI_INSTR(0x14, 2)
211#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1) 211#define MI_DISPLAY_FLIP_I915 MI_INSTR(0x14, 1)
212#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20) 212#define MI_DISPLAY_FLIP_PLANE(n) ((n) << 20)
213/* IVB has funny definitions for which plane to flip. */
214#define MI_DISPLAY_FLIP_IVB_PLANE_A (0 << 19)
215#define MI_DISPLAY_FLIP_IVB_PLANE_B (1 << 19)
216#define MI_DISPLAY_FLIP_IVB_SPRITE_A (2 << 19)
217#define MI_DISPLAY_FLIP_IVB_SPRITE_B (3 << 19)
218#define MI_DISPLAY_FLIP_IVB_PLANE_C (4 << 19)
219#define MI_DISPLAY_FLIP_IVB_SPRITE_C (5 << 19)
213#define MI_ARB_ON_OFF MI_INSTR(0x08, 0) 220#define MI_ARB_ON_OFF MI_INSTR(0x08, 0)
214#define MI_ARB_ENABLE (1<<0) 221#define MI_ARB_ENABLE (1<<0)
215#define MI_ARB_DISABLE (0<<0) 222#define MI_ARB_DISABLE (0<<0)
223
216#define MI_SET_CONTEXT MI_INSTR(0x18, 0) 224#define MI_SET_CONTEXT MI_INSTR(0x18, 0)
217#define MI_MM_SPACE_GTT (1<<8) 225#define MI_MM_SPACE_GTT (1<<8)
218#define MI_MM_SPACE_PHYSICAL (0<<8) 226#define MI_MM_SPACE_PHYSICAL (0<<8)
@@ -3391,7 +3399,7 @@
3391 3399
3392/* PCH */ 3400/* PCH */
3393 3401
3394/* south display engine interrupt */ 3402/* south display engine interrupt: IBX */
3395#define SDE_AUDIO_POWER_D (1 << 27) 3403#define SDE_AUDIO_POWER_D (1 << 27)
3396#define SDE_AUDIO_POWER_C (1 << 26) 3404#define SDE_AUDIO_POWER_C (1 << 26)
3397#define SDE_AUDIO_POWER_B (1 << 25) 3405#define SDE_AUDIO_POWER_B (1 << 25)
@@ -3427,15 +3435,44 @@
3427#define SDE_TRANSA_CRC_ERR (1 << 1) 3435#define SDE_TRANSA_CRC_ERR (1 << 1)
3428#define SDE_TRANSA_FIFO_UNDER (1 << 0) 3436#define SDE_TRANSA_FIFO_UNDER (1 << 0)
3429#define SDE_TRANS_MASK (0x3f) 3437#define SDE_TRANS_MASK (0x3f)
3430/* CPT */ 3438
3431#define SDE_CRT_HOTPLUG_CPT (1 << 19) 3439/* south display engine interrupt: CPT/PPT */
3440#define SDE_AUDIO_POWER_D_CPT (1 << 31)
3441#define SDE_AUDIO_POWER_C_CPT (1 << 30)
3442#define SDE_AUDIO_POWER_B_CPT (1 << 29)
3443#define SDE_AUDIO_POWER_SHIFT_CPT 29
3444#define SDE_AUDIO_POWER_MASK_CPT (7 << 29)
3445#define SDE_AUXD_CPT (1 << 27)
3446#define SDE_AUXC_CPT (1 << 26)
3447#define SDE_AUXB_CPT (1 << 25)
3448#define SDE_AUX_MASK_CPT (7 << 25)
3432#define SDE_PORTD_HOTPLUG_CPT (1 << 23) 3449#define SDE_PORTD_HOTPLUG_CPT (1 << 23)
3433#define SDE_PORTC_HOTPLUG_CPT (1 << 22) 3450#define SDE_PORTC_HOTPLUG_CPT (1 << 22)
3434#define SDE_PORTB_HOTPLUG_CPT (1 << 21) 3451#define SDE_PORTB_HOTPLUG_CPT (1 << 21)
3452#define SDE_CRT_HOTPLUG_CPT (1 << 19)
3435#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \ 3453#define SDE_HOTPLUG_MASK_CPT (SDE_CRT_HOTPLUG_CPT | \
3436 SDE_PORTD_HOTPLUG_CPT | \ 3454 SDE_PORTD_HOTPLUG_CPT | \
3437 SDE_PORTC_HOTPLUG_CPT | \ 3455 SDE_PORTC_HOTPLUG_CPT | \
3438 SDE_PORTB_HOTPLUG_CPT) 3456 SDE_PORTB_HOTPLUG_CPT)
3457#define SDE_GMBUS_CPT (1 << 17)
3458#define SDE_AUDIO_CP_REQ_C_CPT (1 << 10)
3459#define SDE_AUDIO_CP_CHG_C_CPT (1 << 9)
3460#define SDE_FDI_RXC_CPT (1 << 8)
3461#define SDE_AUDIO_CP_REQ_B_CPT (1 << 6)
3462#define SDE_AUDIO_CP_CHG_B_CPT (1 << 5)
3463#define SDE_FDI_RXB_CPT (1 << 4)
3464#define SDE_AUDIO_CP_REQ_A_CPT (1 << 2)
3465#define SDE_AUDIO_CP_CHG_A_CPT (1 << 1)
3466#define SDE_FDI_RXA_CPT (1 << 0)
3467#define SDE_AUDIO_CP_REQ_CPT (SDE_AUDIO_CP_REQ_C_CPT | \
3468 SDE_AUDIO_CP_REQ_B_CPT | \
3469 SDE_AUDIO_CP_REQ_A_CPT)
3470#define SDE_AUDIO_CP_CHG_CPT (SDE_AUDIO_CP_CHG_C_CPT | \
3471 SDE_AUDIO_CP_CHG_B_CPT | \
3472 SDE_AUDIO_CP_CHG_A_CPT)
3473#define SDE_FDI_MASK_CPT (SDE_FDI_RXC_CPT | \
3474 SDE_FDI_RXB_CPT | \
3475 SDE_FDI_RXA_CPT)
3439 3476
3440#define SDEISR 0xc4000 3477#define SDEISR 0xc4000
3441#define SDEIMR 0xc4004 3478#define SDEIMR 0xc4004
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 06721c0e9f98..b3052ef70d16 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -6377,17 +6377,34 @@ static int intel_gen7_queue_flip(struct drm_device *dev,
6377 struct drm_i915_private *dev_priv = dev->dev_private; 6377 struct drm_i915_private *dev_priv = dev->dev_private;
6378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc); 6378 struct intel_crtc *intel_crtc = to_intel_crtc(crtc);
6379 struct intel_ring_buffer *ring = &dev_priv->ring[BCS]; 6379 struct intel_ring_buffer *ring = &dev_priv->ring[BCS];
6380 uint32_t plane_bit = 0;
6380 int ret; 6381 int ret;
6381 6382
6382 ret = intel_pin_and_fence_fb_obj(dev, obj, ring); 6383 ret = intel_pin_and_fence_fb_obj(dev, obj, ring);
6383 if (ret) 6384 if (ret)
6384 goto err; 6385 goto err;
6385 6386
6387 switch(intel_crtc->plane) {
6388 case PLANE_A:
6389 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_A;
6390 break;
6391 case PLANE_B:
6392 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_B;
6393 break;
6394 case PLANE_C:
6395 plane_bit = MI_DISPLAY_FLIP_IVB_PLANE_C;
6396 break;
6397 default:
6398 WARN_ONCE(1, "unknown plane in flip command\n");
6399 ret = -ENODEV;
6400 goto err;
6401 }
6402
6386 ret = intel_ring_begin(ring, 4); 6403 ret = intel_ring_begin(ring, 4);
6387 if (ret) 6404 if (ret)
6388 goto err_unpin; 6405 goto err_unpin;
6389 6406
6390 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | (intel_crtc->plane << 19)); 6407 intel_ring_emit(ring, MI_DISPLAY_FLIP_I915 | plane_bit);
6391 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode)); 6408 intel_ring_emit(ring, (fb->pitches[0] | obj->tiling_mode));
6392 intel_ring_emit(ring, (obj->gtt_offset)); 6409 intel_ring_emit(ring, (obj->gtt_offset));
6393 intel_ring_emit(ring, (MI_NOOP)); 6410 intel_ring_emit(ring, (MI_NOOP));
@@ -6760,7 +6777,7 @@ static void intel_setup_outputs(struct drm_device *dev)
6760 if (I915_READ(HDMIC) & PORT_DETECTED) 6777 if (I915_READ(HDMIC) & PORT_DETECTED)
6761 intel_hdmi_init(dev, HDMIC); 6778 intel_hdmi_init(dev, HDMIC);
6762 6779
6763 if (I915_READ(HDMID) & PORT_DETECTED) 6780 if (!dpd_is_edp && I915_READ(HDMID) & PORT_DETECTED)
6764 intel_hdmi_init(dev, HDMID); 6781 intel_hdmi_init(dev, HDMID);
6765 6782
6766 if (I915_READ(PCH_DP_C) & DP_DETECTED) 6783 if (I915_READ(PCH_DP_C) & DP_DETECTED)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 6538c46fe959..76a708029dcb 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -32,6 +32,7 @@
32#include "drm.h" 32#include "drm.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_edid.h"
35#include "intel_drv.h" 36#include "intel_drv.h"
36#include "i915_drm.h" 37#include "i915_drm.h"
37#include "i915_drv.h" 38#include "i915_drv.h"
@@ -67,6 +68,8 @@ struct intel_dp {
67 struct drm_display_mode *panel_fixed_mode; /* for eDP */ 68 struct drm_display_mode *panel_fixed_mode; /* for eDP */
68 struct delayed_work panel_vdd_work; 69 struct delayed_work panel_vdd_work;
69 bool want_panel_vdd; 70 bool want_panel_vdd;
71 struct edid *edid; /* cached EDID for eDP */
72 int edid_mode_count;
70}; 73};
71 74
72/** 75/**
@@ -383,7 +386,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
383 int recv_bytes; 386 int recv_bytes;
384 uint32_t status; 387 uint32_t status;
385 uint32_t aux_clock_divider; 388 uint32_t aux_clock_divider;
386 int try, precharge = 5; 389 int try, precharge;
387 390
388 intel_dp_check_edp(intel_dp); 391 intel_dp_check_edp(intel_dp);
389 /* The clock divider is based off the hrawclk, 392 /* The clock divider is based off the hrawclk,
@@ -403,6 +406,11 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
403 else 406 else
404 aux_clock_divider = intel_hrawclk(dev) / 2; 407 aux_clock_divider = intel_hrawclk(dev) / 2;
405 408
409 if (IS_GEN6(dev))
410 precharge = 3;
411 else
412 precharge = 5;
413
406 /* Try to wait for any previous AUX channel activity */ 414 /* Try to wait for any previous AUX channel activity */
407 for (try = 0; try < 3; try++) { 415 for (try = 0; try < 3; try++) {
408 status = I915_READ(ch_ctl); 416 status = I915_READ(ch_ctl);
@@ -1980,6 +1988,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
1980 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT)) 1988 if (!(intel_dp->dpcd[DP_DOWN_STREAM_PORT_COUNT] & DP_OUI_SUPPORT))
1981 return; 1989 return;
1982 1990
1991 ironlake_edp_panel_vdd_on(intel_dp);
1992
1983 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3)) 1993 if (intel_dp_aux_native_read_retry(intel_dp, DP_SINK_OUI, buf, 3))
1984 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n", 1994 DRM_DEBUG_KMS("Sink OUI: %02hx%02hx%02hx\n",
1985 buf[0], buf[1], buf[2]); 1995 buf[0], buf[1], buf[2]);
@@ -1987,6 +1997,8 @@ intel_dp_probe_oui(struct intel_dp *intel_dp)
1987 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3)) 1997 if (intel_dp_aux_native_read_retry(intel_dp, DP_BRANCH_OUI, buf, 3))
1988 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n", 1998 DRM_DEBUG_KMS("Branch OUI: %02hx%02hx%02hx\n",
1989 buf[0], buf[1], buf[2]); 1999 buf[0], buf[1], buf[2]);
2000
2001 ironlake_edp_panel_vdd_off(intel_dp, false);
1990} 2002}
1991 2003
1992static bool 2004static bool
@@ -2121,10 +2133,22 @@ intel_dp_get_edid(struct drm_connector *connector, struct i2c_adapter *adapter)
2121{ 2133{
2122 struct intel_dp *intel_dp = intel_attached_dp(connector); 2134 struct intel_dp *intel_dp = intel_attached_dp(connector);
2123 struct edid *edid; 2135 struct edid *edid;
2136 int size;
2137
2138 if (is_edp(intel_dp)) {
2139 if (!intel_dp->edid)
2140 return NULL;
2141
2142 size = (intel_dp->edid->extensions + 1) * EDID_LENGTH;
2143 edid = kmalloc(size, GFP_KERNEL);
2144 if (!edid)
2145 return NULL;
2146
2147 memcpy(edid, intel_dp->edid, size);
2148 return edid;
2149 }
2124 2150
2125 ironlake_edp_panel_vdd_on(intel_dp);
2126 edid = drm_get_edid(connector, adapter); 2151 edid = drm_get_edid(connector, adapter);
2127 ironlake_edp_panel_vdd_off(intel_dp, false);
2128 return edid; 2152 return edid;
2129} 2153}
2130 2154
@@ -2134,9 +2158,17 @@ intel_dp_get_edid_modes(struct drm_connector *connector, struct i2c_adapter *ada
2134 struct intel_dp *intel_dp = intel_attached_dp(connector); 2158 struct intel_dp *intel_dp = intel_attached_dp(connector);
2135 int ret; 2159 int ret;
2136 2160
2137 ironlake_edp_panel_vdd_on(intel_dp); 2161 if (is_edp(intel_dp)) {
2162 drm_mode_connector_update_edid_property(connector,
2163 intel_dp->edid);
2164 ret = drm_add_edid_modes(connector, intel_dp->edid);
2165 drm_edid_to_eld(connector,
2166 intel_dp->edid);
2167 connector->display_info.raw_edid = NULL;
2168 return intel_dp->edid_mode_count;
2169 }
2170
2138 ret = intel_ddc_get_modes(connector, adapter); 2171 ret = intel_ddc_get_modes(connector, adapter);
2139 ironlake_edp_panel_vdd_off(intel_dp, false);
2140 return ret; 2172 return ret;
2141} 2173}
2142 2174
@@ -2326,6 +2358,7 @@ static void intel_dp_encoder_destroy(struct drm_encoder *encoder)
2326 i2c_del_adapter(&intel_dp->adapter); 2358 i2c_del_adapter(&intel_dp->adapter);
2327 drm_encoder_cleanup(encoder); 2359 drm_encoder_cleanup(encoder);
2328 if (is_edp(intel_dp)) { 2360 if (is_edp(intel_dp)) {
2361 kfree(intel_dp->edid);
2329 cancel_delayed_work_sync(&intel_dp->panel_vdd_work); 2362 cancel_delayed_work_sync(&intel_dp->panel_vdd_work);
2330 ironlake_panel_vdd_off_sync(intel_dp); 2363 ironlake_panel_vdd_off_sync(intel_dp);
2331 } 2364 }
@@ -2509,11 +2542,14 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2509 break; 2542 break;
2510 } 2543 }
2511 2544
2545 intel_dp_i2c_init(intel_dp, intel_connector, name);
2546
2512 /* Cache some DPCD data in the eDP case */ 2547 /* Cache some DPCD data in the eDP case */
2513 if (is_edp(intel_dp)) { 2548 if (is_edp(intel_dp)) {
2514 bool ret; 2549 bool ret;
2515 struct edp_power_seq cur, vbt; 2550 struct edp_power_seq cur, vbt;
2516 u32 pp_on, pp_off, pp_div; 2551 u32 pp_on, pp_off, pp_div;
2552 struct edid *edid;
2517 2553
2518 pp_on = I915_READ(PCH_PP_ON_DELAYS); 2554 pp_on = I915_READ(PCH_PP_ON_DELAYS);
2519 pp_off = I915_READ(PCH_PP_OFF_DELAYS); 2555 pp_off = I915_READ(PCH_PP_OFF_DELAYS);
@@ -2581,9 +2617,19 @@ intel_dp_init(struct drm_device *dev, int output_reg)
2581 intel_dp_destroy(&intel_connector->base); 2617 intel_dp_destroy(&intel_connector->base);
2582 return; 2618 return;
2583 } 2619 }
2584 }
2585 2620
2586 intel_dp_i2c_init(intel_dp, intel_connector, name); 2621 ironlake_edp_panel_vdd_on(intel_dp);
2622 edid = drm_get_edid(connector, &intel_dp->adapter);
2623 if (edid) {
2624 drm_mode_connector_update_edid_property(connector,
2625 edid);
2626 intel_dp->edid_mode_count =
2627 drm_add_edid_modes(connector, edid);
2628 drm_edid_to_eld(connector, edid);
2629 intel_dp->edid = edid;
2630 }
2631 ironlake_edp_panel_vdd_off(intel_dp, false);
2632 }
2587 2633
2588 intel_encoder->hot_plug = intel_dp_hot_plug; 2634 intel_encoder->hot_plug = intel_dp_hot_plug;
2589 2635
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 7a16f16371e6..f30a53a8917e 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -267,10 +267,15 @@ u32 intel_ring_get_active_head(struct intel_ring_buffer *ring)
267 267
268static int init_ring_common(struct intel_ring_buffer *ring) 268static int init_ring_common(struct intel_ring_buffer *ring)
269{ 269{
270 drm_i915_private_t *dev_priv = ring->dev->dev_private; 270 struct drm_device *dev = ring->dev;
271 drm_i915_private_t *dev_priv = dev->dev_private;
271 struct drm_i915_gem_object *obj = ring->obj; 272 struct drm_i915_gem_object *obj = ring->obj;
273 int ret = 0;
272 u32 head; 274 u32 head;
273 275
276 if (HAS_FORCE_WAKE(dev))
277 gen6_gt_force_wake_get(dev_priv);
278
274 /* Stop the ring if it's running. */ 279 /* Stop the ring if it's running. */
275 I915_WRITE_CTL(ring, 0); 280 I915_WRITE_CTL(ring, 0);
276 I915_WRITE_HEAD(ring, 0); 281 I915_WRITE_HEAD(ring, 0);
@@ -318,7 +323,8 @@ static int init_ring_common(struct intel_ring_buffer *ring)
318 I915_READ_HEAD(ring), 323 I915_READ_HEAD(ring),
319 I915_READ_TAIL(ring), 324 I915_READ_TAIL(ring),
320 I915_READ_START(ring)); 325 I915_READ_START(ring));
321 return -EIO; 326 ret = -EIO;
327 goto out;
322 } 328 }
323 329
324 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET)) 330 if (!drm_core_check_feature(ring->dev, DRIVER_MODESET))
@@ -327,9 +333,14 @@ static int init_ring_common(struct intel_ring_buffer *ring)
327 ring->head = I915_READ_HEAD(ring); 333 ring->head = I915_READ_HEAD(ring);
328 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR; 334 ring->tail = I915_READ_TAIL(ring) & TAIL_ADDR;
329 ring->space = ring_space(ring); 335 ring->space = ring_space(ring);
336 ring->last_retired_head = -1;
330 } 337 }
331 338
332 return 0; 339out:
340 if (HAS_FORCE_WAKE(dev))
341 gen6_gt_force_wake_put(dev_priv);
342
343 return ret;
333} 344}
334 345
335static int 346static int
@@ -1006,6 +1017,10 @@ static int intel_init_ring_buffer(struct drm_device *dev,
1006 if (ret) 1017 if (ret)
1007 goto err_unref; 1018 goto err_unref;
1008 1019
1020 ret = i915_gem_object_set_to_gtt_domain(obj, true);
1021 if (ret)
1022 goto err_unpin;
1023
1009 ring->virtual_start = 1024 ring->virtual_start =
1010 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset, 1025 ioremap_wc(dev_priv->mm.gtt->gma_bus_addr + obj->gtt_offset,
1011 ring->size); 1026 ring->size);
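The init_ring_common() changes above wrap the whole ring setup in a gen6 force-wake reference and turn the early "return -EIO" into "ret = -EIO; goto out", so the force-wake put can never be skipped on an error path. A minimal sketch of that error-path shape, assuming the i915 helpers named in the hunk (the failing condition itself is invented for illustration):

static int init_hw_with_forcewake(struct drm_device *dev)
{
	drm_i915_private_t *dev_priv = dev->dev_private;
	int ret = 0;

	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_get(dev_priv);

	/* ... program ring registers ... */
	if (hw_did_not_come_up) {	/* hypothetical failure check */
		ret = -EIO;
		goto out;		/* never bypass the put below */
	}

	/* ... remaining setup ... */
out:
	if (HAS_FORCE_WAKE(dev))
		gen6_gt_force_wake_put(dev_priv);
	return ret;
}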
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 3c8e04f54713..93e832d6c328 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -41,9 +41,28 @@ static DEFINE_PCI_DEVICE_TABLE(pciidlist) = {
41 41
42MODULE_DEVICE_TABLE(pci, pciidlist); 42MODULE_DEVICE_TABLE(pci, pciidlist);
43 43
44static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
45{
46 struct apertures_struct *ap;
47 bool primary = false;
48
49 ap = alloc_apertures(1);
50 ap->ranges[0].base = pci_resource_start(pdev, 0);
51 ap->ranges[0].size = pci_resource_len(pdev, 0);
52
53#ifdef CONFIG_X86
54 primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
55#endif
56 remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
57 kfree(ap);
58}
59
60
44static int __devinit 61static int __devinit
45mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) 62mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
46{ 63{
64 mgag200_kick_out_firmware_fb(pdev);
65
47 return drm_get_pci_dev(pdev, ent, &driver); 66 return drm_get_pci_dev(pdev, ent, &driver);
48} 67}
49 68
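mgag200_kick_out_firmware_fb() follows the usual KMS takeover pattern: describe the BAR the firmware framebuffer (efifb/vesafb) sits on as an aperture and ask fbdev to unbind anything conflicting before drm_get_pci_dev() binds the real driver. The same shape works for other PCI DRM drivers; a sketch under those assumptions that also checks the alloc_apertures() return value, which the hunk above skips:

static int kick_out_firmware_fb(struct pci_dev *pdev, const char *name)
{
	struct apertures_struct *ap;
	bool primary = false;

	ap = alloc_apertures(1);
	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = pci_resource_start(pdev, 0);
	ap->ranges[0].size = pci_resource_len(pdev, 0);
#ifdef CONFIG_X86
	/* treat the device the BIOS shadowed a VGA ROM for as the boot console */
	primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
#endif
	remove_conflicting_framebuffers(ap, name, primary);
	kfree(ap);
	return 0;
}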
diff --git a/drivers/gpu/drm/radeon/atombios_crtc.c b/drivers/gpu/drm/radeon/atombios_crtc.c
index 01d77d1554f4..3904d7964a4b 100644
--- a/drivers/gpu/drm/radeon/atombios_crtc.c
+++ b/drivers/gpu/drm/radeon/atombios_crtc.c
@@ -1149,7 +1149,9 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1149 } 1149 }
1150 1150
1151 if (tiling_flags & RADEON_TILING_MACRO) { 1151 if (tiling_flags & RADEON_TILING_MACRO) {
1152 if (rdev->family >= CHIP_CAYMAN) 1152 if (rdev->family >= CHIP_TAHITI)
1153 tmp = rdev->config.si.tile_config;
1154 else if (rdev->family >= CHIP_CAYMAN)
1153 tmp = rdev->config.cayman.tile_config; 1155 tmp = rdev->config.cayman.tile_config;
1154 else 1156 else
1155 tmp = rdev->config.evergreen.tile_config; 1157 tmp = rdev->config.evergreen.tile_config;
@@ -1177,6 +1179,12 @@ static int dce4_crtc_do_set_base(struct drm_crtc *crtc,
1177 } else if (tiling_flags & RADEON_TILING_MICRO) 1179 } else if (tiling_flags & RADEON_TILING_MICRO)
1178 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1); 1180 fb_format |= EVERGREEN_GRPH_ARRAY_MODE(EVERGREEN_GRPH_ARRAY_1D_TILED_THIN1);
1179 1181
1182 if ((rdev->family == CHIP_TAHITI) ||
1183 (rdev->family == CHIP_PITCAIRN))
1184 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P8_32x32_8x16);
1185 else if (rdev->family == CHIP_VERDE)
1186 fb_format |= SI_GRPH_PIPE_CONFIG(SI_ADDR_SURF_P4_8x16);
1187
1180 switch (radeon_crtc->crtc_id) { 1188 switch (radeon_crtc->crtc_id) {
1181 case 0: 1189 case 0:
1182 WREG32(AVIVO_D1VGA_CONTROL, 0); 1190 WREG32(AVIVO_D1VGA_CONTROL, 0);
diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
index e7b1ec5ae8c6..486ccdf4aacd 100644
--- a/drivers/gpu/drm/radeon/atombios_encoders.c
+++ b/drivers/gpu/drm/radeon/atombios_encoders.c
@@ -1926,7 +1926,9 @@ radeon_atom_encoder_mode_set(struct drm_encoder *encoder,
1926 1926
1927 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) { 1927 if (atombios_get_encoder_mode(encoder) == ATOM_ENCODER_MODE_HDMI) {
1928 r600_hdmi_enable(encoder); 1928 r600_hdmi_enable(encoder);
1929 if (ASIC_IS_DCE4(rdev)) 1929 if (ASIC_IS_DCE6(rdev))
1930 ; /* TODO (use pointers instead of if-s?) */
1931 else if (ASIC_IS_DCE4(rdev))
1930 evergreen_hdmi_setmode(encoder, adjusted_mode); 1932 evergreen_hdmi_setmode(encoder, adjusted_mode);
1931 else 1933 else
1932 r600_hdmi_setmode(encoder, adjusted_mode); 1934 r600_hdmi_setmode(encoder, adjusted_mode);
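The DCE6 branch above is deliberately empty; its TODO suggests replacing the growing DCE if/else ladder with per-ASIC function pointers. A hedged sketch of what that could look like (the ops struct and the rdev->hdmi_funcs hook are hypothetical illustrations, not something this patch adds):

struct radeon_hdmi_funcs {
	void (*setmode)(struct drm_encoder *encoder,
			struct drm_display_mode *mode);
};

static const struct radeon_hdmi_funcs evergreen_hdmi_funcs = {
	.setmode = evergreen_hdmi_setmode,
};

static const struct radeon_hdmi_funcs r600_hdmi_funcs = {
	.setmode = r600_hdmi_setmode,
};

static void radeon_hdmi_setmode(struct radeon_device *rdev,
				struct drm_encoder *encoder,
				struct drm_display_mode *adjusted_mode)
{
	/* table chosen once at ASIC init instead of an if/else per modeset */
	rdev->hdmi_funcs->setmode(encoder, adjusted_mode);
}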
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 58991af90502..7fb3d2e0434c 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1029,6 +1029,11 @@ int evergreen_pcie_gart_enable(struct radeon_device *rdev)
1029 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 1029 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
1030 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 1030 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
1031 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 1031 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
1032 if ((rdev->family == CHIP_JUNIPER) ||
1033 (rdev->family == CHIP_CYPRESS) ||
1034 (rdev->family == CHIP_HEMLOCK) ||
1035 (rdev->family == CHIP_BARTS))
1036 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
1032 } 1037 }
1033 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 1038 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
1034 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 1039 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
@@ -1553,163 +1558,10 @@ int evergreen_cp_resume(struct radeon_device *rdev)
1553/* 1558/*
1554 * Core functions 1559 * Core functions
1555 */ 1560 */
1556static u32 evergreen_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
1557 u32 num_tile_pipes,
1558 u32 num_backends,
1559 u32 backend_disable_mask)
1560{
1561 u32 backend_map = 0;
1562 u32 enabled_backends_mask = 0;
1563 u32 enabled_backends_count = 0;
1564 u32 cur_pipe;
1565 u32 swizzle_pipe[EVERGREEN_MAX_PIPES];
1566 u32 cur_backend = 0;
1567 u32 i;
1568 bool force_no_swizzle;
1569
1570 if (num_tile_pipes > EVERGREEN_MAX_PIPES)
1571 num_tile_pipes = EVERGREEN_MAX_PIPES;
1572 if (num_tile_pipes < 1)
1573 num_tile_pipes = 1;
1574 if (num_backends > EVERGREEN_MAX_BACKENDS)
1575 num_backends = EVERGREEN_MAX_BACKENDS;
1576 if (num_backends < 1)
1577 num_backends = 1;
1578
1579 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1580 if (((backend_disable_mask >> i) & 1) == 0) {
1581 enabled_backends_mask |= (1 << i);
1582 ++enabled_backends_count;
1583 }
1584 if (enabled_backends_count == num_backends)
1585 break;
1586 }
1587
1588 if (enabled_backends_count == 0) {
1589 enabled_backends_mask = 1;
1590 enabled_backends_count = 1;
1591 }
1592
1593 if (enabled_backends_count != num_backends)
1594 num_backends = enabled_backends_count;
1595
1596 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * EVERGREEN_MAX_PIPES);
1597 switch (rdev->family) {
1598 case CHIP_CEDAR:
1599 case CHIP_REDWOOD:
1600 case CHIP_PALM:
1601 case CHIP_SUMO:
1602 case CHIP_SUMO2:
1603 case CHIP_TURKS:
1604 case CHIP_CAICOS:
1605 force_no_swizzle = false;
1606 break;
1607 case CHIP_CYPRESS:
1608 case CHIP_HEMLOCK:
1609 case CHIP_JUNIPER:
1610 case CHIP_BARTS:
1611 default:
1612 force_no_swizzle = true;
1613 break;
1614 }
1615 if (force_no_swizzle) {
1616 bool last_backend_enabled = false;
1617
1618 force_no_swizzle = false;
1619 for (i = 0; i < EVERGREEN_MAX_BACKENDS; ++i) {
1620 if (((enabled_backends_mask >> i) & 1) == 1) {
1621 if (last_backend_enabled)
1622 force_no_swizzle = true;
1623 last_backend_enabled = true;
1624 } else
1625 last_backend_enabled = false;
1626 }
1627 }
1628
1629 switch (num_tile_pipes) {
1630 case 1:
1631 case 3:
1632 case 5:
1633 case 7:
1634 DRM_ERROR("odd number of pipes!\n");
1635 break;
1636 case 2:
1637 swizzle_pipe[0] = 0;
1638 swizzle_pipe[1] = 1;
1639 break;
1640 case 4:
1641 if (force_no_swizzle) {
1642 swizzle_pipe[0] = 0;
1643 swizzle_pipe[1] = 1;
1644 swizzle_pipe[2] = 2;
1645 swizzle_pipe[3] = 3;
1646 } else {
1647 swizzle_pipe[0] = 0;
1648 swizzle_pipe[1] = 2;
1649 swizzle_pipe[2] = 1;
1650 swizzle_pipe[3] = 3;
1651 }
1652 break;
1653 case 6:
1654 if (force_no_swizzle) {
1655 swizzle_pipe[0] = 0;
1656 swizzle_pipe[1] = 1;
1657 swizzle_pipe[2] = 2;
1658 swizzle_pipe[3] = 3;
1659 swizzle_pipe[4] = 4;
1660 swizzle_pipe[5] = 5;
1661 } else {
1662 swizzle_pipe[0] = 0;
1663 swizzle_pipe[1] = 2;
1664 swizzle_pipe[2] = 4;
1665 swizzle_pipe[3] = 1;
1666 swizzle_pipe[4] = 3;
1667 swizzle_pipe[5] = 5;
1668 }
1669 break;
1670 case 8:
1671 if (force_no_swizzle) {
1672 swizzle_pipe[0] = 0;
1673 swizzle_pipe[1] = 1;
1674 swizzle_pipe[2] = 2;
1675 swizzle_pipe[3] = 3;
1676 swizzle_pipe[4] = 4;
1677 swizzle_pipe[5] = 5;
1678 swizzle_pipe[6] = 6;
1679 swizzle_pipe[7] = 7;
1680 } else {
1681 swizzle_pipe[0] = 0;
1682 swizzle_pipe[1] = 2;
1683 swizzle_pipe[2] = 4;
1684 swizzle_pipe[3] = 6;
1685 swizzle_pipe[4] = 1;
1686 swizzle_pipe[5] = 3;
1687 swizzle_pipe[6] = 5;
1688 swizzle_pipe[7] = 7;
1689 }
1690 break;
1691 }
1692
1693 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1694 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1695 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1696
1697 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1698
1699 cur_backend = (cur_backend + 1) % EVERGREEN_MAX_BACKENDS;
1700 }
1701
1702 return backend_map;
1703}
1704
1705static void evergreen_gpu_init(struct radeon_device *rdev) 1561static void evergreen_gpu_init(struct radeon_device *rdev)
1706{ 1562{
1707 u32 cc_rb_backend_disable = 0; 1563 u32 gb_addr_config;
1708 u32 cc_gc_shader_pipe_config;
1709 u32 gb_addr_config = 0;
1710 u32 mc_shared_chmap, mc_arb_ramcfg; 1564 u32 mc_shared_chmap, mc_arb_ramcfg;
1711 u32 gb_backend_map;
1712 u32 grbm_gfx_index;
1713 u32 sx_debug_1; 1565 u32 sx_debug_1;
1714 u32 smx_dc_ctl0; 1566 u32 smx_dc_ctl0;
1715 u32 sq_config; 1567 u32 sq_config;
@@ -1724,6 +1576,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1724 u32 sq_stack_resource_mgmt_3; 1576 u32 sq_stack_resource_mgmt_3;
1725 u32 vgt_cache_invalidation; 1577 u32 vgt_cache_invalidation;
1726 u32 hdp_host_path_cntl, tmp; 1578 u32 hdp_host_path_cntl, tmp;
1579 u32 disabled_rb_mask;
1727 int i, j, num_shader_engines, ps_thread_count; 1580 int i, j, num_shader_engines, ps_thread_count;
1728 1581
1729 switch (rdev->family) { 1582 switch (rdev->family) {
@@ -1748,6 +1601,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1748 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1601 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1749 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1602 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1750 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1603 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1604 gb_addr_config = CYPRESS_GB_ADDR_CONFIG_GOLDEN;
1751 break; 1605 break;
1752 case CHIP_JUNIPER: 1606 case CHIP_JUNIPER:
1753 rdev->config.evergreen.num_ses = 1; 1607 rdev->config.evergreen.num_ses = 1;
@@ -1769,6 +1623,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1769 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1623 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1770 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1624 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1771 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1625 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1626 gb_addr_config = JUNIPER_GB_ADDR_CONFIG_GOLDEN;
1772 break; 1627 break;
1773 case CHIP_REDWOOD: 1628 case CHIP_REDWOOD:
1774 rdev->config.evergreen.num_ses = 1; 1629 rdev->config.evergreen.num_ses = 1;
@@ -1790,6 +1645,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1790 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1645 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1791 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1646 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1792 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1647 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1648 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1793 break; 1649 break;
1794 case CHIP_CEDAR: 1650 case CHIP_CEDAR:
1795 default: 1651 default:
@@ -1812,6 +1668,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1812 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1668 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1813 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1669 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1814 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1670 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1671 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1815 break; 1672 break;
1816 case CHIP_PALM: 1673 case CHIP_PALM:
1817 rdev->config.evergreen.num_ses = 1; 1674 rdev->config.evergreen.num_ses = 1;
@@ -1833,6 +1690,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1833 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1690 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1834 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1691 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1835 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1692 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1693 gb_addr_config = CEDAR_GB_ADDR_CONFIG_GOLDEN;
1836 break; 1694 break;
1837 case CHIP_SUMO: 1695 case CHIP_SUMO:
1838 rdev->config.evergreen.num_ses = 1; 1696 rdev->config.evergreen.num_ses = 1;
@@ -1860,6 +1718,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1860 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1718 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1861 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1719 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1862 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1720 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1721 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1863 break; 1722 break;
1864 case CHIP_SUMO2: 1723 case CHIP_SUMO2:
1865 rdev->config.evergreen.num_ses = 1; 1724 rdev->config.evergreen.num_ses = 1;
@@ -1881,6 +1740,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1881 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1740 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1882 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1741 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1883 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1742 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1743 gb_addr_config = REDWOOD_GB_ADDR_CONFIG_GOLDEN;
1884 break; 1744 break;
1885 case CHIP_BARTS: 1745 case CHIP_BARTS:
1886 rdev->config.evergreen.num_ses = 2; 1746 rdev->config.evergreen.num_ses = 2;
@@ -1902,6 +1762,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1902 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1762 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1903 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1763 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1904 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1764 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1765 gb_addr_config = BARTS_GB_ADDR_CONFIG_GOLDEN;
1905 break; 1766 break;
1906 case CHIP_TURKS: 1767 case CHIP_TURKS:
1907 rdev->config.evergreen.num_ses = 1; 1768 rdev->config.evergreen.num_ses = 1;
@@ -1923,6 +1784,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1923 rdev->config.evergreen.sc_prim_fifo_size = 0x100; 1784 rdev->config.evergreen.sc_prim_fifo_size = 0x100;
1924 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1785 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1925 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1786 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1787 gb_addr_config = TURKS_GB_ADDR_CONFIG_GOLDEN;
1926 break; 1788 break;
1927 case CHIP_CAICOS: 1789 case CHIP_CAICOS:
1928 rdev->config.evergreen.num_ses = 1; 1790 rdev->config.evergreen.num_ses = 1;
@@ -1944,6 +1806,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1944 rdev->config.evergreen.sc_prim_fifo_size = 0x40; 1806 rdev->config.evergreen.sc_prim_fifo_size = 0x40;
1945 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30; 1807 rdev->config.evergreen.sc_hiz_tile_fifo_size = 0x30;
1946 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130; 1808 rdev->config.evergreen.sc_earlyz_tile_fifo_size = 0x130;
1809 gb_addr_config = CAICOS_GB_ADDR_CONFIG_GOLDEN;
1947 break; 1810 break;
1948 } 1811 }
1949 1812
@@ -1960,20 +1823,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1960 1823
1961 evergreen_fix_pci_max_read_req_size(rdev); 1824 evergreen_fix_pci_max_read_req_size(rdev);
1962 1825
1963 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & ~2;
1964
1965 cc_gc_shader_pipe_config |=
1966 INACTIVE_QD_PIPES((EVERGREEN_MAX_PIPES_MASK << rdev->config.evergreen.max_pipes)
1967 & EVERGREEN_MAX_PIPES_MASK);
1968 cc_gc_shader_pipe_config |=
1969 INACTIVE_SIMDS((EVERGREEN_MAX_SIMDS_MASK << rdev->config.evergreen.max_simds)
1970 & EVERGREEN_MAX_SIMDS_MASK);
1971
1972 cc_rb_backend_disable =
1973 BACKEND_DISABLE((EVERGREEN_MAX_BACKENDS_MASK << rdev->config.evergreen.max_backends)
1974 & EVERGREEN_MAX_BACKENDS_MASK);
1975
1976
1977 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1826 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1978 if ((rdev->family == CHIP_PALM) || 1827 if ((rdev->family == CHIP_PALM) ||
1979 (rdev->family == CHIP_SUMO) || 1828 (rdev->family == CHIP_SUMO) ||
@@ -1982,134 +1831,6 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
1982 else 1831 else
1983 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1832 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1984 1833
1985 switch (rdev->config.evergreen.max_tile_pipes) {
1986 case 1:
1987 default:
1988 gb_addr_config |= NUM_PIPES(0);
1989 break;
1990 case 2:
1991 gb_addr_config |= NUM_PIPES(1);
1992 break;
1993 case 4:
1994 gb_addr_config |= NUM_PIPES(2);
1995 break;
1996 case 8:
1997 gb_addr_config |= NUM_PIPES(3);
1998 break;
1999 }
2000
2001 gb_addr_config |= PIPE_INTERLEAVE_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
2002 gb_addr_config |= BANK_INTERLEAVE_SIZE(0);
2003 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.evergreen.num_ses - 1);
2004 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(1);
2005 gb_addr_config |= NUM_GPUS(0); /* Hemlock? */
2006 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
2007
2008 if (((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) > 2)
2009 gb_addr_config |= ROW_SIZE(2);
2010 else
2011 gb_addr_config |= ROW_SIZE((mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT);
2012
2013 if (rdev->ddev->pdev->device == 0x689e) {
2014 u32 efuse_straps_4;
2015 u32 efuse_straps_3;
2016 u8 efuse_box_bit_131_124;
2017
2018 WREG32(RCU_IND_INDEX, 0x204);
2019 efuse_straps_4 = RREG32(RCU_IND_DATA);
2020 WREG32(RCU_IND_INDEX, 0x203);
2021 efuse_straps_3 = RREG32(RCU_IND_DATA);
2022 efuse_box_bit_131_124 = (u8)(((efuse_straps_4 & 0xf) << 4) | ((efuse_straps_3 & 0xf0000000) >> 28));
2023
2024 switch(efuse_box_bit_131_124) {
2025 case 0x00:
2026 gb_backend_map = 0x76543210;
2027 break;
2028 case 0x55:
2029 gb_backend_map = 0x77553311;
2030 break;
2031 case 0x56:
2032 gb_backend_map = 0x77553300;
2033 break;
2034 case 0x59:
2035 gb_backend_map = 0x77552211;
2036 break;
2037 case 0x66:
2038 gb_backend_map = 0x77443300;
2039 break;
2040 case 0x99:
2041 gb_backend_map = 0x66552211;
2042 break;
2043 case 0x5a:
2044 gb_backend_map = 0x77552200;
2045 break;
2046 case 0xaa:
2047 gb_backend_map = 0x66442200;
2048 break;
2049 case 0x95:
2050 gb_backend_map = 0x66553311;
2051 break;
2052 default:
2053 DRM_ERROR("bad backend map, using default\n");
2054 gb_backend_map =
2055 evergreen_get_tile_pipe_to_backend_map(rdev,
2056 rdev->config.evergreen.max_tile_pipes,
2057 rdev->config.evergreen.max_backends,
2058 ((EVERGREEN_MAX_BACKENDS_MASK <<
2059 rdev->config.evergreen.max_backends) &
2060 EVERGREEN_MAX_BACKENDS_MASK));
2061 break;
2062 }
2063 } else if (rdev->ddev->pdev->device == 0x68b9) {
2064 u32 efuse_straps_3;
2065 u8 efuse_box_bit_127_124;
2066
2067 WREG32(RCU_IND_INDEX, 0x203);
2068 efuse_straps_3 = RREG32(RCU_IND_DATA);
2069 efuse_box_bit_127_124 = (u8)((efuse_straps_3 & 0xF0000000) >> 28);
2070
2071 switch(efuse_box_bit_127_124) {
2072 case 0x0:
2073 gb_backend_map = 0x00003210;
2074 break;
2075 case 0x5:
2076 case 0x6:
2077 case 0x9:
2078 case 0xa:
2079 gb_backend_map = 0x00003311;
2080 break;
2081 default:
2082 DRM_ERROR("bad backend map, using default\n");
2083 gb_backend_map =
2084 evergreen_get_tile_pipe_to_backend_map(rdev,
2085 rdev->config.evergreen.max_tile_pipes,
2086 rdev->config.evergreen.max_backends,
2087 ((EVERGREEN_MAX_BACKENDS_MASK <<
2088 rdev->config.evergreen.max_backends) &
2089 EVERGREEN_MAX_BACKENDS_MASK));
2090 break;
2091 }
2092 } else {
2093 switch (rdev->family) {
2094 case CHIP_CYPRESS:
2095 case CHIP_HEMLOCK:
2096 case CHIP_BARTS:
2097 gb_backend_map = 0x66442200;
2098 break;
2099 case CHIP_JUNIPER:
2100 gb_backend_map = 0x00002200;
2101 break;
2102 default:
2103 gb_backend_map =
2104 evergreen_get_tile_pipe_to_backend_map(rdev,
2105 rdev->config.evergreen.max_tile_pipes,
2106 rdev->config.evergreen.max_backends,
2107 ((EVERGREEN_MAX_BACKENDS_MASK <<
2108 rdev->config.evergreen.max_backends) &
2109 EVERGREEN_MAX_BACKENDS_MASK));
2110 }
2111 }
2112
2113 /* setup tiling info dword. gb_addr_config is not adequate since it does 1834 /* setup tiling info dword. gb_addr_config is not adequate since it does
2114 * not have bank info, so create a custom tiling dword. 1835 * not have bank info, so create a custom tiling dword.
2115 * bits 3:0 num_pipes 1836 * bits 3:0 num_pipes
@@ -2136,45 +1857,54 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2136 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 1857 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
2137 if (rdev->flags & RADEON_IS_IGP) 1858 if (rdev->flags & RADEON_IS_IGP)
2138 rdev->config.evergreen.tile_config |= 1 << 4; 1859 rdev->config.evergreen.tile_config |= 1 << 4;
2139 else 1860 else {
2140 rdev->config.evergreen.tile_config |= 1861 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
2141 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1862 rdev->config.evergreen.tile_config |= 1 << 4;
2142 rdev->config.evergreen.tile_config |= 1863 else
2143 ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) << 8; 1864 rdev->config.evergreen.tile_config |= 0 << 4;
1865 }
1866 rdev->config.evergreen.tile_config |= 0 << 8;
2144 rdev->config.evergreen.tile_config |= 1867 rdev->config.evergreen.tile_config |=
2145 ((gb_addr_config & 0x30000000) >> 28) << 12; 1868 ((gb_addr_config & 0x30000000) >> 28) << 12;
2146 1869
2147 rdev->config.evergreen.backend_map = gb_backend_map; 1870 num_shader_engines = ((gb_addr_config & NUM_SHADER_ENGINES(3)) >> 12) + 1;
2148 WREG32(GB_BACKEND_MAP, gb_backend_map);
2149 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2150 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2151 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2152 1871
2153 num_shader_engines = ((RREG32(GB_ADDR_CONFIG) & NUM_SHADER_ENGINES(3)) >> 12) + 1; 1872 if ((rdev->family >= CHIP_CEDAR) && (rdev->family <= CHIP_HEMLOCK)) {
2154 grbm_gfx_index = INSTANCE_BROADCAST_WRITES; 1873 u32 efuse_straps_4;
2155 1874 u32 efuse_straps_3;
2156 for (i = 0; i < rdev->config.evergreen.num_ses; i++) {
2157 u32 rb = cc_rb_backend_disable | (0xf0 << 16);
2158 u32 sp = cc_gc_shader_pipe_config;
2159 u32 gfx = grbm_gfx_index | SE_INDEX(i);
2160 1875
2161 if (i == num_shader_engines) { 1876 WREG32(RCU_IND_INDEX, 0x204);
2162 rb |= BACKEND_DISABLE(EVERGREEN_MAX_BACKENDS_MASK); 1877 efuse_straps_4 = RREG32(RCU_IND_DATA);
2163 sp |= INACTIVE_SIMDS(EVERGREEN_MAX_SIMDS_MASK); 1878 WREG32(RCU_IND_INDEX, 0x203);
1879 efuse_straps_3 = RREG32(RCU_IND_DATA);
1880 tmp = (((efuse_straps_4 & 0xf) << 4) |
1881 ((efuse_straps_3 & 0xf0000000) >> 28));
1882 } else {
1883 tmp = 0;
1884 for (i = (rdev->config.evergreen.num_ses - 1); i >= 0; i--) {
1885 u32 rb_disable_bitmap;
1886
1887 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1888 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
1889 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
1890 tmp <<= 4;
1891 tmp |= rb_disable_bitmap;
2164 } 1892 }
1893 }
 1894 /* enabled RBs are just the ones not disabled :) */
1895 disabled_rb_mask = tmp;
2165 1896
2166 WREG32(GRBM_GFX_INDEX, gfx); 1897 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2167 WREG32(RLC_GFX_INDEX, gfx); 1898 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
2168 1899
2169 WREG32(CC_RB_BACKEND_DISABLE, rb); 1900 WREG32(GB_ADDR_CONFIG, gb_addr_config);
2170 WREG32(CC_SYS_RB_BACKEND_DISABLE, rb); 1901 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
2171 WREG32(GC_USER_RB_BACKEND_DISABLE, rb); 1902 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
2172 WREG32(CC_GC_SHADER_PIPE_CONFIG, sp);
2173 }
2174 1903
2175 grbm_gfx_index |= SE_BROADCAST_WRITES; 1904 tmp = gb_addr_config & NUM_PIPES_MASK;
2176 WREG32(GRBM_GFX_INDEX, grbm_gfx_index); 1905 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.evergreen.max_backends,
2177 WREG32(RLC_GFX_INDEX, grbm_gfx_index); 1906 EVERGREEN_MAX_BACKENDS, disabled_rb_mask);
1907 WREG32(GB_BACKEND_MAP, tmp);
2178 1908
2179 WREG32(CGTS_SYS_TCC_DISABLE, 0); 1909 WREG32(CGTS_SYS_TCC_DISABLE, 0);
2180 WREG32(CGTS_TCC_DISABLE, 0); 1910 WREG32(CGTS_TCC_DISABLE, 0);
@@ -2202,6 +1932,9 @@ static void evergreen_gpu_init(struct radeon_device *rdev)
2202 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets); 1932 smx_dc_ctl0 |= NUMBER_OF_SETS(rdev->config.evergreen.sx_num_of_sets);
2203 WREG32(SMX_DC_CTL0, smx_dc_ctl0); 1933 WREG32(SMX_DC_CTL0, smx_dc_ctl0);
2204 1934
1935 if (rdev->family <= CHIP_SUMO2)
1936 WREG32(SMX_SAR_CTL0, 0x00010000);
1937
2205 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) | 1938 WREG32(SX_EXPORT_BUFFER_SIZES, (COLOR_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_size / 4) - 1) |
2206 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) | 1939 POSITION_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_pos_size / 4) - 1) |
2207 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1))); 1940 SMX_BUFFER_SIZE((rdev->config.evergreen.sx_max_export_smx_size / 4) - 1)));
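The rewritten evergreen_gpu_init() no longer hand-builds GB_ADDR_CONFIG or a backend map; it programs the per-family golden value and then asks the hardware which render backends are actually fused off, selecting each shader engine through GRBM_GFX_INDEX and folding the per-SE CC_RB_BACKEND_DISABLE field into disabled_rb_mask. A stand-alone model of just that folding step (register reads replaced by an array, example values invented), to make the nibble ordering explicit:

#include <stdint.h>
#include <stdio.h>

/* Mirror of the readback loop above: walk SEs from the highest index down
 * to 0, so SE0's disable bits end up in the lowest nibble of the result.
 */
static uint32_t fold_disabled_rb(const uint32_t *cc_rb_backend_disable, int num_ses)
{
	uint32_t tmp = 0;
	int i;

	for (i = num_ses - 1; i >= 0; i--) {
		uint32_t rb_disable_bitmap =
			(cc_rb_backend_disable[i] & 0x00ff0000) >> 16;

		tmp <<= 4;
		tmp |= rb_disable_bitmap;
	}
	return tmp;
}

int main(void)
{
	/* invented example: SE0 has RB1 fused off, SE1 is fully enabled */
	uint32_t regs[2] = { 0x2u << 16, 0x0u << 16 };

	printf("disabled_rb_mask = 0x%x\n", fold_disabled_rb(regs, 2));
	return 0;
}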
diff --git a/drivers/gpu/drm/radeon/evergreen_cs.c b/drivers/gpu/drm/radeon/evergreen_cs.c
index 4e7dd2b4843d..c16554122ccd 100644
--- a/drivers/gpu/drm/radeon/evergreen_cs.c
+++ b/drivers/gpu/drm/radeon/evergreen_cs.c
@@ -52,6 +52,7 @@ struct evergreen_cs_track {
52 u32 cb_color_view[12]; 52 u32 cb_color_view[12];
53 u32 cb_color_pitch[12]; 53 u32 cb_color_pitch[12];
54 u32 cb_color_slice[12]; 54 u32 cb_color_slice[12];
55 u32 cb_color_slice_idx[12];
55 u32 cb_color_attrib[12]; 56 u32 cb_color_attrib[12];
56 u32 cb_color_cmask_slice[8];/* unused */ 57 u32 cb_color_cmask_slice[8];/* unused */
57 u32 cb_color_fmask_slice[8];/* unused */ 58 u32 cb_color_fmask_slice[8];/* unused */
@@ -127,12 +128,14 @@ static void evergreen_cs_track_init(struct evergreen_cs_track *track)
127 track->cb_color_info[i] = 0; 128 track->cb_color_info[i] = 0;
128 track->cb_color_view[i] = 0xFFFFFFFF; 129 track->cb_color_view[i] = 0xFFFFFFFF;
129 track->cb_color_pitch[i] = 0; 130 track->cb_color_pitch[i] = 0;
130 track->cb_color_slice[i] = 0; 131 track->cb_color_slice[i] = 0xfffffff;
132 track->cb_color_slice_idx[i] = 0;
131 } 133 }
132 track->cb_target_mask = 0xFFFFFFFF; 134 track->cb_target_mask = 0xFFFFFFFF;
133 track->cb_shader_mask = 0xFFFFFFFF; 135 track->cb_shader_mask = 0xFFFFFFFF;
134 track->cb_dirty = true; 136 track->cb_dirty = true;
135 137
138 track->db_depth_slice = 0xffffffff;
136 track->db_depth_view = 0xFFFFC000; 139 track->db_depth_view = 0xFFFFC000;
137 track->db_depth_size = 0xFFFFFFFF; 140 track->db_depth_size = 0xFFFFFFFF;
138 track->db_depth_control = 0xFFFFFFFF; 141 track->db_depth_control = 0xFFFFFFFF;
@@ -250,10 +253,9 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
250{ 253{
251 struct evergreen_cs_track *track = p->track; 254 struct evergreen_cs_track *track = p->track;
252 unsigned palign, halign, tileb, slice_pt; 255 unsigned palign, halign, tileb, slice_pt;
256 unsigned mtile_pr, mtile_ps, mtileb;
253 257
254 tileb = 64 * surf->bpe * surf->nsamples; 258 tileb = 64 * surf->bpe * surf->nsamples;
255 palign = track->group_size / (8 * surf->bpe * surf->nsamples);
256 palign = MAX(8, palign);
257 slice_pt = 1; 259 slice_pt = 1;
258 if (tileb > surf->tsplit) { 260 if (tileb > surf->tsplit) {
259 slice_pt = tileb / surf->tsplit; 261 slice_pt = tileb / surf->tsplit;
@@ -262,7 +264,10 @@ static int evergreen_surface_check_2d(struct radeon_cs_parser *p,
262 /* macro tile width & height */ 264 /* macro tile width & height */
263 palign = (8 * surf->bankw * track->npipes) * surf->mtilea; 265 palign = (8 * surf->bankw * track->npipes) * surf->mtilea;
264 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea; 266 halign = (8 * surf->bankh * surf->nbanks) / surf->mtilea;
 265 surf->layer_size = surf->nbx * surf->nby * surf->bpe * slice_pt; 267 mtileb = (palign / 8) * (halign / 8) * tileb;
268 mtile_pr = surf->nbx / palign;
269 mtile_ps = (mtile_pr * surf->nby) / halign;
270 surf->layer_size = mtile_ps * mtileb * slice_pt;
266 surf->base_align = (palign / 8) * (halign / 8) * tileb; 271 surf->base_align = (palign / 8) * (halign / 8) * tileb;
267 surf->palign = palign; 272 surf->palign = palign;
268 surf->halign = halign; 273 surf->halign = halign;
@@ -434,6 +439,39 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
434 439
435 offset += surf.layer_size * mslice; 440 offset += surf.layer_size * mslice;
436 if (offset > radeon_bo_size(track->cb_color_bo[id])) { 441 if (offset > radeon_bo_size(track->cb_color_bo[id])) {
 442 /* old ddx is broken: it allocates the bo with w*h*bpp but
 443 * programs the slice with ALIGN(h, 8); catch this and patch
 444 * the command stream.
445 */
446 if (!surf.mode) {
447 volatile u32 *ib = p->ib.ptr;
448 unsigned long tmp, nby, bsize, size, min = 0;
449
450 /* find the height the ddx wants */
451 if (surf.nby > 8) {
452 min = surf.nby - 8;
453 }
454 bsize = radeon_bo_size(track->cb_color_bo[id]);
455 tmp = track->cb_color_bo_offset[id] << 8;
456 for (nby = surf.nby; nby > min; nby--) {
457 size = nby * surf.nbx * surf.bpe * surf.nsamples;
458 if ((tmp + size * mslice) <= bsize) {
459 break;
460 }
461 }
462 if (nby > min) {
463 surf.nby = nby;
464 slice = ((nby * surf.nbx) / 64) - 1;
465 if (!evergreen_surface_check(p, &surf, "cb")) {
466 /* check if this one works */
467 tmp += surf.layer_size * mslice;
468 if (tmp <= bsize) {
469 ib[track->cb_color_slice_idx[id]] = slice;
470 goto old_ddx_ok;
471 }
472 }
473 }
474 }
437 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, " 475 dev_warn(p->dev, "%s:%d cb[%d] bo too small (layer size %d, "
438 "offset %d, max layer %d, bo size %ld, slice %d)\n", 476 "offset %d, max layer %d, bo size %ld, slice %d)\n",
439 __func__, __LINE__, id, surf.layer_size, 477 __func__, __LINE__, id, surf.layer_size,
@@ -446,6 +484,7 @@ static int evergreen_cs_track_validate_cb(struct radeon_cs_parser *p, unsigned i
446 surf.tsplit, surf.mtilea); 484 surf.tsplit, surf.mtilea);
447 return -EINVAL; 485 return -EINVAL;
448 } 486 }
487old_ddx_ok:
449 488
450 return 0; 489 return 0;
451} 490}
@@ -1532,6 +1571,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1532 case CB_COLOR7_SLICE: 1571 case CB_COLOR7_SLICE:
1533 tmp = (reg - CB_COLOR0_SLICE) / 0x3c; 1572 tmp = (reg - CB_COLOR0_SLICE) / 0x3c;
1534 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1573 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1574 track->cb_color_slice_idx[tmp] = idx;
1535 track->cb_dirty = true; 1575 track->cb_dirty = true;
1536 break; 1576 break;
1537 case CB_COLOR8_SLICE: 1577 case CB_COLOR8_SLICE:
@@ -1540,6 +1580,7 @@ static int evergreen_cs_check_reg(struct radeon_cs_parser *p, u32 reg, u32 idx)
1540 case CB_COLOR11_SLICE: 1580 case CB_COLOR11_SLICE:
1541 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8; 1581 tmp = ((reg - CB_COLOR8_SLICE) / 0x1c) + 8;
1542 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx); 1582 track->cb_color_slice[tmp] = radeon_get_ib_value(p, idx);
1583 track->cb_color_slice_idx[tmp] = idx;
1543 track->cb_dirty = true; 1584 track->cb_dirty = true;
1544 break; 1585 break;
1545 case CB_COLOR0_ATTRIB: 1586 case CB_COLOR0_ATTRIB:
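The CS-checker workaround above records the IB offset of every CB_COLOR*_SLICE write (cb_color_slice_idx) so that, when an old DDX under-allocated the buffer, the kernel can shrink the surface height until it fits and patch the slice value in place as ((nby * nbx) / 64) - 1. A simplified, stand-alone model of that search with invented numbers (the real code also accounts for the buffer offset and re-runs evergreen_surface_check() before patching):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* invented example: 256x190 linear surface, 32bpp, one sample, one layer */
	uint32_t nbx = 256, bpe = 4, nsamples = 1, mslice = 1;
	uint32_t nby = 192;			/* old ddx programs ALIGN(190, 8) */
	uint64_t bo_size = 256ULL * 190 * 4;	/* ...but only allocates w*h*bpp */
	uint32_t min = nby > 8 ? nby - 8 : 0;

	/* walk the height back down until the layer fits in the bo */
	while (nby > min &&
	       (uint64_t)nby * nbx * bpe * nsamples * mslice > bo_size)
		nby--;

	printf("patched nby = %u, CB_COLOR*_SLICE = %u\n",
	       nby, ((nby * nbx) / 64) - 1);
	return 0;
}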
diff --git a/drivers/gpu/drm/radeon/evergreen_hdmi.c b/drivers/gpu/drm/radeon/evergreen_hdmi.c
index a51f880985f8..65c54160028b 100644
--- a/drivers/gpu/drm/radeon/evergreen_hdmi.c
+++ b/drivers/gpu/drm/radeon/evergreen_hdmi.c
@@ -156,9 +156,6 @@ void evergreen_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode
156 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 156 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
157 uint32_t offset; 157 uint32_t offset;
158 158
159 if (ASIC_IS_DCE5(rdev))
160 return;
161
162 /* Silent, r600_hdmi_enable will raise WARN for us */ 159 /* Silent, r600_hdmi_enable will raise WARN for us */
163 if (!dig->afmt->enabled) 160 if (!dig->afmt->enabled)
164 return; 161 return;
diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h
index 79130bfd1d6f..b50b15c70498 100644
--- a/drivers/gpu/drm/radeon/evergreend.h
+++ b/drivers/gpu/drm/radeon/evergreend.h
@@ -37,6 +37,15 @@
37#define EVERGREEN_MAX_PIPES_MASK 0xFF 37#define EVERGREEN_MAX_PIPES_MASK 0xFF
38#define EVERGREEN_MAX_LDS_NUM 0xFFFF 38#define EVERGREEN_MAX_LDS_NUM 0xFFFF
39 39
40#define CYPRESS_GB_ADDR_CONFIG_GOLDEN 0x02011003
41#define BARTS_GB_ADDR_CONFIG_GOLDEN 0x02011003
42#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
43#define JUNIPER_GB_ADDR_CONFIG_GOLDEN 0x02010002
44#define REDWOOD_GB_ADDR_CONFIG_GOLDEN 0x02010002
45#define TURKS_GB_ADDR_CONFIG_GOLDEN 0x02010002
46#define CEDAR_GB_ADDR_CONFIG_GOLDEN 0x02010001
47#define CAICOS_GB_ADDR_CONFIG_GOLDEN 0x02010001
48
40/* Registers */ 49/* Registers */
41 50
42#define RCU_IND_INDEX 0x100 51#define RCU_IND_INDEX 0x100
@@ -54,6 +63,7 @@
54#define BACKEND_DISABLE(x) ((x) << 16) 63#define BACKEND_DISABLE(x) ((x) << 16)
55#define GB_ADDR_CONFIG 0x98F8 64#define GB_ADDR_CONFIG 0x98F8
56#define NUM_PIPES(x) ((x) << 0) 65#define NUM_PIPES(x) ((x) << 0)
66#define NUM_PIPES_MASK 0x0000000f
57#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4) 67#define PIPE_INTERLEAVE_SIZE(x) ((x) << 4)
58#define BANK_INTERLEAVE_SIZE(x) ((x) << 8) 68#define BANK_INTERLEAVE_SIZE(x) ((x) << 8)
59#define NUM_SHADER_ENGINES(x) ((x) << 12) 69#define NUM_SHADER_ENGINES(x) ((x) << 12)
@@ -452,6 +462,7 @@
452#define MC_VM_MD_L1_TLB0_CNTL 0x2654 462#define MC_VM_MD_L1_TLB0_CNTL 0x2654
453#define MC_VM_MD_L1_TLB1_CNTL 0x2658 463#define MC_VM_MD_L1_TLB1_CNTL 0x2658
454#define MC_VM_MD_L1_TLB2_CNTL 0x265C 464#define MC_VM_MD_L1_TLB2_CNTL 0x265C
465#define MC_VM_MD_L1_TLB3_CNTL 0x2698
455 466
456#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C 467#define FUS_MC_VM_MD_L1_TLB0_CNTL 0x265C
457#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660 468#define FUS_MC_VM_MD_L1_TLB1_CNTL 0x2660
@@ -492,6 +503,7 @@
492#define SCRATCH_UMSK 0x8540 503#define SCRATCH_UMSK 0x8540
493#define SCRATCH_ADDR 0x8544 504#define SCRATCH_ADDR 0x8544
494 505
506#define SMX_SAR_CTL0 0xA008
495#define SMX_DC_CTL0 0xA020 507#define SMX_DC_CTL0 0xA020
496#define USE_HASH_FUNCTION (1 << 0) 508#define USE_HASH_FUNCTION (1 << 0)
497#define NUMBER_OF_SETS(x) ((x) << 1) 509#define NUMBER_OF_SETS(x) ((x) << 1)
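The *_GB_ADDR_CONFIG_GOLDEN constants exist so evergreen_gpu_init() can stop reconstructing GB_ADDR_CONFIG from MC_ARB_RAMCFG; each value is just the bitfields defined in this header pre-packed per family. Decoding Cypress's 0x02011003 with the shifts visible in the hunk above (a worked example; the 3-bit widths assumed for the interleave fields and the remaining fields' shifts live elsewhere in the header):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t v = 0x02011003;	/* CYPRESS_GB_ADDR_CONFIG_GOLDEN */

	printf("NUM_PIPES            = %u  (1 << n => %u tile pipes)\n",
	       v & 0xf, 1u << (v & 0xf));
	printf("PIPE_INTERLEAVE_SIZE = %u\n", (v >> 4) & 0x7);
	printf("BANK_INTERLEAVE_SIZE = %u\n", (v >> 8) & 0x7);
	printf("NUM_SHADER_ENGINES   = %u  (n + 1 => %u SEs)\n",
	       (v >> 12) & 0x3, ((v >> 12) & 0x3) + 1);
	return 0;
}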
diff --git a/drivers/gpu/drm/radeon/ni.c b/drivers/gpu/drm/radeon/ni.c
index ce4e7cc6c905..b7bf18e40215 100644
--- a/drivers/gpu/drm/radeon/ni.c
+++ b/drivers/gpu/drm/radeon/ni.c
@@ -417,215 +417,17 @@ out:
417/* 417/*
418 * Core functions 418 * Core functions
419 */ 419 */
420static u32 cayman_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
421 u32 num_tile_pipes,
422 u32 num_backends_per_asic,
423 u32 *backend_disable_mask_per_asic,
424 u32 num_shader_engines)
425{
426 u32 backend_map = 0;
427 u32 enabled_backends_mask = 0;
428 u32 enabled_backends_count = 0;
429 u32 num_backends_per_se;
430 u32 cur_pipe;
431 u32 swizzle_pipe[CAYMAN_MAX_PIPES];
432 u32 cur_backend = 0;
433 u32 i;
434 bool force_no_swizzle;
435
436 /* force legal values */
437 if (num_tile_pipes < 1)
438 num_tile_pipes = 1;
439 if (num_tile_pipes > rdev->config.cayman.max_tile_pipes)
440 num_tile_pipes = rdev->config.cayman.max_tile_pipes;
441 if (num_shader_engines < 1)
442 num_shader_engines = 1;
443 if (num_shader_engines > rdev->config.cayman.max_shader_engines)
444 num_shader_engines = rdev->config.cayman.max_shader_engines;
445 if (num_backends_per_asic < num_shader_engines)
446 num_backends_per_asic = num_shader_engines;
447 if (num_backends_per_asic > (rdev->config.cayman.max_backends_per_se * num_shader_engines))
448 num_backends_per_asic = rdev->config.cayman.max_backends_per_se * num_shader_engines;
449
450 /* make sure we have the same number of backends per se */
451 num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
452 /* set up the number of backends per se */
453 num_backends_per_se = num_backends_per_asic / num_shader_engines;
454 if (num_backends_per_se > rdev->config.cayman.max_backends_per_se) {
455 num_backends_per_se = rdev->config.cayman.max_backends_per_se;
456 num_backends_per_asic = num_backends_per_se * num_shader_engines;
457 }
458
459 /* create enable mask and count for enabled backends */
460 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
461 if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
462 enabled_backends_mask |= (1 << i);
463 ++enabled_backends_count;
464 }
465 if (enabled_backends_count == num_backends_per_asic)
466 break;
467 }
468
469 /* force the backends mask to match the current number of backends */
470 if (enabled_backends_count != num_backends_per_asic) {
471 u32 this_backend_enabled;
472 u32 shader_engine;
473 u32 backend_per_se;
474
475 enabled_backends_mask = 0;
476 enabled_backends_count = 0;
477 *backend_disable_mask_per_asic = CAYMAN_MAX_BACKENDS_MASK;
478 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
479 /* calc the current se */
480 shader_engine = i / rdev->config.cayman.max_backends_per_se;
481 /* calc the backend per se */
482 backend_per_se = i % rdev->config.cayman.max_backends_per_se;
483 /* default to not enabled */
484 this_backend_enabled = 0;
485 if ((shader_engine < num_shader_engines) &&
486 (backend_per_se < num_backends_per_se))
487 this_backend_enabled = 1;
488 if (this_backend_enabled) {
489 enabled_backends_mask |= (1 << i);
490 *backend_disable_mask_per_asic &= ~(1 << i);
491 ++enabled_backends_count;
492 }
493 }
494 }
495
496
497 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * CAYMAN_MAX_PIPES);
498 switch (rdev->family) {
499 case CHIP_CAYMAN:
500 case CHIP_ARUBA:
501 force_no_swizzle = true;
502 break;
503 default:
504 force_no_swizzle = false;
505 break;
506 }
507 if (force_no_swizzle) {
508 bool last_backend_enabled = false;
509
510 force_no_swizzle = false;
511 for (i = 0; i < CAYMAN_MAX_BACKENDS; ++i) {
512 if (((enabled_backends_mask >> i) & 1) == 1) {
513 if (last_backend_enabled)
514 force_no_swizzle = true;
515 last_backend_enabled = true;
516 } else
517 last_backend_enabled = false;
518 }
519 }
520
521 switch (num_tile_pipes) {
522 case 1:
523 case 3:
524 case 5:
525 case 7:
526 DRM_ERROR("odd number of pipes!\n");
527 break;
528 case 2:
529 swizzle_pipe[0] = 0;
530 swizzle_pipe[1] = 1;
531 break;
532 case 4:
533 if (force_no_swizzle) {
534 swizzle_pipe[0] = 0;
535 swizzle_pipe[1] = 1;
536 swizzle_pipe[2] = 2;
537 swizzle_pipe[3] = 3;
538 } else {
539 swizzle_pipe[0] = 0;
540 swizzle_pipe[1] = 2;
541 swizzle_pipe[2] = 1;
542 swizzle_pipe[3] = 3;
543 }
544 break;
545 case 6:
546 if (force_no_swizzle) {
547 swizzle_pipe[0] = 0;
548 swizzle_pipe[1] = 1;
549 swizzle_pipe[2] = 2;
550 swizzle_pipe[3] = 3;
551 swizzle_pipe[4] = 4;
552 swizzle_pipe[5] = 5;
553 } else {
554 swizzle_pipe[0] = 0;
555 swizzle_pipe[1] = 2;
556 swizzle_pipe[2] = 4;
557 swizzle_pipe[3] = 1;
558 swizzle_pipe[4] = 3;
559 swizzle_pipe[5] = 5;
560 }
561 break;
562 case 8:
563 if (force_no_swizzle) {
564 swizzle_pipe[0] = 0;
565 swizzle_pipe[1] = 1;
566 swizzle_pipe[2] = 2;
567 swizzle_pipe[3] = 3;
568 swizzle_pipe[4] = 4;
569 swizzle_pipe[5] = 5;
570 swizzle_pipe[6] = 6;
571 swizzle_pipe[7] = 7;
572 } else {
573 swizzle_pipe[0] = 0;
574 swizzle_pipe[1] = 2;
575 swizzle_pipe[2] = 4;
576 swizzle_pipe[3] = 6;
577 swizzle_pipe[4] = 1;
578 swizzle_pipe[5] = 3;
579 swizzle_pipe[6] = 5;
580 swizzle_pipe[7] = 7;
581 }
582 break;
583 }
584
585 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
586 while (((1 << cur_backend) & enabled_backends_mask) == 0)
587 cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
588
589 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
590
591 cur_backend = (cur_backend + 1) % CAYMAN_MAX_BACKENDS;
592 }
593
594 return backend_map;
595}
596
597static u32 cayman_get_disable_mask_per_asic(struct radeon_device *rdev,
598 u32 disable_mask_per_se,
599 u32 max_disable_mask_per_se,
600 u32 num_shader_engines)
601{
602 u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
603 u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
604
605 if (num_shader_engines == 1)
606 return disable_mask_per_asic;
607 else if (num_shader_engines == 2)
608 return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
609 else
610 return 0xffffffff;
611}
612
613static void cayman_gpu_init(struct radeon_device *rdev) 420static void cayman_gpu_init(struct radeon_device *rdev)
614{ 421{
615 u32 cc_rb_backend_disable = 0;
616 u32 cc_gc_shader_pipe_config;
617 u32 gb_addr_config = 0; 422 u32 gb_addr_config = 0;
618 u32 mc_shared_chmap, mc_arb_ramcfg; 423 u32 mc_shared_chmap, mc_arb_ramcfg;
619 u32 gb_backend_map;
620 u32 cgts_tcc_disable; 424 u32 cgts_tcc_disable;
621 u32 sx_debug_1; 425 u32 sx_debug_1;
622 u32 smx_dc_ctl0; 426 u32 smx_dc_ctl0;
623 u32 gc_user_shader_pipe_config;
624 u32 gc_user_rb_backend_disable;
625 u32 cgts_user_tcc_disable;
626 u32 cgts_sm_ctrl_reg; 427 u32 cgts_sm_ctrl_reg;
627 u32 hdp_host_path_cntl; 428 u32 hdp_host_path_cntl;
628 u32 tmp; 429 u32 tmp;
430 u32 disabled_rb_mask;
629 int i, j; 431 int i, j;
630 432
631 switch (rdev->family) { 433 switch (rdev->family) {
@@ -650,6 +452,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
650 rdev->config.cayman.sc_prim_fifo_size = 0x100; 452 rdev->config.cayman.sc_prim_fifo_size = 0x100;
651 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 453 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
652 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 454 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
455 gb_addr_config = CAYMAN_GB_ADDR_CONFIG_GOLDEN;
653 break; 456 break;
654 case CHIP_ARUBA: 457 case CHIP_ARUBA:
655 default: 458 default:
@@ -657,15 +460,28 @@ static void cayman_gpu_init(struct radeon_device *rdev)
657 rdev->config.cayman.max_pipes_per_simd = 4; 460 rdev->config.cayman.max_pipes_per_simd = 4;
658 rdev->config.cayman.max_tile_pipes = 2; 461 rdev->config.cayman.max_tile_pipes = 2;
659 if ((rdev->pdev->device == 0x9900) || 462 if ((rdev->pdev->device == 0x9900) ||
660 (rdev->pdev->device == 0x9901)) { 463 (rdev->pdev->device == 0x9901) ||
464 (rdev->pdev->device == 0x9905) ||
465 (rdev->pdev->device == 0x9906) ||
466 (rdev->pdev->device == 0x9907) ||
467 (rdev->pdev->device == 0x9908) ||
468 (rdev->pdev->device == 0x9909) ||
469 (rdev->pdev->device == 0x9910) ||
470 (rdev->pdev->device == 0x9917)) {
661 rdev->config.cayman.max_simds_per_se = 6; 471 rdev->config.cayman.max_simds_per_se = 6;
662 rdev->config.cayman.max_backends_per_se = 2; 472 rdev->config.cayman.max_backends_per_se = 2;
663 } else if ((rdev->pdev->device == 0x9903) || 473 } else if ((rdev->pdev->device == 0x9903) ||
664 (rdev->pdev->device == 0x9904)) { 474 (rdev->pdev->device == 0x9904) ||
475 (rdev->pdev->device == 0x990A) ||
476 (rdev->pdev->device == 0x9913) ||
477 (rdev->pdev->device == 0x9918)) {
665 rdev->config.cayman.max_simds_per_se = 4; 478 rdev->config.cayman.max_simds_per_se = 4;
666 rdev->config.cayman.max_backends_per_se = 2; 479 rdev->config.cayman.max_backends_per_se = 2;
667 } else if ((rdev->pdev->device == 0x9990) || 480 } else if ((rdev->pdev->device == 0x9919) ||
668 (rdev->pdev->device == 0x9991)) { 481 (rdev->pdev->device == 0x9990) ||
482 (rdev->pdev->device == 0x9991) ||
483 (rdev->pdev->device == 0x9994) ||
484 (rdev->pdev->device == 0x99A0)) {
669 rdev->config.cayman.max_simds_per_se = 3; 485 rdev->config.cayman.max_simds_per_se = 3;
670 rdev->config.cayman.max_backends_per_se = 1; 486 rdev->config.cayman.max_backends_per_se = 1;
671 } else { 487 } else {
@@ -687,6 +503,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
687 rdev->config.cayman.sc_prim_fifo_size = 0x40; 503 rdev->config.cayman.sc_prim_fifo_size = 0x40;
688 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30; 504 rdev->config.cayman.sc_hiz_tile_fifo_size = 0x30;
689 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130; 505 rdev->config.cayman.sc_earlyz_tile_fifo_size = 0x130;
506 gb_addr_config = ARUBA_GB_ADDR_CONFIG_GOLDEN;
690 break; 507 break;
691 } 508 }
692 509
@@ -706,39 +523,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
706 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 523 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
707 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 524 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
708 525
709 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
710 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
711 cgts_tcc_disable = 0xffff0000;
712 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
713 cgts_tcc_disable &= ~(1 << (16 + i));
714 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
715 gc_user_shader_pipe_config = RREG32(GC_USER_SHADER_PIPE_CONFIG);
716 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
717
718 rdev->config.cayman.num_shader_engines = rdev->config.cayman.max_shader_engines;
719 tmp = ((~gc_user_shader_pipe_config) & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
720 rdev->config.cayman.num_shader_pipes_per_simd = r600_count_pipe_bits(tmp);
721 rdev->config.cayman.num_tile_pipes = rdev->config.cayman.max_tile_pipes;
722 tmp = ((~gc_user_shader_pipe_config) & INACTIVE_SIMDS_MASK) >> INACTIVE_SIMDS_SHIFT;
723 rdev->config.cayman.num_simds_per_se = r600_count_pipe_bits(tmp);
724 tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
725 rdev->config.cayman.num_backends_per_se = r600_count_pipe_bits(tmp);
726 tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
727 rdev->config.cayman.backend_disable_mask_per_asic =
728 cayman_get_disable_mask_per_asic(rdev, tmp, CAYMAN_MAX_BACKENDS_PER_SE_MASK,
729 rdev->config.cayman.num_shader_engines);
730 rdev->config.cayman.backend_map =
731 cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
732 rdev->config.cayman.num_backends_per_se *
733 rdev->config.cayman.num_shader_engines,
734 &rdev->config.cayman.backend_disable_mask_per_asic,
735 rdev->config.cayman.num_shader_engines);
736 tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
737 rdev->config.cayman.num_texture_channel_caches = r600_count_pipe_bits(tmp);
738 tmp = (mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT;
739 rdev->config.cayman.mem_max_burst_length_bytes = (tmp + 1) * 256;
740 if (rdev->config.cayman.mem_max_burst_length_bytes > 512)
741 rdev->config.cayman.mem_max_burst_length_bytes = 512;
742 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 526 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
743 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 527 rdev->config.cayman.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
744 if (rdev->config.cayman.mem_row_size_in_kb > 4) 528 if (rdev->config.cayman.mem_row_size_in_kb > 4)
@@ -748,73 +532,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
748 rdev->config.cayman.num_gpus = 1; 532 rdev->config.cayman.num_gpus = 1;
749 rdev->config.cayman.multi_gpu_tile_size = 64; 533 rdev->config.cayman.multi_gpu_tile_size = 64;
750 534
751 //gb_addr_config = 0x02011003
752#if 0
753 gb_addr_config = RREG32(GB_ADDR_CONFIG);
754#else
755 gb_addr_config = 0;
756 switch (rdev->config.cayman.num_tile_pipes) {
757 case 1:
758 default:
759 gb_addr_config |= NUM_PIPES(0);
760 break;
761 case 2:
762 gb_addr_config |= NUM_PIPES(1);
763 break;
764 case 4:
765 gb_addr_config |= NUM_PIPES(2);
766 break;
767 case 8:
768 gb_addr_config |= NUM_PIPES(3);
769 break;
770 }
771
772 tmp = (rdev->config.cayman.mem_max_burst_length_bytes / 256) - 1;
773 gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
774 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.cayman.num_shader_engines - 1);
775 tmp = (rdev->config.cayman.shader_engine_tile_size / 16) - 1;
776 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
777 switch (rdev->config.cayman.num_gpus) {
778 case 1:
779 default:
780 gb_addr_config |= NUM_GPUS(0);
781 break;
782 case 2:
783 gb_addr_config |= NUM_GPUS(1);
784 break;
785 case 4:
786 gb_addr_config |= NUM_GPUS(2);
787 break;
788 }
789 switch (rdev->config.cayman.multi_gpu_tile_size) {
790 case 16:
791 gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
792 break;
793 case 32:
794 default:
795 gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
796 break;
797 case 64:
798 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
799 break;
800 case 128:
801 gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
802 break;
803 }
804 switch (rdev->config.cayman.mem_row_size_in_kb) {
805 case 1:
806 default:
807 gb_addr_config |= ROW_SIZE(0);
808 break;
809 case 2:
810 gb_addr_config |= ROW_SIZE(1);
811 break;
812 case 4:
813 gb_addr_config |= ROW_SIZE(2);
814 break;
815 }
816#endif
817
818 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT; 535 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
819 rdev->config.cayman.num_tile_pipes = (1 << tmp); 536 rdev->config.cayman.num_tile_pipes = (1 << tmp);
820 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT; 537 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
@@ -828,17 +545,7 @@ static void cayman_gpu_init(struct radeon_device *rdev)
828 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT; 545 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
829 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp; 546 rdev->config.cayman.mem_row_size_in_kb = 1 << tmp;
830 547
831 //gb_backend_map = 0x76541032; 548
832#if 0
833 gb_backend_map = RREG32(GB_BACKEND_MAP);
834#else
835 gb_backend_map =
836 cayman_get_tile_pipe_to_backend_map(rdev, rdev->config.cayman.num_tile_pipes,
837 rdev->config.cayman.num_backends_per_se *
838 rdev->config.cayman.num_shader_engines,
839 &rdev->config.cayman.backend_disable_mask_per_asic,
840 rdev->config.cayman.num_shader_engines);
841#endif
842 /* setup tiling info dword. gb_addr_config is not adequate since it does 549 /* setup tiling info dword. gb_addr_config is not adequate since it does
843 * not have bank info, so create a custom tiling dword. 550 * not have bank info, so create a custom tiling dword.
844 * bits 3:0 num_pipes 551 * bits 3:0 num_pipes
@@ -866,33 +573,49 @@ static void cayman_gpu_init(struct radeon_device *rdev)
866 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */ 573 /* num banks is 8 on all fusion asics. 0 = 4, 1 = 8, 2 = 16 */
867 if (rdev->flags & RADEON_IS_IGP) 574 if (rdev->flags & RADEON_IS_IGP)
868 rdev->config.cayman.tile_config |= 1 << 4; 575 rdev->config.cayman.tile_config |= 1 << 4;
869 else 576 else {
870 rdev->config.cayman.tile_config |= 577 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
871 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 578 rdev->config.cayman.tile_config |= 1 << 4;
579 else
580 rdev->config.cayman.tile_config |= 0 << 4;
581 }
872 rdev->config.cayman.tile_config |= 582 rdev->config.cayman.tile_config |=
873 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 583 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
874 rdev->config.cayman.tile_config |= 584 rdev->config.cayman.tile_config |=
875 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 585 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
876 586
877 rdev->config.cayman.backend_map = gb_backend_map; 587 tmp = 0;
878 WREG32(GB_BACKEND_MAP, gb_backend_map); 588 for (i = (rdev->config.cayman.max_shader_engines - 1); i >= 0; i--) {
589 u32 rb_disable_bitmap;
590
591 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
592 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_INDEX(i));
593 rb_disable_bitmap = (RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000) >> 16;
594 tmp <<= 4;
595 tmp |= rb_disable_bitmap;
596 }
 597 /* enabled RBs are just the ones not disabled :) */
598 disabled_rb_mask = tmp;
599
600 WREG32(GRBM_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
601 WREG32(RLC_GFX_INDEX, INSTANCE_BROADCAST_WRITES | SE_BROADCAST_WRITES);
602
879 WREG32(GB_ADDR_CONFIG, gb_addr_config); 603 WREG32(GB_ADDR_CONFIG, gb_addr_config);
880 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 604 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
881 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 605 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
882 606
883 /* primary versions */ 607 tmp = gb_addr_config & NUM_PIPES_MASK;
884 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable); 608 tmp = r6xx_remap_render_backend(rdev, tmp,
885 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 609 rdev->config.cayman.max_backends_per_se *
886 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config); 610 rdev->config.cayman.max_shader_engines,
611 CAYMAN_MAX_BACKENDS, disabled_rb_mask);
612 WREG32(GB_BACKEND_MAP, tmp);
887 613
614 cgts_tcc_disable = 0xffff0000;
615 for (i = 0; i < rdev->config.cayman.max_texture_channel_caches; i++)
616 cgts_tcc_disable &= ~(1 << (16 + i));
888 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable); 617 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
889 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable); 618 WREG32(CGTS_SYS_TCC_DISABLE, cgts_tcc_disable);
890
891 /* user versions */
892 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable);
893 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
894 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
895
896 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable); 619 WREG32(CGTS_USER_SYS_TCC_DISABLE, cgts_tcc_disable);
897 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 620 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable);
898 621
@@ -1580,6 +1303,10 @@ static int cayman_startup(struct radeon_device *rdev)
1580 if (r) 1303 if (r)
1581 return r; 1304 return r;
1582 1305
1306 r = r600_audio_init(rdev);
1307 if (r)
1308 return r;
1309
1583 return 0; 1310 return 0;
1584} 1311}
1585 1312
@@ -1606,6 +1333,7 @@ int cayman_resume(struct radeon_device *rdev)
1606 1333
1607int cayman_suspend(struct radeon_device *rdev) 1334int cayman_suspend(struct radeon_device *rdev)
1608{ 1335{
1336 r600_audio_fini(rdev);
1609 /* FIXME: we should wait for ring to be empty */ 1337 /* FIXME: we should wait for ring to be empty */
1610 radeon_ib_pool_suspend(rdev); 1338 radeon_ib_pool_suspend(rdev);
1611 radeon_vm_manager_suspend(rdev); 1339 radeon_vm_manager_suspend(rdev);
diff --git a/drivers/gpu/drm/radeon/nid.h b/drivers/gpu/drm/radeon/nid.h
index 2aa7046ada56..a0b98066e207 100644
--- a/drivers/gpu/drm/radeon/nid.h
+++ b/drivers/gpu/drm/radeon/nid.h
@@ -41,6 +41,9 @@
41#define CAYMAN_MAX_TCC 16 41#define CAYMAN_MAX_TCC 16
42#define CAYMAN_MAX_TCC_MASK 0xFF 42#define CAYMAN_MAX_TCC_MASK 0xFF
43 43
44#define CAYMAN_GB_ADDR_CONFIG_GOLDEN 0x02011003
45#define ARUBA_GB_ADDR_CONFIG_GOLDEN 0x12010001
46
44#define DMIF_ADDR_CONFIG 0xBD4 47#define DMIF_ADDR_CONFIG 0xBD4
45#define SRBM_GFX_CNTL 0x0E44 48#define SRBM_GFX_CNTL 0x0E44
46#define RINGID(x) (((x) & 0x3) << 0) 49#define RINGID(x) (((x) & 0x3) << 0)
@@ -148,6 +151,8 @@
148#define CGTS_SYS_TCC_DISABLE 0x3F90 151#define CGTS_SYS_TCC_DISABLE 0x3F90
149#define CGTS_USER_SYS_TCC_DISABLE 0x3F94 152#define CGTS_USER_SYS_TCC_DISABLE 0x3F94
150 153
154#define RLC_GFX_INDEX 0x3FC4
155
151#define CONFIG_MEMSIZE 0x5428 156#define CONFIG_MEMSIZE 0x5428
152 157
153#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480 158#define HDP_MEM_COHERENCY_FLUSH_CNTL 0x5480
@@ -212,6 +217,12 @@
212#define SOFT_RESET_VGT (1 << 14) 217#define SOFT_RESET_VGT (1 << 14)
213#define SOFT_RESET_IA (1 << 15) 218#define SOFT_RESET_IA (1 << 15)
214 219
220#define GRBM_GFX_INDEX 0x802C
221#define INSTANCE_INDEX(x) ((x) << 0)
222#define SE_INDEX(x) ((x) << 16)
223#define INSTANCE_BROADCAST_WRITES (1 << 30)
224#define SE_BROADCAST_WRITES (1 << 31)
225
215#define SCRATCH_REG0 0x8500 226#define SCRATCH_REG0 0x8500
216#define SCRATCH_REG1 0x8504 227#define SCRATCH_REG1 0x8504
217#define SCRATCH_REG2 0x8508 228#define SCRATCH_REG2 0x8508
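Aside, ours: the new GRBM_GFX_INDEX fields are normally combined so a register write either targets one shader engine/instance or is broadcast to all of them. The helper below is a hypothetical illustration of that encoding, not driver code; a negative index stands for broadcast.

#include <stdint.h>
#include <stdio.h>

#define INSTANCE_INDEX(x)          ((uint32_t)(x) << 0)
#define SE_INDEX(x)                ((uint32_t)(x) << 16)
#define INSTANCE_BROADCAST_WRITES  (1u << 30)
#define SE_BROADCAST_WRITES        (1u << 31)

static uint32_t grbm_gfx_index(int se, int instance)
{
	uint32_t v = 0;

	v |= (se < 0) ? SE_BROADCAST_WRITES : SE_INDEX(se);
	v |= (instance < 0) ? INSTANCE_BROADCAST_WRITES : INSTANCE_INDEX(instance);
	return v;
}

int main(void)
{
	printf("all SEs, all instances: 0x%08x\n", grbm_gfx_index(-1, -1)); /* 0xc0000000 */
	printf("SE 1, all instances:    0x%08x\n", grbm_gfx_index(1, -1));  /* 0x40010000 */
	return 0;
}
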
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index f388a1d73b63..bff627293812 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -1376,113 +1376,51 @@ int r600_asic_reset(struct radeon_device *rdev)
1376 return r600_gpu_soft_reset(rdev); 1376 return r600_gpu_soft_reset(rdev);
1377} 1377}
1378 1378
1379static u32 r600_get_tile_pipe_to_backend_map(u32 num_tile_pipes, 1379u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1380 u32 num_backends, 1380 u32 tiling_pipe_num,
1381 u32 backend_disable_mask) 1381 u32 max_rb_num,
1382{ 1382 u32 total_max_rb_num,
1383 u32 backend_map = 0; 1383 u32 disabled_rb_mask)
1384 u32 enabled_backends_mask; 1384{
1385 u32 enabled_backends_count; 1385 u32 rendering_pipe_num, rb_num_width, req_rb_num;
1386 u32 cur_pipe; 1386 u32 pipe_rb_ratio, pipe_rb_remain;
1387 u32 swizzle_pipe[R6XX_MAX_PIPES]; 1387 u32 data = 0, mask = 1 << (max_rb_num - 1);
1388 u32 cur_backend; 1388 unsigned i, j;
1389 u32 i; 1389
1390 1390 /* mask out the RBs that don't exist on that asic */
1391 if (num_tile_pipes > R6XX_MAX_PIPES) 1391 disabled_rb_mask |= (0xff << max_rb_num) & 0xff;
1392 num_tile_pipes = R6XX_MAX_PIPES; 1392
1393 if (num_tile_pipes < 1) 1393 rendering_pipe_num = 1 << tiling_pipe_num;
1394 num_tile_pipes = 1; 1394 req_rb_num = total_max_rb_num - r600_count_pipe_bits(disabled_rb_mask);
1395 if (num_backends > R6XX_MAX_BACKENDS) 1395 BUG_ON(rendering_pipe_num < req_rb_num);
1396 num_backends = R6XX_MAX_BACKENDS; 1396
1397 if (num_backends < 1) 1397 pipe_rb_ratio = rendering_pipe_num / req_rb_num;
1398 num_backends = 1; 1398 pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;
1399 1399
1400 enabled_backends_mask = 0; 1400 if (rdev->family <= CHIP_RV740) {
1401 enabled_backends_count = 0; 1401 /* r6xx/r7xx */
1402 for (i = 0; i < R6XX_MAX_BACKENDS; ++i) { 1402 rb_num_width = 2;
1403 if (((backend_disable_mask >> i) & 1) == 0) { 1403 } else {
1404 enabled_backends_mask |= (1 << i); 1404 /* eg+ */
1405 ++enabled_backends_count; 1405 rb_num_width = 4;
1406 }
1407 if (enabled_backends_count == num_backends)
1408 break;
1409 }
1410
1411 if (enabled_backends_count == 0) {
1412 enabled_backends_mask = 1;
1413 enabled_backends_count = 1;
1414 }
1415
1416 if (enabled_backends_count != num_backends)
1417 num_backends = enabled_backends_count;
1418
1419 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R6XX_MAX_PIPES);
1420 switch (num_tile_pipes) {
1421 case 1:
1422 swizzle_pipe[0] = 0;
1423 break;
1424 case 2:
1425 swizzle_pipe[0] = 0;
1426 swizzle_pipe[1] = 1;
1427 break;
1428 case 3:
1429 swizzle_pipe[0] = 0;
1430 swizzle_pipe[1] = 1;
1431 swizzle_pipe[2] = 2;
1432 break;
1433 case 4:
1434 swizzle_pipe[0] = 0;
1435 swizzle_pipe[1] = 1;
1436 swizzle_pipe[2] = 2;
1437 swizzle_pipe[3] = 3;
1438 break;
1439 case 5:
1440 swizzle_pipe[0] = 0;
1441 swizzle_pipe[1] = 1;
1442 swizzle_pipe[2] = 2;
1443 swizzle_pipe[3] = 3;
1444 swizzle_pipe[4] = 4;
1445 break;
1446 case 6:
1447 swizzle_pipe[0] = 0;
1448 swizzle_pipe[1] = 2;
1449 swizzle_pipe[2] = 4;
1450 swizzle_pipe[3] = 5;
1451 swizzle_pipe[4] = 1;
1452 swizzle_pipe[5] = 3;
1453 break;
1454 case 7:
1455 swizzle_pipe[0] = 0;
1456 swizzle_pipe[1] = 2;
1457 swizzle_pipe[2] = 4;
1458 swizzle_pipe[3] = 6;
1459 swizzle_pipe[4] = 1;
1460 swizzle_pipe[5] = 3;
1461 swizzle_pipe[6] = 5;
1462 break;
1463 case 8:
1464 swizzle_pipe[0] = 0;
1465 swizzle_pipe[1] = 2;
1466 swizzle_pipe[2] = 4;
1467 swizzle_pipe[3] = 6;
1468 swizzle_pipe[4] = 1;
1469 swizzle_pipe[5] = 3;
1470 swizzle_pipe[6] = 5;
1471 swizzle_pipe[7] = 7;
1472 break;
1473 } 1406 }
1474 1407
1475 cur_backend = 0; 1408 for (i = 0; i < max_rb_num; i++) {
1476 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) { 1409 if (!(mask & disabled_rb_mask)) {
1477 while (((1 << cur_backend) & enabled_backends_mask) == 0) 1410 for (j = 0; j < pipe_rb_ratio; j++) {
1478 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1411 data <<= rb_num_width;
1479 1412 data |= max_rb_num - i - 1;
1480 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2))); 1413 }
1481 1414 if (pipe_rb_remain) {
1482 cur_backend = (cur_backend + 1) % R6XX_MAX_BACKENDS; 1415 data <<= rb_num_width;
1416 data |= max_rb_num - i - 1;
1417 pipe_rb_remain--;
1418 }
1419 }
1420 mask >>= 1;
1483 } 1421 }
1484 1422
1485 return backend_map; 1423 return data;
1486} 1424}
1487 1425
1488int r600_count_pipe_bits(uint32_t val) 1426int r600_count_pipe_bits(uint32_t val)
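Aside, ours: a standalone copy of the remapping loop above makes it easier to see what r6xx_remap_render_backend() produces. __builtin_popcount() stands in for r600_count_pipe_bits(), the 2-bit field width of the r6xx/r7xx case is hardcoded, and the example numbers are hypothetical.

#include <stdint.h>
#include <stdio.h>

static uint32_t remap_render_backend(uint32_t tiling_pipe_num, uint32_t max_rb_num,
				     uint32_t total_max_rb_num, uint32_t disabled_rb_mask)
{
	uint32_t rendering_pipe_num, req_rb_num, pipe_rb_ratio, pipe_rb_remain;
	uint32_t data = 0, mask = 1u << (max_rb_num - 1);
	unsigned i, j;

	/* mask out the RBs that don't exist on that asic */
	disabled_rb_mask |= (0xff << max_rb_num) & 0xff;

	rendering_pipe_num = 1u << tiling_pipe_num;
	req_rb_num = total_max_rb_num - __builtin_popcount(disabled_rb_mask);

	pipe_rb_ratio = rendering_pipe_num / req_rb_num;
	pipe_rb_remain = rendering_pipe_num - pipe_rb_ratio * req_rb_num;

	for (i = 0; i < max_rb_num; i++) {
		if (!(mask & disabled_rb_mask)) {
			for (j = 0; j < pipe_rb_ratio; j++) {
				data <<= 2;
				data |= max_rb_num - i - 1;
			}
			if (pipe_rb_remain) {
				data <<= 2;
				data |= max_rb_num - i - 1;
				pipe_rb_remain--;
			}
		}
		mask >>= 1;
	}
	return data;
}

int main(void)
{
	/* 4 pipes (PIPE_TILING field = 2), 4 of 8 RBs present, none fused off:
	 * the familiar 0xe4 map, one RB per pipe slot. */
	printf("0x%02x\n", remap_render_backend(2, 4, 8, 0x0)); /* 0xe4 */
	/* Same chip with RB0 fused off: three RBs spread over four pipe slots,
	 * one of them used twice. */
	printf("0x%02x\n", remap_render_backend(2, 4, 8, 0x1)); /* 0xf9 */
	return 0;
}
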
@@ -1500,7 +1438,6 @@ void r600_gpu_init(struct radeon_device *rdev)
1500{ 1438{
1501 u32 tiling_config; 1439 u32 tiling_config;
1502 u32 ramcfg; 1440 u32 ramcfg;
1503 u32 backend_map;
1504 u32 cc_rb_backend_disable; 1441 u32 cc_rb_backend_disable;
1505 u32 cc_gc_shader_pipe_config; 1442 u32 cc_gc_shader_pipe_config;
1506 u32 tmp; 1443 u32 tmp;
@@ -1511,8 +1448,9 @@ void r600_gpu_init(struct radeon_device *rdev)
1511 u32 sq_thread_resource_mgmt = 0; 1448 u32 sq_thread_resource_mgmt = 0;
1512 u32 sq_stack_resource_mgmt_1 = 0; 1449 u32 sq_stack_resource_mgmt_1 = 0;
1513 u32 sq_stack_resource_mgmt_2 = 0; 1450 u32 sq_stack_resource_mgmt_2 = 0;
1451 u32 disabled_rb_mask;
1514 1452
1515 /* FIXME: implement */ 1453 rdev->config.r600.tiling_group_size = 256;
1516 switch (rdev->family) { 1454 switch (rdev->family) {
1517 case CHIP_R600: 1455 case CHIP_R600:
1518 rdev->config.r600.max_pipes = 4; 1456 rdev->config.r600.max_pipes = 4;
@@ -1616,10 +1554,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1616 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1554 rdev->config.r600.tiling_nbanks = 4 << ((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1617 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 1555 tiling_config |= BANK_TILING((ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT);
1618 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 1556 tiling_config |= GROUP_SIZE((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
1619 if ((ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT) 1557
1620 rdev->config.r600.tiling_group_size = 512;
1621 else
1622 rdev->config.r600.tiling_group_size = 256;
1623 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT; 1558 tmp = (ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
1624 if (tmp > 3) { 1559 if (tmp > 3) {
1625 tiling_config |= ROW_TILING(3); 1560 tiling_config |= ROW_TILING(3);
@@ -1631,32 +1566,36 @@ void r600_gpu_init(struct radeon_device *rdev)
1631 tiling_config |= BANK_SWAPS(1); 1566 tiling_config |= BANK_SWAPS(1);
1632 1567
1633 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000; 1568 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
1634 cc_rb_backend_disable |= 1569 tmp = R6XX_MAX_BACKENDS -
1635 BACKEND_DISABLE((R6XX_MAX_BACKENDS_MASK << rdev->config.r600.max_backends) & R6XX_MAX_BACKENDS_MASK); 1570 r600_count_pipe_bits((cc_rb_backend_disable >> 16) & R6XX_MAX_BACKENDS_MASK);
1636 1571 if (tmp < rdev->config.r600.max_backends) {
1637 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00; 1572 rdev->config.r600.max_backends = tmp;
1638 cc_gc_shader_pipe_config |= 1573 }
1639 INACTIVE_QD_PIPES((R6XX_MAX_PIPES_MASK << rdev->config.r600.max_pipes) & R6XX_MAX_PIPES_MASK); 1574
1640 cc_gc_shader_pipe_config |= 1575 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0x00ffff00;
1641 INACTIVE_SIMDS((R6XX_MAX_SIMDS_MASK << rdev->config.r600.max_simds) & R6XX_MAX_SIMDS_MASK); 1576 tmp = R6XX_MAX_PIPES -
1642 1577 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R6XX_MAX_PIPES_MASK);
1643 backend_map = r600_get_tile_pipe_to_backend_map(rdev->config.r600.max_tile_pipes, 1578 if (tmp < rdev->config.r600.max_pipes) {
1644 (R6XX_MAX_BACKENDS - 1579 rdev->config.r600.max_pipes = tmp;
1645 r600_count_pipe_bits((cc_rb_backend_disable & 1580 }
1646 R6XX_MAX_BACKENDS_MASK) >> 16)), 1581 tmp = R6XX_MAX_SIMDS -
1647 (cc_rb_backend_disable >> 16)); 1582 r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R6XX_MAX_SIMDS_MASK);
1583 if (tmp < rdev->config.r600.max_simds) {
1584 rdev->config.r600.max_simds = tmp;
1585 }
1586
1587 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R6XX_MAX_BACKENDS_MASK;
1588 tmp = (tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
1589 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.r600.max_backends,
1590 R6XX_MAX_BACKENDS, disabled_rb_mask);
1591 tiling_config |= tmp << 16;
1592 rdev->config.r600.backend_map = tmp;
1593
1648 rdev->config.r600.tile_config = tiling_config; 1594 rdev->config.r600.tile_config = tiling_config;
1649 rdev->config.r600.backend_map = backend_map;
1650 tiling_config |= BACKEND_MAP(backend_map);
1651 WREG32(GB_TILING_CONFIG, tiling_config); 1595 WREG32(GB_TILING_CONFIG, tiling_config);
1652 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff); 1596 WREG32(DCP_TILING_CONFIG, tiling_config & 0xffff);
1653 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff); 1597 WREG32(HDP_TILING_CONFIG, tiling_config & 0xffff);
1654 1598
1655 /* Setup pipes */
1656 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1657 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1658 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
1659
1660 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 1599 tmp = R6XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
1661 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK); 1600 WREG32(VGT_OUT_DEALLOC_CNTL, (tmp * 4) & DEALLOC_DIST_MASK);
1662 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK); 1601 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((tmp * 4) - 2) & VTX_REUSE_DEPTH_MASK);
@@ -1900,6 +1839,7 @@ void r600_gpu_init(struct radeon_device *rdev)
1900 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 1839 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
1901 NUM_CLIP_SEQ(3))); 1840 NUM_CLIP_SEQ(3)));
1902 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095)); 1841 WREG32(PA_SC_ENHANCE, FORCE_EOV_MAX_CLK_CNT(4095));
1842 WREG32(VC_ENHANCE, 0);
1903} 1843}
1904 1844
1905 1845
@@ -2487,6 +2427,12 @@ int r600_startup(struct radeon_device *rdev)
2487 if (r) 2427 if (r)
2488 return r; 2428 return r;
2489 2429
2430 r = r600_audio_init(rdev);
2431 if (r) {
2432 DRM_ERROR("radeon: audio init failed\n");
2433 return r;
2434 }
2435
2490 return 0; 2436 return 0;
2491} 2437}
2492 2438
@@ -2523,12 +2469,6 @@ int r600_resume(struct radeon_device *rdev)
2523 return r; 2469 return r;
2524 } 2470 }
2525 2471
2526 r = r600_audio_init(rdev);
2527 if (r) {
2528 DRM_ERROR("radeon: audio resume failed\n");
2529 return r;
2530 }
2531
2532 return r; 2472 return r;
2533} 2473}
2534 2474
@@ -2638,9 +2578,6 @@ int r600_init(struct radeon_device *rdev)
2638 rdev->accel_working = false; 2578 rdev->accel_working = false;
2639 } 2579 }
2640 2580
2641 r = r600_audio_init(rdev);
2642 if (r)
2643 return r; /* TODO error handling */
2644 return 0; 2581 return 0;
2645} 2582}
2646 2583
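Aside, ours: this and the rs600/rs690/rv770 hunks below all move r600_audio_init() out of the ->init()/->resume() tails and into the end of the ->startup() path (with r600_audio_fini() added at the top of cayman_suspend() above), so audio strictly follows the engine bring-up/tear-down order. A stubbed sketch of that shape; all names are simplified stand-ins.

#include <stdio.h>

static int audio_init(void)      { puts("audio init");         return 0; }
static void audio_fini(void)     { puts("audio fini"); }
static int engine_startup(void)  { puts("rings/IB pool up");   return 0; }
static void engine_suspend(void) { puts("rings/VM suspended"); }

/* startup is shared by init and resume, so audio comes up on both paths */
static int chip_startup(void)
{
	int r = engine_startup();

	if (r)
		return r;
	return audio_init(); /* last step: everything audio needs is live */
}

static int chip_suspend(void)
{
	audio_fini();        /* first step: stop audio before the engine */
	engine_suspend();
	return 0;
}

int main(void)
{
	if (chip_startup())
		return 1;
	return chip_suspend();
}
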
diff --git a/drivers/gpu/drm/radeon/r600_audio.c b/drivers/gpu/drm/radeon/r600_audio.c
index 7c4fa77f018f..79b55916cf90 100644
--- a/drivers/gpu/drm/radeon/r600_audio.c
+++ b/drivers/gpu/drm/radeon/r600_audio.c
@@ -57,7 +57,7 @@ static bool radeon_dig_encoder(struct drm_encoder *encoder)
57 */ 57 */
58static int r600_audio_chipset_supported(struct radeon_device *rdev) 58static int r600_audio_chipset_supported(struct radeon_device *rdev)
59{ 59{
60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE5(rdev)) 60 return (rdev->family >= CHIP_R600 && !ASIC_IS_DCE6(rdev))
61 || rdev->family == CHIP_RS600 61 || rdev->family == CHIP_RS600
62 || rdev->family == CHIP_RS690 62 || rdev->family == CHIP_RS690
63 || rdev->family == CHIP_RS740; 63 || rdev->family == CHIP_RS740;
@@ -192,6 +192,7 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
192 struct radeon_device *rdev = dev->dev_private; 192 struct radeon_device *rdev = dev->dev_private;
193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); 193 struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder);
194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 194 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
195 struct radeon_crtc *radeon_crtc = to_radeon_crtc(encoder->crtc);
195 int base_rate = 48000; 196 int base_rate = 48000;
196 197
197 switch (radeon_encoder->encoder_id) { 198 switch (radeon_encoder->encoder_id) {
@@ -217,8 +218,8 @@ void r600_audio_set_clock(struct drm_encoder *encoder, int clock)
217 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10); 218 WREG32(EVERGREEN_AUDIO_PLL1_DIV, clock * 10);
218 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071); 219 WREG32(EVERGREEN_AUDIO_PLL1_UNK, 0x00000071);
219 220
220 /* Some magic trigger or src sel? */ 221 /* Select DTO source */
221 WREG32_P(0x5ac, 0x01, ~0x77); 222 WREG32(0x5ac, radeon_crtc->crtc_id);
222 } else { 223 } else {
223 switch (dig->dig_encoder) { 224 switch (dig->dig_encoder) {
224 case 0: 225 case 0:
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 0133f5f09bd6..ca87f7afaf23 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -2079,6 +2079,48 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
2079 return -EINVAL; 2079 return -EINVAL;
2080 } 2080 }
2081 break; 2081 break;
2082 case PACKET3_STRMOUT_BASE_UPDATE:
2083 if (p->family < CHIP_RV770) {
2084 DRM_ERROR("STRMOUT_BASE_UPDATE only supported on 7xx\n");
2085 return -EINVAL;
2086 }
2087 if (pkt->count != 1) {
2088 DRM_ERROR("bad STRMOUT_BASE_UPDATE packet count\n");
2089 return -EINVAL;
2090 }
2091 if (idx_value > 3) {
2092 DRM_ERROR("bad STRMOUT_BASE_UPDATE index\n");
2093 return -EINVAL;
2094 }
2095 {
2096 u64 offset;
2097
2098 r = r600_cs_packet_next_reloc(p, &reloc);
2099 if (r) {
2100 DRM_ERROR("bad STRMOUT_BASE_UPDATE reloc\n");
2101 return -EINVAL;
2102 }
2103
2104 if (reloc->robj != track->vgt_strmout_bo[idx_value]) {
2105 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo does not match\n");
2106 return -EINVAL;
2107 }
2108
2109 offset = radeon_get_ib_value(p, idx+1) << 8;
2110 if (offset != track->vgt_strmout_bo_offset[idx_value]) {
2111 DRM_ERROR("bad STRMOUT_BASE_UPDATE, bo offset does not match: 0x%llx, 0x%x\n",
2112 offset, track->vgt_strmout_bo_offset[idx_value]);
2113 return -EINVAL;
2114 }
2115
2116 if ((offset + 4) > radeon_bo_size(reloc->robj)) {
2117 DRM_ERROR("bad STRMOUT_BASE_UPDATE bo too small: 0x%llx, 0x%lx\n",
2118 offset + 4, radeon_bo_size(reloc->robj));
2119 return -EINVAL;
2120 }
2121 ib[idx+1] += (u32)((reloc->lobj.gpu_offset >> 8) & 0xffffffff);
2122 }
2123 break;
2082 case PACKET3_SURFACE_BASE_UPDATE: 2124 case PACKET3_SURFACE_BASE_UPDATE:
2083 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) { 2125 if (p->family >= CHIP_RV770 || p->family == CHIP_R600) {
2084 DRM_ERROR("bad SURFACE_BASE_UPDATE\n"); 2126 DRM_ERROR("bad SURFACE_BASE_UPDATE\n");
diff --git a/drivers/gpu/drm/radeon/r600_hdmi.c b/drivers/gpu/drm/radeon/r600_hdmi.c
index 226379e00ac1..82a0a4c919c0 100644
--- a/drivers/gpu/drm/radeon/r600_hdmi.c
+++ b/drivers/gpu/drm/radeon/r600_hdmi.c
@@ -322,9 +322,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 322 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
323 uint32_t offset; 323 uint32_t offset;
324 324
325 if (ASIC_IS_DCE5(rdev))
326 return;
327
328 /* Silent, r600_hdmi_enable will raise WARN for us */ 325 /* Silent, r600_hdmi_enable will raise WARN for us */
329 if (!dig->afmt->enabled) 326 if (!dig->afmt->enabled)
330 return; 327 return;
@@ -348,7 +345,6 @@ void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mod
348 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset, 345 WREG32(HDMI0_AUDIO_PACKET_CONTROL + offset,
349 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */ 346 HDMI0_AUDIO_SAMPLE_SEND | /* send audio packets */
350 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */ 347 HDMI0_AUDIO_DELAY_EN(1) | /* default audio delay */
351 HDMI0_AUDIO_SEND_MAX_PACKETS | /* send NULL packets if no audio is available */
 352 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */ 348 HDMI0_AUDIO_PACKETS_PER_LINE(3) | /* should be sufficient for all audio modes and small enough for all hblanks */

353 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */ 349 HDMI0_60958_CS_UPDATE); /* allow 60958 channel status fields to be updated */
354 } 350 }
@@ -484,7 +480,7 @@ void r600_hdmi_enable(struct drm_encoder *encoder)
484 uint32_t offset; 480 uint32_t offset;
485 u32 hdmi; 481 u32 hdmi;
486 482
487 if (ASIC_IS_DCE5(rdev)) 483 if (ASIC_IS_DCE6(rdev))
488 return; 484 return;
489 485
490 /* Silent, r600_hdmi_enable will raise WARN for us */ 486 /* Silent, r600_hdmi_enable will raise WARN for us */
@@ -544,7 +540,7 @@ void r600_hdmi_disable(struct drm_encoder *encoder)
544 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv; 540 struct radeon_encoder_atom_dig *dig = radeon_encoder->enc_priv;
545 uint32_t offset; 541 uint32_t offset;
546 542
547 if (ASIC_IS_DCE5(rdev)) 543 if (ASIC_IS_DCE6(rdev))
548 return; 544 return;
549 545
550 /* Called for ATOM_ENCODER_MODE_HDMI only */ 546 /* Called for ATOM_ENCODER_MODE_HDMI only */
diff --git a/drivers/gpu/drm/radeon/r600d.h b/drivers/gpu/drm/radeon/r600d.h
index 15bd3b216243..025fd5b6c08c 100644
--- a/drivers/gpu/drm/radeon/r600d.h
+++ b/drivers/gpu/drm/radeon/r600d.h
@@ -219,6 +219,8 @@
219#define BACKEND_MAP(x) ((x) << 16) 219#define BACKEND_MAP(x) ((x) << 16)
220 220
221#define GB_TILING_CONFIG 0x98F0 221#define GB_TILING_CONFIG 0x98F0
222#define PIPE_TILING__SHIFT 1
223#define PIPE_TILING__MASK 0x0000000e
222 224
223#define GC_USER_SHADER_PIPE_CONFIG 0x8954 225#define GC_USER_SHADER_PIPE_CONFIG 0x8954
224#define INACTIVE_QD_PIPES(x) ((x) << 8) 226#define INACTIVE_QD_PIPES(x) ((x) << 8)
@@ -483,6 +485,7 @@
483#define TC_L2_SIZE(x) ((x)<<5) 485#define TC_L2_SIZE(x) ((x)<<5)
484#define L2_DISABLE_LATE_HIT (1<<9) 486#define L2_DISABLE_LATE_HIT (1<<9)
485 487
488#define VC_ENHANCE 0x9714
486 489
487#define VGT_CACHE_INVALIDATION 0x88C4 490#define VGT_CACHE_INVALIDATION 0x88C4
488#define CACHE_INVALIDATION(x) ((x)<<0) 491#define CACHE_INVALIDATION(x) ((x)<<0)
@@ -1161,6 +1164,7 @@
1161#define PACKET3_SET_CTL_CONST 0x6F 1164#define PACKET3_SET_CTL_CONST 0x6F
1162#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0 1165#define PACKET3_SET_CTL_CONST_OFFSET 0x0003cff0
1163#define PACKET3_SET_CTL_CONST_END 0x0003e200 1166#define PACKET3_SET_CTL_CONST_END 0x0003e200
1167#define PACKET3_STRMOUT_BASE_UPDATE 0x72 /* r7xx */
1164#define PACKET3_SURFACE_BASE_UPDATE 0x73 1168#define PACKET3_SURFACE_BASE_UPDATE 0x73
1165 1169
1166 1170
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 2e24022b389a..fefcca55c1eb 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -1374,9 +1374,9 @@ struct cayman_asic {
1374 1374
1375struct si_asic { 1375struct si_asic {
1376 unsigned max_shader_engines; 1376 unsigned max_shader_engines;
1377 unsigned max_pipes_per_simd;
1378 unsigned max_tile_pipes; 1377 unsigned max_tile_pipes;
1379 unsigned max_simds_per_se; 1378 unsigned max_cu_per_sh;
1379 unsigned max_sh_per_se;
1380 unsigned max_backends_per_se; 1380 unsigned max_backends_per_se;
1381 unsigned max_texture_channel_caches; 1381 unsigned max_texture_channel_caches;
1382 unsigned max_gprs; 1382 unsigned max_gprs;
@@ -1387,7 +1387,6 @@ struct si_asic {
1387 unsigned sc_hiz_tile_fifo_size; 1387 unsigned sc_hiz_tile_fifo_size;
1388 unsigned sc_earlyz_tile_fifo_size; 1388 unsigned sc_earlyz_tile_fifo_size;
1389 1389
1390 unsigned num_shader_engines;
1391 unsigned num_tile_pipes; 1390 unsigned num_tile_pipes;
1392 unsigned num_backends_per_se; 1391 unsigned num_backends_per_se;
1393 unsigned backend_disable_mask_per_asic; 1392 unsigned backend_disable_mask_per_asic;
@@ -1848,6 +1847,11 @@ extern struct radeon_hdmi_acr r600_hdmi_acr(uint32_t clock);
1848extern void r600_hdmi_enable(struct drm_encoder *encoder); 1847extern void r600_hdmi_enable(struct drm_encoder *encoder);
1849extern void r600_hdmi_disable(struct drm_encoder *encoder); 1848extern void r600_hdmi_disable(struct drm_encoder *encoder);
1850extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode); 1849extern void r600_hdmi_setmode(struct drm_encoder *encoder, struct drm_display_mode *mode);
1850extern u32 r6xx_remap_render_backend(struct radeon_device *rdev,
1851 u32 tiling_pipe_num,
1852 u32 max_rb_num,
1853 u32 total_max_rb_num,
1854 u32 enabled_rb_mask);
1851 1855
1852/* 1856/*
1853 * evergreen functions used by radeon_encoder.c 1857 * evergreen functions used by radeon_encoder.c
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 0137689ed461..142f89462aa4 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -147,6 +147,7 @@ static int radeon_cs_sync_rings(struct radeon_cs_parser *p)
147 sync_to_ring, p->ring); 147 sync_to_ring, p->ring);
148} 148}
149 149
150/* XXX: note that this is called from the legacy UMS CS ioctl as well */
150int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data) 151int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
151{ 152{
152 struct drm_radeon_cs *cs = data; 153 struct drm_radeon_cs *cs = data;
@@ -245,22 +246,24 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
245 } 246 }
246 } 247 }
247 248
248 if ((p->cs_flags & RADEON_CS_USE_VM) && 249 /* these are KMS only */
249 !p->rdev->vm_manager.enabled) { 250 if (p->rdev) {
250 DRM_ERROR("VM not active on asic!\n"); 251 if ((p->cs_flags & RADEON_CS_USE_VM) &&
251 return -EINVAL; 252 !p->rdev->vm_manager.enabled) {
252 } 253 DRM_ERROR("VM not active on asic!\n");
253 254 return -EINVAL;
254 /* we only support VM on SI+ */ 255 }
255 if ((p->rdev->family >= CHIP_TAHITI) &&
256 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
257 DRM_ERROR("VM required on SI+!\n");
258 return -EINVAL;
259 }
260 256
261 if (radeon_cs_get_ring(p, ring, priority)) 257 /* we only support VM on SI+ */
262 return -EINVAL; 258 if ((p->rdev->family >= CHIP_TAHITI) &&
259 ((p->cs_flags & RADEON_CS_USE_VM) == 0)) {
260 DRM_ERROR("VM required on SI+!\n");
261 return -EINVAL;
262 }
263 263
264 if (radeon_cs_get_ring(p, ring, priority))
265 return -EINVAL;
266 }
264 267
265 /* deal with non-vm */ 268 /* deal with non-vm */
266 if ((p->chunk_ib_idx != -1) && 269 if ((p->chunk_ib_idx != -1) &&
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index f0bb2b543b13..2c4d53fd20c5 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -57,9 +57,11 @@
57 * 2.13.0 - virtual memory support, streamout 57 * 2.13.0 - virtual memory support, streamout
 58 * 2.14.0 - add evergreen tiling information 58 * 2.14.0 - add evergreen tiling information
59 * 2.15.0 - add max_pipes query 59 * 2.15.0 - add max_pipes query
60 * 2.16.0 - fix evergreen 2D tiled surface calculation
61 * 2.17.0 - add STRMOUT_BASE_UPDATE for r7xx
60 */ 62 */
61#define KMS_DRIVER_MAJOR 2 63#define KMS_DRIVER_MAJOR 2
62#define KMS_DRIVER_MINOR 15 64#define KMS_DRIVER_MINOR 17
63#define KMS_DRIVER_PATCHLEVEL 0 65#define KMS_DRIVER_PATCHLEVEL 0
64int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags); 66int radeon_driver_load_kms(struct drm_device *dev, unsigned long flags);
65int radeon_driver_unload_kms(struct drm_device *dev); 67int radeon_driver_unload_kms(struct drm_device *dev);
diff --git a/drivers/gpu/drm/radeon/radeon_gart.c b/drivers/gpu/drm/radeon/radeon_gart.c
index 79db56e6c2ac..59d44937dd9f 100644
--- a/drivers/gpu/drm/radeon/radeon_gart.c
+++ b/drivers/gpu/drm/radeon/radeon_gart.c
@@ -476,12 +476,18 @@ int radeon_vm_bo_add(struct radeon_device *rdev,
476 476
477 mutex_lock(&vm->mutex); 477 mutex_lock(&vm->mutex);
478 if (last_pfn > vm->last_pfn) { 478 if (last_pfn > vm->last_pfn) {
479 /* grow va space 32M by 32M */ 479 /* release mutex and lock in right order */
480 unsigned align = ((32 << 20) >> 12) - 1; 480 mutex_unlock(&vm->mutex);
481 radeon_mutex_lock(&rdev->cs_mutex); 481 radeon_mutex_lock(&rdev->cs_mutex);
482 radeon_vm_unbind_locked(rdev, vm); 482 mutex_lock(&vm->mutex);
483 /* and check again */
484 if (last_pfn > vm->last_pfn) {
485 /* grow va space 32M by 32M */
486 unsigned align = ((32 << 20) >> 12) - 1;
487 radeon_vm_unbind_locked(rdev, vm);
488 vm->last_pfn = (last_pfn + align) & ~align;
489 }
483 radeon_mutex_unlock(&rdev->cs_mutex); 490 radeon_mutex_unlock(&rdev->cs_mutex);
484 vm->last_pfn = (last_pfn + align) & ~align;
485 } 491 }
486 head = &vm->va; 492 head = &vm->va;
487 last_offset = 0; 493 last_offset = 0;
@@ -595,8 +601,8 @@ int radeon_vm_bo_rmv(struct radeon_device *rdev,
595 if (bo_va == NULL) 601 if (bo_va == NULL)
596 return 0; 602 return 0;
597 603
598 mutex_lock(&vm->mutex);
599 radeon_mutex_lock(&rdev->cs_mutex); 604 radeon_mutex_lock(&rdev->cs_mutex);
605 mutex_lock(&vm->mutex);
600 radeon_vm_bo_update_pte(rdev, vm, bo, NULL); 606 radeon_vm_bo_update_pte(rdev, vm, bo, NULL);
601 radeon_mutex_unlock(&rdev->cs_mutex); 607 radeon_mutex_unlock(&rdev->cs_mutex);
602 list_del(&bo_va->vm_list); 608 list_del(&bo_va->vm_list);
@@ -641,9 +647,8 @@ void radeon_vm_fini(struct radeon_device *rdev, struct radeon_vm *vm)
641 struct radeon_bo_va *bo_va, *tmp; 647 struct radeon_bo_va *bo_va, *tmp;
642 int r; 648 int r;
643 649
644 mutex_lock(&vm->mutex);
645
646 radeon_mutex_lock(&rdev->cs_mutex); 650 radeon_mutex_lock(&rdev->cs_mutex);
651 mutex_lock(&vm->mutex);
647 radeon_vm_unbind_locked(rdev, vm); 652 radeon_vm_unbind_locked(rdev, vm);
648 radeon_mutex_unlock(&rdev->cs_mutex); 653 radeon_mutex_unlock(&rdev->cs_mutex);
649 654
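Aside, ours: the radeon_gart.c hunks enforce one lock order, cs_mutex before vm->mutex. The interesting case is the bo_add path, which only discovers it needs cs_mutex while already holding vm->mutex, so it drops the inner lock, takes both in the right order and re-checks the grow condition, which may have changed in between. A simplified pthread sketch of that pattern; names and types are stand-ins, not the driver API.

#include <pthread.h>

static pthread_mutex_t cs_mutex = PTHREAD_MUTEX_INITIALIZER; /* outer lock */
static pthread_mutex_t vm_mutex = PTHREAD_MUTEX_INITIALIZER; /* inner lock */
static unsigned long vm_last_pfn;

static void grow_va_space(unsigned long last_pfn)
{
	unsigned long align = ((32UL << 20) >> 12) - 1; /* grow va space 32M by 32M */

	vm_last_pfn = (last_pfn + align) & ~align;
}

static void vm_bo_add(unsigned long last_pfn)
{
	pthread_mutex_lock(&vm_mutex);
	if (last_pfn > vm_last_pfn) {
		/* release mutex and lock in right order */
		pthread_mutex_unlock(&vm_mutex);
		pthread_mutex_lock(&cs_mutex);
		pthread_mutex_lock(&vm_mutex);
		/* and check again: another thread may have grown it meanwhile */
		if (last_pfn > vm_last_pfn)
			grow_va_space(last_pfn);
		pthread_mutex_unlock(&cs_mutex);
	}
	/* ... the rest of bo_add continues under vm_mutex ... */
	pthread_mutex_unlock(&vm_mutex);
}

int main(void)
{
	vm_bo_add(0x2000);
	return vm_last_pfn == 0x2000 ? 0 : 1;
}
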
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index f1016a5820d1..5c58d7d90cb2 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -273,7 +273,7 @@ int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
273 break; 273 break;
274 case RADEON_INFO_MAX_PIPES: 274 case RADEON_INFO_MAX_PIPES:
275 if (rdev->family >= CHIP_TAHITI) 275 if (rdev->family >= CHIP_TAHITI)
276 value = rdev->config.si.max_pipes_per_simd; 276 value = rdev->config.si.max_cu_per_sh;
277 else if (rdev->family >= CHIP_CAYMAN) 277 else if (rdev->family >= CHIP_CAYMAN)
278 value = rdev->config.cayman.max_pipes_per_simd; 278 value = rdev->config.cayman.max_pipes_per_simd;
279 else if (rdev->family >= CHIP_CEDAR) 279 else if (rdev->family >= CHIP_CEDAR)
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 08825548ee69..5b37e283ec38 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -801,9 +801,13 @@ static void radeon_dynpm_idle_work_handler(struct work_struct *work)
801 int i; 801 int i;
802 802
803 for (i = 0; i < RADEON_NUM_RINGS; ++i) { 803 for (i = 0; i < RADEON_NUM_RINGS; ++i) {
804 not_processed += radeon_fence_count_emitted(rdev, i); 804 struct radeon_ring *ring = &rdev->ring[i];
805 if (not_processed >= 3) 805
806 break; 806 if (ring->ready) {
807 not_processed += radeon_fence_count_emitted(rdev, i);
808 if (not_processed >= 3)
809 break;
810 }
807 } 811 }
808 812
809 if (not_processed >= 3) { /* should upclock */ 813 if (not_processed >= 3) { /* should upclock */
diff --git a/drivers/gpu/drm/radeon/radeon_prime.c b/drivers/gpu/drm/radeon/radeon_prime.c
index 8ddab4c76710..6bef46ace831 100644
--- a/drivers/gpu/drm/radeon/radeon_prime.c
+++ b/drivers/gpu/drm/radeon/radeon_prime.c
@@ -169,11 +169,17 @@ struct dma_buf *radeon_gem_prime_export(struct drm_device *dev,
169 struct radeon_bo *bo = gem_to_radeon_bo(obj); 169 struct radeon_bo *bo = gem_to_radeon_bo(obj);
170 int ret = 0; 170 int ret = 0;
171 171
172 ret = radeon_bo_reserve(bo, false);
173 if (unlikely(ret != 0))
174 return ERR_PTR(ret);
175
172 /* pin buffer into GTT */ 176 /* pin buffer into GTT */
173 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL); 177 ret = radeon_bo_pin(bo, RADEON_GEM_DOMAIN_GTT, NULL);
174 if (ret) 178 if (ret) {
179 radeon_bo_unreserve(bo);
175 return ERR_PTR(ret); 180 return ERR_PTR(ret);
176 181 }
182 radeon_bo_unreserve(bo);
177 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags); 183 return dma_buf_export(bo, &radeon_dmabuf_ops, obj->size, flags);
178} 184}
179 185
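Aside, ours: the radeon_prime.c hunk wraps the pin in a reserve/unreserve pair and makes sure the reservation is dropped on the error path as well as on success. A tiny stand-alone sketch of that error-handling shape; the types and helpers are stand-ins, not the real TTM/radeon API.

#include <errno.h>

struct bo { int reserved; int pinned; };

static int bo_reserve(struct bo *bo)    { bo->reserved = 1; return 0; }
static void bo_unreserve(struct bo *bo) { bo->reserved = 0; }
static int bo_pin(struct bo *bo)        { if (!bo->reserved) return -EINVAL; bo->pinned = 1; return 0; }

static int export_bo(struct bo *bo)
{
	int ret = bo_reserve(bo);

	if (ret)
		return ret;

	ret = bo_pin(bo);          /* pin into GTT for the importer */
	if (ret) {
		bo_unreserve(bo);  /* error path drops the reservation too */
		return ret;
	}
	bo_unreserve(bo);
	return 0;                  /* now safe to hand out the dma-buf */
}

int main(void)
{
	struct bo bo = {0, 0};

	return export_bo(&bo);
}
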
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 25f9eef12c42..e95c5e61d4e2 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -908,12 +908,6 @@ static int rs600_startup(struct radeon_device *rdev)
908 return r; 908 return r;
909 } 909 }
910 910
911 r = r600_audio_init(rdev);
912 if (r) {
913 dev_err(rdev->dev, "failed initializing audio\n");
914 return r;
915 }
916
917 r = radeon_ib_pool_start(rdev); 911 r = radeon_ib_pool_start(rdev);
918 if (r) 912 if (r)
919 return r; 913 return r;
@@ -922,6 +916,12 @@ static int rs600_startup(struct radeon_device *rdev)
922 if (r) 916 if (r)
923 return r; 917 return r;
924 918
919 r = r600_audio_init(rdev);
920 if (r) {
921 dev_err(rdev->dev, "failed initializing audio\n");
922 return r;
923 }
924
925 return 0; 925 return 0;
926} 926}
927 927
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 3277ddecfe9f..159b6a43fda0 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -637,12 +637,6 @@ static int rs690_startup(struct radeon_device *rdev)
637 return r; 637 return r;
638 } 638 }
639 639
640 r = r600_audio_init(rdev);
641 if (r) {
642 dev_err(rdev->dev, "failed initializing audio\n");
643 return r;
644 }
645
646 r = radeon_ib_pool_start(rdev); 640 r = radeon_ib_pool_start(rdev);
647 if (r) 641 if (r)
648 return r; 642 return r;
@@ -651,6 +645,12 @@ static int rs690_startup(struct radeon_device *rdev)
651 if (r) 645 if (r)
652 return r; 646 return r;
653 647
648 r = r600_audio_init(rdev);
649 if (r) {
650 dev_err(rdev->dev, "failed initializing audio\n");
651 return r;
652 }
653
654 return 0; 654 return 0;
655} 655}
656 656
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index c2f473bc13b8..b4f51c569c36 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -151,6 +151,8 @@ int rv770_pcie_gart_enable(struct radeon_device *rdev)
151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp); 151 WREG32(MC_VM_MD_L1_TLB0_CNTL, tmp);
152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp); 152 WREG32(MC_VM_MD_L1_TLB1_CNTL, tmp);
153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp); 153 WREG32(MC_VM_MD_L1_TLB2_CNTL, tmp);
154 if (rdev->family == CHIP_RV740)
155 WREG32(MC_VM_MD_L1_TLB3_CNTL, tmp);
154 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp); 156 WREG32(MC_VM_MB_L1_TLB0_CNTL, tmp);
155 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp); 157 WREG32(MC_VM_MB_L1_TLB1_CNTL, tmp);
156 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp); 158 WREG32(MC_VM_MB_L1_TLB2_CNTL, tmp);
@@ -363,180 +365,6 @@ void r700_cp_fini(struct radeon_device *rdev)
363/* 365/*
364 * Core functions 366 * Core functions
365 */ 367 */
366static u32 r700_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
367 u32 num_tile_pipes,
368 u32 num_backends,
369 u32 backend_disable_mask)
370{
371 u32 backend_map = 0;
372 u32 enabled_backends_mask;
373 u32 enabled_backends_count;
374 u32 cur_pipe;
375 u32 swizzle_pipe[R7XX_MAX_PIPES];
376 u32 cur_backend;
377 u32 i;
378 bool force_no_swizzle;
379
380 if (num_tile_pipes > R7XX_MAX_PIPES)
381 num_tile_pipes = R7XX_MAX_PIPES;
382 if (num_tile_pipes < 1)
383 num_tile_pipes = 1;
384 if (num_backends > R7XX_MAX_BACKENDS)
385 num_backends = R7XX_MAX_BACKENDS;
386 if (num_backends < 1)
387 num_backends = 1;
388
389 enabled_backends_mask = 0;
390 enabled_backends_count = 0;
391 for (i = 0; i < R7XX_MAX_BACKENDS; ++i) {
392 if (((backend_disable_mask >> i) & 1) == 0) {
393 enabled_backends_mask |= (1 << i);
394 ++enabled_backends_count;
395 }
396 if (enabled_backends_count == num_backends)
397 break;
398 }
399
400 if (enabled_backends_count == 0) {
401 enabled_backends_mask = 1;
402 enabled_backends_count = 1;
403 }
404
405 if (enabled_backends_count != num_backends)
406 num_backends = enabled_backends_count;
407
408 switch (rdev->family) {
409 case CHIP_RV770:
410 case CHIP_RV730:
411 force_no_swizzle = false;
412 break;
413 case CHIP_RV710:
414 case CHIP_RV740:
415 default:
416 force_no_swizzle = true;
417 break;
418 }
419
420 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * R7XX_MAX_PIPES);
421 switch (num_tile_pipes) {
422 case 1:
423 swizzle_pipe[0] = 0;
424 break;
425 case 2:
426 swizzle_pipe[0] = 0;
427 swizzle_pipe[1] = 1;
428 break;
429 case 3:
430 if (force_no_swizzle) {
431 swizzle_pipe[0] = 0;
432 swizzle_pipe[1] = 1;
433 swizzle_pipe[2] = 2;
434 } else {
435 swizzle_pipe[0] = 0;
436 swizzle_pipe[1] = 2;
437 swizzle_pipe[2] = 1;
438 }
439 break;
440 case 4:
441 if (force_no_swizzle) {
442 swizzle_pipe[0] = 0;
443 swizzle_pipe[1] = 1;
444 swizzle_pipe[2] = 2;
445 swizzle_pipe[3] = 3;
446 } else {
447 swizzle_pipe[0] = 0;
448 swizzle_pipe[1] = 2;
449 swizzle_pipe[2] = 3;
450 swizzle_pipe[3] = 1;
451 }
452 break;
453 case 5:
454 if (force_no_swizzle) {
455 swizzle_pipe[0] = 0;
456 swizzle_pipe[1] = 1;
457 swizzle_pipe[2] = 2;
458 swizzle_pipe[3] = 3;
459 swizzle_pipe[4] = 4;
460 } else {
461 swizzle_pipe[0] = 0;
462 swizzle_pipe[1] = 2;
463 swizzle_pipe[2] = 4;
464 swizzle_pipe[3] = 1;
465 swizzle_pipe[4] = 3;
466 }
467 break;
468 case 6:
469 if (force_no_swizzle) {
470 swizzle_pipe[0] = 0;
471 swizzle_pipe[1] = 1;
472 swizzle_pipe[2] = 2;
473 swizzle_pipe[3] = 3;
474 swizzle_pipe[4] = 4;
475 swizzle_pipe[5] = 5;
476 } else {
477 swizzle_pipe[0] = 0;
478 swizzle_pipe[1] = 2;
479 swizzle_pipe[2] = 4;
480 swizzle_pipe[3] = 5;
481 swizzle_pipe[4] = 3;
482 swizzle_pipe[5] = 1;
483 }
484 break;
485 case 7:
486 if (force_no_swizzle) {
487 swizzle_pipe[0] = 0;
488 swizzle_pipe[1] = 1;
489 swizzle_pipe[2] = 2;
490 swizzle_pipe[3] = 3;
491 swizzle_pipe[4] = 4;
492 swizzle_pipe[5] = 5;
493 swizzle_pipe[6] = 6;
494 } else {
495 swizzle_pipe[0] = 0;
496 swizzle_pipe[1] = 2;
497 swizzle_pipe[2] = 4;
498 swizzle_pipe[3] = 6;
499 swizzle_pipe[4] = 3;
500 swizzle_pipe[5] = 1;
501 swizzle_pipe[6] = 5;
502 }
503 break;
504 case 8:
505 if (force_no_swizzle) {
506 swizzle_pipe[0] = 0;
507 swizzle_pipe[1] = 1;
508 swizzle_pipe[2] = 2;
509 swizzle_pipe[3] = 3;
510 swizzle_pipe[4] = 4;
511 swizzle_pipe[5] = 5;
512 swizzle_pipe[6] = 6;
513 swizzle_pipe[7] = 7;
514 } else {
515 swizzle_pipe[0] = 0;
516 swizzle_pipe[1] = 2;
517 swizzle_pipe[2] = 4;
518 swizzle_pipe[3] = 6;
519 swizzle_pipe[4] = 3;
520 swizzle_pipe[5] = 1;
521 swizzle_pipe[6] = 7;
522 swizzle_pipe[7] = 5;
523 }
524 break;
525 }
526
527 cur_backend = 0;
528 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
529 while (((1 << cur_backend) & enabled_backends_mask) == 0)
530 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
531
532 backend_map |= (u32)(((cur_backend & 3) << (swizzle_pipe[cur_pipe] * 2)));
533
534 cur_backend = (cur_backend + 1) % R7XX_MAX_BACKENDS;
535 }
536
537 return backend_map;
538}
539
540static void rv770_gpu_init(struct radeon_device *rdev) 368static void rv770_gpu_init(struct radeon_device *rdev)
541{ 369{
542 int i, j, num_qd_pipes; 370 int i, j, num_qd_pipes;
@@ -552,14 +380,17 @@ static void rv770_gpu_init(struct radeon_device *rdev)
552 u32 sq_thread_resource_mgmt; 380 u32 sq_thread_resource_mgmt;
553 u32 hdp_host_path_cntl; 381 u32 hdp_host_path_cntl;
554 u32 sq_dyn_gpr_size_simd_ab_0; 382 u32 sq_dyn_gpr_size_simd_ab_0;
555 u32 backend_map;
556 u32 gb_tiling_config = 0; 383 u32 gb_tiling_config = 0;
557 u32 cc_rb_backend_disable = 0; 384 u32 cc_rb_backend_disable = 0;
558 u32 cc_gc_shader_pipe_config = 0; 385 u32 cc_gc_shader_pipe_config = 0;
559 u32 mc_arb_ramcfg; 386 u32 mc_arb_ramcfg;
560 u32 db_debug4; 387 u32 db_debug4, tmp;
388 u32 inactive_pipes, shader_pipe_config;
389 u32 disabled_rb_mask;
390 unsigned active_number;
561 391
562 /* setup chip specs */ 392 /* setup chip specs */
393 rdev->config.rv770.tiling_group_size = 256;
563 switch (rdev->family) { 394 switch (rdev->family) {
564 case CHIP_RV770: 395 case CHIP_RV770:
565 rdev->config.rv770.max_pipes = 4; 396 rdev->config.rv770.max_pipes = 4;
@@ -670,33 +501,70 @@ static void rv770_gpu_init(struct radeon_device *rdev)
670 /* setup tiling, simd, pipe config */ 501 /* setup tiling, simd, pipe config */
671 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 502 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
672 503
504 shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG);
505 inactive_pipes = (shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> INACTIVE_QD_PIPES_SHIFT;
506 for (i = 0, tmp = 1, active_number = 0; i < R7XX_MAX_PIPES; i++) {
507 if (!(inactive_pipes & tmp)) {
508 active_number++;
509 }
510 tmp <<= 1;
511 }
512 if (active_number == 1) {
513 WREG32(SPI_CONFIG_CNTL, DISABLE_INTERP_1);
514 } else {
515 WREG32(SPI_CONFIG_CNTL, 0);
516 }
517
518 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
519 tmp = R7XX_MAX_BACKENDS - r600_count_pipe_bits(cc_rb_backend_disable >> 16);
520 if (tmp < rdev->config.rv770.max_backends) {
521 rdev->config.rv770.max_backends = tmp;
522 }
523
524 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
525 tmp = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 8) & R7XX_MAX_PIPES_MASK);
526 if (tmp < rdev->config.rv770.max_pipes) {
527 rdev->config.rv770.max_pipes = tmp;
528 }
529 tmp = R7XX_MAX_SIMDS - r600_count_pipe_bits((cc_gc_shader_pipe_config >> 16) & R7XX_MAX_SIMDS_MASK);
530 if (tmp < rdev->config.rv770.max_simds) {
531 rdev->config.rv770.max_simds = tmp;
532 }
533
673 switch (rdev->config.rv770.max_tile_pipes) { 534 switch (rdev->config.rv770.max_tile_pipes) {
674 case 1: 535 case 1:
675 default: 536 default:
676 gb_tiling_config |= PIPE_TILING(0); 537 gb_tiling_config = PIPE_TILING(0);
677 break; 538 break;
678 case 2: 539 case 2:
679 gb_tiling_config |= PIPE_TILING(1); 540 gb_tiling_config = PIPE_TILING(1);
680 break; 541 break;
681 case 4: 542 case 4:
682 gb_tiling_config |= PIPE_TILING(2); 543 gb_tiling_config = PIPE_TILING(2);
683 break; 544 break;
684 case 8: 545 case 8:
685 gb_tiling_config |= PIPE_TILING(3); 546 gb_tiling_config = PIPE_TILING(3);
686 break; 547 break;
687 } 548 }
688 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes; 549 rdev->config.rv770.tiling_npipes = rdev->config.rv770.max_tile_pipes;
689 550
551 disabled_rb_mask = (RREG32(CC_RB_BACKEND_DISABLE) >> 16) & R7XX_MAX_BACKENDS_MASK;
552 tmp = (gb_tiling_config & PIPE_TILING__MASK) >> PIPE_TILING__SHIFT;
553 tmp = r6xx_remap_render_backend(rdev, tmp, rdev->config.rv770.max_backends,
554 R7XX_MAX_BACKENDS, disabled_rb_mask);
555 gb_tiling_config |= tmp << 16;
556 rdev->config.rv770.backend_map = tmp;
557
690 if (rdev->family == CHIP_RV770) 558 if (rdev->family == CHIP_RV770)
691 gb_tiling_config |= BANK_TILING(1); 559 gb_tiling_config |= BANK_TILING(1);
692 else 560 else {
693 gb_tiling_config |= BANK_TILING((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT); 561 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
562 gb_tiling_config |= BANK_TILING(1);
563 else
564 gb_tiling_config |= BANK_TILING(0);
565 }
694 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3); 566 rdev->config.rv770.tiling_nbanks = 4 << ((gb_tiling_config >> 4) & 0x3);
695 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT); 567 gb_tiling_config |= GROUP_SIZE((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT);
696 if ((mc_arb_ramcfg & BURSTLENGTH_MASK) >> BURSTLENGTH_SHIFT)
697 rdev->config.rv770.tiling_group_size = 512;
698 else
699 rdev->config.rv770.tiling_group_size = 256;
700 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) { 568 if (((mc_arb_ramcfg & NOOFROWS_MASK) >> NOOFROWS_SHIFT) > 3) {
701 gb_tiling_config |= ROW_TILING(3); 569 gb_tiling_config |= ROW_TILING(3);
702 gb_tiling_config |= SAMPLE_SPLIT(3); 570 gb_tiling_config |= SAMPLE_SPLIT(3);
@@ -708,47 +576,19 @@ static void rv770_gpu_init(struct radeon_device *rdev)
708 } 576 }
709 577
710 gb_tiling_config |= BANK_SWAPS(1); 578 gb_tiling_config |= BANK_SWAPS(1);
711
712 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE) & 0x00ff0000;
713 cc_rb_backend_disable |=
714 BACKEND_DISABLE((R7XX_MAX_BACKENDS_MASK << rdev->config.rv770.max_backends) & R7XX_MAX_BACKENDS_MASK);
715
716 cc_gc_shader_pipe_config = RREG32(CC_GC_SHADER_PIPE_CONFIG) & 0xffffff00;
717 cc_gc_shader_pipe_config |=
718 INACTIVE_QD_PIPES((R7XX_MAX_PIPES_MASK << rdev->config.rv770.max_pipes) & R7XX_MAX_PIPES_MASK);
719 cc_gc_shader_pipe_config |=
720 INACTIVE_SIMDS((R7XX_MAX_SIMDS_MASK << rdev->config.rv770.max_simds) & R7XX_MAX_SIMDS_MASK);
721
722 if (rdev->family == CHIP_RV740)
723 backend_map = 0x28;
724 else
725 backend_map = r700_get_tile_pipe_to_backend_map(rdev,
726 rdev->config.rv770.max_tile_pipes,
727 (R7XX_MAX_BACKENDS -
728 r600_count_pipe_bits((cc_rb_backend_disable &
729 R7XX_MAX_BACKENDS_MASK) >> 16)),
730 (cc_rb_backend_disable >> 16));
731
732 rdev->config.rv770.tile_config = gb_tiling_config; 579 rdev->config.rv770.tile_config = gb_tiling_config;
733 rdev->config.rv770.backend_map = backend_map;
734 gb_tiling_config |= BACKEND_MAP(backend_map);
735 580
736 WREG32(GB_TILING_CONFIG, gb_tiling_config); 581 WREG32(GB_TILING_CONFIG, gb_tiling_config);
737 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 582 WREG32(DCP_TILING_CONFIG, (gb_tiling_config & 0xffff));
738 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff)); 583 WREG32(HDP_TILING_CONFIG, (gb_tiling_config & 0xffff));
739 584
740 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
741 WREG32(CC_GC_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
742 WREG32(GC_USER_SHADER_PIPE_CONFIG, cc_gc_shader_pipe_config);
743 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
744
745 WREG32(CGTS_SYS_TCC_DISABLE, 0); 585 WREG32(CGTS_SYS_TCC_DISABLE, 0);
746 WREG32(CGTS_TCC_DISABLE, 0); 586 WREG32(CGTS_TCC_DISABLE, 0);
747 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0); 587 WREG32(CGTS_USER_SYS_TCC_DISABLE, 0);
748 WREG32(CGTS_USER_TCC_DISABLE, 0); 588 WREG32(CGTS_USER_TCC_DISABLE, 0);
749 589
750 num_qd_pipes = 590
751 R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8); 591 num_qd_pipes = R7XX_MAX_PIPES - r600_count_pipe_bits((cc_gc_shader_pipe_config & INACTIVE_QD_PIPES_MASK) >> 8);
752 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK); 592 WREG32(VGT_OUT_DEALLOC_CNTL, (num_qd_pipes * 4) & DEALLOC_DIST_MASK);
753 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK); 593 WREG32(VGT_VERTEX_REUSE_BLOCK_CNTL, ((num_qd_pipes * 4) - 2) & VTX_REUSE_DEPTH_MASK);
754 594
@@ -776,6 +616,9 @@ static void rv770_gpu_init(struct radeon_device *rdev)
776 ACK_FLUSH_CTL(3) | 616 ACK_FLUSH_CTL(3) |
777 SYNC_FLUSH_CTL)); 617 SYNC_FLUSH_CTL));
778 618
619 if (rdev->family != CHIP_RV770)
620 WREG32(SMX_SAR_CTL0, 0x00003f3f);
621
779 db_debug3 = RREG32(DB_DEBUG3); 622 db_debug3 = RREG32(DB_DEBUG3);
780 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f); 623 db_debug3 &= ~DB_CLK_OFF_DELAY(0x1f);
781 switch (rdev->family) { 624 switch (rdev->family) {
@@ -809,8 +652,6 @@ static void rv770_gpu_init(struct radeon_device *rdev)
809 652
810 WREG32(VGT_NUM_INSTANCES, 1); 653 WREG32(VGT_NUM_INSTANCES, 1);
811 654
812 WREG32(SPI_CONFIG_CNTL, GPR_WRITE_PRIORITY(0));
813
814 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4)); 655 WREG32(SPI_CONFIG_CNTL_1, VTX_DONE_DELAY(4));
815 656
816 WREG32(CP_PERFMON_CNTL, 0); 657 WREG32(CP_PERFMON_CNTL, 0);
@@ -954,7 +795,7 @@ static void rv770_gpu_init(struct radeon_device *rdev)
954 795
955 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA | 796 WREG32(PA_CL_ENHANCE, (CLIP_VTX_REORDER_ENA |
956 NUM_CLIP_SEQ(3))); 797 NUM_CLIP_SEQ(3)));
957 798 WREG32(VC_ENHANCE, 0);
958} 799}
959 800
960void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc) 801void r700_vram_gtt_location(struct radeon_device *rdev, struct radeon_mc *mc)
@@ -1118,6 +959,12 @@ static int rv770_startup(struct radeon_device *rdev)
1118 if (r) 959 if (r)
1119 return r; 960 return r;
1120 961
962 r = r600_audio_init(rdev);
963 if (r) {
964 DRM_ERROR("radeon: audio init failed\n");
965 return r;
966 }
967
1121 return 0; 968 return 0;
1122} 969}
1123 970
@@ -1140,12 +987,6 @@ int rv770_resume(struct radeon_device *rdev)
1140 return r; 987 return r;
1141 } 988 }
1142 989
1143 r = r600_audio_init(rdev);
1144 if (r) {
1145 dev_err(rdev->dev, "radeon: audio init failed\n");
1146 return r;
1147 }
1148
1149 return r; 990 return r;
1150 991
1151} 992}
@@ -1254,12 +1095,6 @@ int rv770_init(struct radeon_device *rdev)
1254 rdev->accel_working = false; 1095 rdev->accel_working = false;
1255 } 1096 }
1256 1097
1257 r = r600_audio_init(rdev);
1258 if (r) {
1259 dev_err(rdev->dev, "radeon: audio init failed\n");
1260 return r;
1261 }
1262
1263 return 0; 1098 return 0;
1264} 1099}
1265 1100
diff --git a/drivers/gpu/drm/radeon/rv770d.h b/drivers/gpu/drm/radeon/rv770d.h
index 9c549f702f2f..b0adfc595d75 100644
--- a/drivers/gpu/drm/radeon/rv770d.h
+++ b/drivers/gpu/drm/radeon/rv770d.h
@@ -106,10 +106,13 @@
106#define BACKEND_MAP(x) ((x) << 16) 106#define BACKEND_MAP(x) ((x) << 16)
107 107
108#define GB_TILING_CONFIG 0x98F0 108#define GB_TILING_CONFIG 0x98F0
109#define PIPE_TILING__SHIFT 1
110#define PIPE_TILING__MASK 0x0000000e
109 111
110#define GC_USER_SHADER_PIPE_CONFIG 0x8954 112#define GC_USER_SHADER_PIPE_CONFIG 0x8954
111#define INACTIVE_QD_PIPES(x) ((x) << 8) 113#define INACTIVE_QD_PIPES(x) ((x) << 8)
112#define INACTIVE_QD_PIPES_MASK 0x0000FF00 114#define INACTIVE_QD_PIPES_MASK 0x0000FF00
115#define INACTIVE_QD_PIPES_SHIFT 8
113#define INACTIVE_SIMDS(x) ((x) << 16) 116#define INACTIVE_SIMDS(x) ((x) << 16)
114#define INACTIVE_SIMDS_MASK 0x00FF0000 117#define INACTIVE_SIMDS_MASK 0x00FF0000
115 118
@@ -174,6 +177,7 @@
174#define MC_VM_MD_L1_TLB0_CNTL 0x2654 177#define MC_VM_MD_L1_TLB0_CNTL 0x2654
175#define MC_VM_MD_L1_TLB1_CNTL 0x2658 178#define MC_VM_MD_L1_TLB1_CNTL 0x2658
176#define MC_VM_MD_L1_TLB2_CNTL 0x265C 179#define MC_VM_MD_L1_TLB2_CNTL 0x265C
180#define MC_VM_MD_L1_TLB3_CNTL 0x2698
177#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C 181#define MC_VM_SYSTEM_APERTURE_DEFAULT_ADDR 0x203C
178#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038 182#define MC_VM_SYSTEM_APERTURE_HIGH_ADDR 0x2038
179#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034 183#define MC_VM_SYSTEM_APERTURE_LOW_ADDR 0x2034
@@ -207,6 +211,7 @@
207#define SCRATCH_UMSK 0x8540 211#define SCRATCH_UMSK 0x8540
208#define SCRATCH_ADDR 0x8544 212#define SCRATCH_ADDR 0x8544
209 213
214#define SMX_SAR_CTL0 0xA008
210#define SMX_DC_CTL0 0xA020 215#define SMX_DC_CTL0 0xA020
211#define USE_HASH_FUNCTION (1 << 0) 216#define USE_HASH_FUNCTION (1 << 0)
212#define CACHE_DEPTH(x) ((x) << 1) 217#define CACHE_DEPTH(x) ((x) << 1)
@@ -306,6 +311,8 @@
306#define TCP_CNTL 0x9610 311#define TCP_CNTL 0x9610
307#define TCP_CHAN_STEER 0x9614 312#define TCP_CHAN_STEER 0x9614
308 313
314#define VC_ENHANCE 0x9714
315
309#define VGT_CACHE_INVALIDATION 0x88C4 316#define VGT_CACHE_INVALIDATION 0x88C4
310#define CACHE_INVALIDATION(x) ((x)<<0) 317#define CACHE_INVALIDATION(x) ((x)<<0)
311#define VC_ONLY 0 318#define VC_ONLY 0
diff --git a/drivers/gpu/drm/radeon/si.c b/drivers/gpu/drm/radeon/si.c
index 549732e56ca9..c7b61f16ecfd 100644
--- a/drivers/gpu/drm/radeon/si.c
+++ b/drivers/gpu/drm/radeon/si.c
@@ -867,200 +867,6 @@ void dce6_bandwidth_update(struct radeon_device *rdev)
867/* 867/*
868 * Core functions 868 * Core functions
869 */ 869 */
870static u32 si_get_tile_pipe_to_backend_map(struct radeon_device *rdev,
871 u32 num_tile_pipes,
872 u32 num_backends_per_asic,
873 u32 *backend_disable_mask_per_asic,
874 u32 num_shader_engines)
875{
876 u32 backend_map = 0;
877 u32 enabled_backends_mask = 0;
878 u32 enabled_backends_count = 0;
879 u32 num_backends_per_se;
880 u32 cur_pipe;
881 u32 swizzle_pipe[SI_MAX_PIPES];
882 u32 cur_backend = 0;
883 u32 i;
884 bool force_no_swizzle;
885
886 /* force legal values */
887 if (num_tile_pipes < 1)
888 num_tile_pipes = 1;
889 if (num_tile_pipes > rdev->config.si.max_tile_pipes)
890 num_tile_pipes = rdev->config.si.max_tile_pipes;
891 if (num_shader_engines < 1)
892 num_shader_engines = 1;
893 if (num_shader_engines > rdev->config.si.max_shader_engines)
894 num_shader_engines = rdev->config.si.max_shader_engines;
895 if (num_backends_per_asic < num_shader_engines)
896 num_backends_per_asic = num_shader_engines;
897 if (num_backends_per_asic > (rdev->config.si.max_backends_per_se * num_shader_engines))
898 num_backends_per_asic = rdev->config.si.max_backends_per_se * num_shader_engines;
899
900 /* make sure we have the same number of backends per se */
901 num_backends_per_asic = ALIGN(num_backends_per_asic, num_shader_engines);
902 /* set up the number of backends per se */
903 num_backends_per_se = num_backends_per_asic / num_shader_engines;
904 if (num_backends_per_se > rdev->config.si.max_backends_per_se) {
905 num_backends_per_se = rdev->config.si.max_backends_per_se;
906 num_backends_per_asic = num_backends_per_se * num_shader_engines;
907 }
908
909 /* create enable mask and count for enabled backends */
910 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
911 if (((*backend_disable_mask_per_asic >> i) & 1) == 0) {
912 enabled_backends_mask |= (1 << i);
913 ++enabled_backends_count;
914 }
915 if (enabled_backends_count == num_backends_per_asic)
916 break;
917 }
918
919 /* force the backends mask to match the current number of backends */
920 if (enabled_backends_count != num_backends_per_asic) {
921 u32 this_backend_enabled;
922 u32 shader_engine;
923 u32 backend_per_se;
924
925 enabled_backends_mask = 0;
926 enabled_backends_count = 0;
927 *backend_disable_mask_per_asic = SI_MAX_BACKENDS_MASK;
928 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
929 /* calc the current se */
930 shader_engine = i / rdev->config.si.max_backends_per_se;
931 /* calc the backend per se */
932 backend_per_se = i % rdev->config.si.max_backends_per_se;
933 /* default to not enabled */
934 this_backend_enabled = 0;
935 if ((shader_engine < num_shader_engines) &&
936 (backend_per_se < num_backends_per_se))
937 this_backend_enabled = 1;
938 if (this_backend_enabled) {
939 enabled_backends_mask |= (1 << i);
940 *backend_disable_mask_per_asic &= ~(1 << i);
941 ++enabled_backends_count;
942 }
943 }
944 }
945
946
947 memset((uint8_t *)&swizzle_pipe[0], 0, sizeof(u32) * SI_MAX_PIPES);
948 switch (rdev->family) {
949 case CHIP_TAHITI:
950 case CHIP_PITCAIRN:
951 case CHIP_VERDE:
952 force_no_swizzle = true;
953 break;
954 default:
955 force_no_swizzle = false;
956 break;
957 }
958 if (force_no_swizzle) {
959 bool last_backend_enabled = false;
960
961 force_no_swizzle = false;
962 for (i = 0; i < SI_MAX_BACKENDS; ++i) {
963 if (((enabled_backends_mask >> i) & 1) == 1) {
964 if (last_backend_enabled)
965 force_no_swizzle = true;
966 last_backend_enabled = true;
967 } else
968 last_backend_enabled = false;
969 }
970 }
971
972 switch (num_tile_pipes) {
973 case 1:
974 case 3:
975 case 5:
976 case 7:
977 DRM_ERROR("odd number of pipes!\n");
978 break;
979 case 2:
980 swizzle_pipe[0] = 0;
981 swizzle_pipe[1] = 1;
982 break;
983 case 4:
984 if (force_no_swizzle) {
985 swizzle_pipe[0] = 0;
986 swizzle_pipe[1] = 1;
987 swizzle_pipe[2] = 2;
988 swizzle_pipe[3] = 3;
989 } else {
990 swizzle_pipe[0] = 0;
991 swizzle_pipe[1] = 2;
992 swizzle_pipe[2] = 1;
993 swizzle_pipe[3] = 3;
994 }
995 break;
996 case 6:
997 if (force_no_swizzle) {
998 swizzle_pipe[0] = 0;
999 swizzle_pipe[1] = 1;
1000 swizzle_pipe[2] = 2;
1001 swizzle_pipe[3] = 3;
1002 swizzle_pipe[4] = 4;
1003 swizzle_pipe[5] = 5;
1004 } else {
1005 swizzle_pipe[0] = 0;
1006 swizzle_pipe[1] = 2;
1007 swizzle_pipe[2] = 4;
1008 swizzle_pipe[3] = 1;
1009 swizzle_pipe[4] = 3;
1010 swizzle_pipe[5] = 5;
1011 }
1012 break;
1013 case 8:
1014 if (force_no_swizzle) {
1015 swizzle_pipe[0] = 0;
1016 swizzle_pipe[1] = 1;
1017 swizzle_pipe[2] = 2;
1018 swizzle_pipe[3] = 3;
1019 swizzle_pipe[4] = 4;
1020 swizzle_pipe[5] = 5;
1021 swizzle_pipe[6] = 6;
1022 swizzle_pipe[7] = 7;
1023 } else {
1024 swizzle_pipe[0] = 0;
1025 swizzle_pipe[1] = 2;
1026 swizzle_pipe[2] = 4;
1027 swizzle_pipe[3] = 6;
1028 swizzle_pipe[4] = 1;
1029 swizzle_pipe[5] = 3;
1030 swizzle_pipe[6] = 5;
1031 swizzle_pipe[7] = 7;
1032 }
1033 break;
1034 }
1035
1036 for (cur_pipe = 0; cur_pipe < num_tile_pipes; ++cur_pipe) {
1037 while (((1 << cur_backend) & enabled_backends_mask) == 0)
1038 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1039
1040 backend_map |= (((cur_backend & 0xf) << (swizzle_pipe[cur_pipe] * 4)));
1041
1042 cur_backend = (cur_backend + 1) % SI_MAX_BACKENDS;
1043 }
1044
1045 return backend_map;
1046}
1047
1048static u32 si_get_disable_mask_per_asic(struct radeon_device *rdev,
1049 u32 disable_mask_per_se,
1050 u32 max_disable_mask_per_se,
1051 u32 num_shader_engines)
1052{
1053 u32 disable_field_width_per_se = r600_count_pipe_bits(disable_mask_per_se);
1054 u32 disable_mask_per_asic = disable_mask_per_se & max_disable_mask_per_se;
1055
1056 if (num_shader_engines == 1)
1057 return disable_mask_per_asic;
1058 else if (num_shader_engines == 2)
1059 return disable_mask_per_asic | (disable_mask_per_asic << disable_field_width_per_se);
1060 else
1061 return 0xffffffff;
1062}
1063
1064static void si_tiling_mode_table_init(struct radeon_device *rdev) 870static void si_tiling_mode_table_init(struct radeon_device *rdev)
1065{ 871{
1066 const u32 num_tile_mode_states = 32; 872 const u32 num_tile_mode_states = 32;
@@ -1562,18 +1368,151 @@ static void si_tiling_mode_table_init(struct radeon_device *rdev)
1562 DRM_ERROR("unknown asic: 0x%x\n", rdev->family); 1368 DRM_ERROR("unknown asic: 0x%x\n", rdev->family);
1563} 1369}
1564 1370
1371static void si_select_se_sh(struct radeon_device *rdev,
1372 u32 se_num, u32 sh_num)
1373{
1374 u32 data = INSTANCE_BROADCAST_WRITES;
1375
1376 if ((se_num == 0xffffffff) && (sh_num == 0xffffffff))
1377 data = SH_BROADCAST_WRITES | SE_BROADCAST_WRITES;
1378 else if (se_num == 0xffffffff)
1379 data |= SE_BROADCAST_WRITES | SH_INDEX(sh_num);
1380 else if (sh_num == 0xffffffff)
1381 data |= SH_BROADCAST_WRITES | SE_INDEX(se_num);
1382 else
1383 data |= SH_INDEX(sh_num) | SE_INDEX(se_num);
1384 WREG32(GRBM_GFX_INDEX, data);
1385}
1386
1387static u32 si_create_bitmask(u32 bit_width)
1388{
1389 u32 i, mask = 0;
1390
1391 for (i = 0; i < bit_width; i++) {
1392 mask <<= 1;
1393 mask |= 1;
1394 }
1395 return mask;
1396}
1397
1398static u32 si_get_cu_enabled(struct radeon_device *rdev, u32 cu_per_sh)
1399{
1400 u32 data, mask;
1401
1402 data = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1403 if (data & 1)
1404 data &= INACTIVE_CUS_MASK;
1405 else
1406 data = 0;
1407 data |= RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1408
1409 data >>= INACTIVE_CUS_SHIFT;
1410
1411 mask = si_create_bitmask(cu_per_sh);
1412
1413 return ~data & mask;
1414}
1415
1416static void si_setup_spi(struct radeon_device *rdev,
1417 u32 se_num, u32 sh_per_se,
1418 u32 cu_per_sh)
1419{
1420 int i, j, k;
1421 u32 data, mask, active_cu;
1422
1423 for (i = 0; i < se_num; i++) {
1424 for (j = 0; j < sh_per_se; j++) {
1425 si_select_se_sh(rdev, i, j);
1426 data = RREG32(SPI_STATIC_THREAD_MGMT_3);
1427 active_cu = si_get_cu_enabled(rdev, cu_per_sh);
1428
1429 mask = 1;
1430 for (k = 0; k < 16; k++) {
1431 mask <<= k;
1432 if (active_cu & mask) {
1433 data &= ~mask;
1434 WREG32(SPI_STATIC_THREAD_MGMT_3, data);
1435 break;
1436 }
1437 }
1438 }
1439 }
1440 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1441}
1442
1443static u32 si_get_rb_disabled(struct radeon_device *rdev,
1444 u32 max_rb_num, u32 se_num,
1445 u32 sh_per_se)
1446{
1447 u32 data, mask;
1448
1449 data = RREG32(CC_RB_BACKEND_DISABLE);
1450 if (data & 1)
1451 data &= BACKEND_DISABLE_MASK;
1452 else
1453 data = 0;
1454 data |= RREG32(GC_USER_RB_BACKEND_DISABLE);
1455
1456 data >>= BACKEND_DISABLE_SHIFT;
1457
1458 mask = si_create_bitmask(max_rb_num / se_num / sh_per_se);
1459
1460 return data & mask;
1461}
1462
1463static void si_setup_rb(struct radeon_device *rdev,
1464 u32 se_num, u32 sh_per_se,
1465 u32 max_rb_num)
1466{
1467 int i, j;
1468 u32 data, mask;
1469 u32 disabled_rbs = 0;
1470 u32 enabled_rbs = 0;
1471
1472 for (i = 0; i < se_num; i++) {
1473 for (j = 0; j < sh_per_se; j++) {
1474 si_select_se_sh(rdev, i, j);
1475 data = si_get_rb_disabled(rdev, max_rb_num, se_num, sh_per_se);
1476 disabled_rbs |= data << ((i * sh_per_se + j) * TAHITI_RB_BITMAP_WIDTH_PER_SH);
1477 }
1478 }
1479 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1480
1481 mask = 1;
1482 for (i = 0; i < max_rb_num; i++) {
1483 if (!(disabled_rbs & mask))
1484 enabled_rbs |= mask;
1485 mask <<= 1;
1486 }
1487
1488 for (i = 0; i < se_num; i++) {
1489 si_select_se_sh(rdev, i, 0xffffffff);
1490 data = 0;
1491 for (j = 0; j < sh_per_se; j++) {
1492 switch (enabled_rbs & 3) {
1493 case 1:
1494 data |= (RASTER_CONFIG_RB_MAP_0 << (i * sh_per_se + j) * 2);
1495 break;
1496 case 2:
1497 data |= (RASTER_CONFIG_RB_MAP_3 << (i * sh_per_se + j) * 2);
1498 break;
1499 case 3:
1500 default:
1501 data |= (RASTER_CONFIG_RB_MAP_2 << (i * sh_per_se + j) * 2);
1502 break;
1503 }
1504 enabled_rbs >>= 2;
1505 }
1506 WREG32(PA_SC_RASTER_CONFIG, data);
1507 }
1508 si_select_se_sh(rdev, 0xffffffff, 0xffffffff);
1509}
1510
1565static void si_gpu_init(struct radeon_device *rdev) 1511static void si_gpu_init(struct radeon_device *rdev)
1566{ 1512{
1567 u32 cc_rb_backend_disable = 0;
1568 u32 cc_gc_shader_array_config;
1569 u32 gb_addr_config = 0; 1513 u32 gb_addr_config = 0;
1570 u32 mc_shared_chmap, mc_arb_ramcfg; 1514 u32 mc_shared_chmap, mc_arb_ramcfg;
1571 u32 gb_backend_map;
1572 u32 cgts_tcc_disable;
1573 u32 sx_debug_1; 1515 u32 sx_debug_1;
1574 u32 gc_user_shader_array_config;
1575 u32 gc_user_rb_backend_disable;
1576 u32 cgts_user_tcc_disable;
1577 u32 hdp_host_path_cntl; 1516 u32 hdp_host_path_cntl;
1578 u32 tmp; 1517 u32 tmp;
1579 int i, j; 1518 int i, j;
@@ -1581,9 +1520,9 @@ static void si_gpu_init(struct radeon_device *rdev)
1581 switch (rdev->family) { 1520 switch (rdev->family) {
1582 case CHIP_TAHITI: 1521 case CHIP_TAHITI:
1583 rdev->config.si.max_shader_engines = 2; 1522 rdev->config.si.max_shader_engines = 2;
1584 rdev->config.si.max_pipes_per_simd = 4;
1585 rdev->config.si.max_tile_pipes = 12; 1523 rdev->config.si.max_tile_pipes = 12;
1586 rdev->config.si.max_simds_per_se = 8; 1524 rdev->config.si.max_cu_per_sh = 8;
1525 rdev->config.si.max_sh_per_se = 2;
1587 rdev->config.si.max_backends_per_se = 4; 1526 rdev->config.si.max_backends_per_se = 4;
1588 rdev->config.si.max_texture_channel_caches = 12; 1527 rdev->config.si.max_texture_channel_caches = 12;
1589 rdev->config.si.max_gprs = 256; 1528 rdev->config.si.max_gprs = 256;
@@ -1594,12 +1533,13 @@ static void si_gpu_init(struct radeon_device *rdev)
1594 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1533 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1595 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1534 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1596 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1535 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1536 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1597 break; 1537 break;
1598 case CHIP_PITCAIRN: 1538 case CHIP_PITCAIRN:
1599 rdev->config.si.max_shader_engines = 2; 1539 rdev->config.si.max_shader_engines = 2;
1600 rdev->config.si.max_pipes_per_simd = 4;
1601 rdev->config.si.max_tile_pipes = 8; 1540 rdev->config.si.max_tile_pipes = 8;
1602 rdev->config.si.max_simds_per_se = 5; 1541 rdev->config.si.max_cu_per_sh = 5;
1542 rdev->config.si.max_sh_per_se = 2;
1603 rdev->config.si.max_backends_per_se = 4; 1543 rdev->config.si.max_backends_per_se = 4;
1604 rdev->config.si.max_texture_channel_caches = 8; 1544 rdev->config.si.max_texture_channel_caches = 8;
1605 rdev->config.si.max_gprs = 256; 1545 rdev->config.si.max_gprs = 256;
@@ -1610,13 +1550,14 @@ static void si_gpu_init(struct radeon_device *rdev)
1610 rdev->config.si.sc_prim_fifo_size_backend = 0x100; 1550 rdev->config.si.sc_prim_fifo_size_backend = 0x100;
1611 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1551 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1612 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1552 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1553 gb_addr_config = TAHITI_GB_ADDR_CONFIG_GOLDEN;
1613 break; 1554 break;
1614 case CHIP_VERDE: 1555 case CHIP_VERDE:
1615 default: 1556 default:
1616 rdev->config.si.max_shader_engines = 1; 1557 rdev->config.si.max_shader_engines = 1;
1617 rdev->config.si.max_pipes_per_simd = 4;
1618 rdev->config.si.max_tile_pipes = 4; 1558 rdev->config.si.max_tile_pipes = 4;
1619 rdev->config.si.max_simds_per_se = 2; 1559 rdev->config.si.max_cu_per_sh = 2;
1560 rdev->config.si.max_sh_per_se = 2;
1620 rdev->config.si.max_backends_per_se = 4; 1561 rdev->config.si.max_backends_per_se = 4;
1621 rdev->config.si.max_texture_channel_caches = 4; 1562 rdev->config.si.max_texture_channel_caches = 4;
1622 rdev->config.si.max_gprs = 256; 1563 rdev->config.si.max_gprs = 256;
@@ -1627,6 +1568,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1627 rdev->config.si.sc_prim_fifo_size_backend = 0x40; 1568 rdev->config.si.sc_prim_fifo_size_backend = 0x40;
1628 rdev->config.si.sc_hiz_tile_fifo_size = 0x30; 1569 rdev->config.si.sc_hiz_tile_fifo_size = 0x30;
1629 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130; 1570 rdev->config.si.sc_earlyz_tile_fifo_size = 0x130;
1571 gb_addr_config = VERDE_GB_ADDR_CONFIG_GOLDEN;
1630 break; 1572 break;
1631 } 1573 }
1632 1574
@@ -1648,31 +1590,7 @@ static void si_gpu_init(struct radeon_device *rdev)
1648 mc_shared_chmap = RREG32(MC_SHARED_CHMAP); 1590 mc_shared_chmap = RREG32(MC_SHARED_CHMAP);
1649 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG); 1591 mc_arb_ramcfg = RREG32(MC_ARB_RAMCFG);
1650 1592
1651 cc_rb_backend_disable = RREG32(CC_RB_BACKEND_DISABLE);
1652 cc_gc_shader_array_config = RREG32(CC_GC_SHADER_ARRAY_CONFIG);
1653 cgts_tcc_disable = 0xffff0000;
1654 for (i = 0; i < rdev->config.si.max_texture_channel_caches; i++)
1655 cgts_tcc_disable &= ~(1 << (16 + i));
1656 gc_user_rb_backend_disable = RREG32(GC_USER_RB_BACKEND_DISABLE);
1657 gc_user_shader_array_config = RREG32(GC_USER_SHADER_ARRAY_CONFIG);
1658 cgts_user_tcc_disable = RREG32(CGTS_USER_TCC_DISABLE);
1659
1660 rdev->config.si.num_shader_engines = rdev->config.si.max_shader_engines;
1661 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes; 1593 rdev->config.si.num_tile_pipes = rdev->config.si.max_tile_pipes;
1662 tmp = ((~gc_user_rb_backend_disable) & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1663 rdev->config.si.num_backends_per_se = r600_count_pipe_bits(tmp);
1664 tmp = (gc_user_rb_backend_disable & BACKEND_DISABLE_MASK) >> BACKEND_DISABLE_SHIFT;
1665 rdev->config.si.backend_disable_mask_per_asic =
1666 si_get_disable_mask_per_asic(rdev, tmp, SI_MAX_BACKENDS_PER_SE_MASK,
1667 rdev->config.si.num_shader_engines);
1668 rdev->config.si.backend_map =
1669 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1670 rdev->config.si.num_backends_per_se *
1671 rdev->config.si.num_shader_engines,
1672 &rdev->config.si.backend_disable_mask_per_asic,
1673 rdev->config.si.num_shader_engines);
1674 tmp = ((~cgts_user_tcc_disable) & TCC_DISABLE_MASK) >> TCC_DISABLE_SHIFT;
1675 rdev->config.si.num_texture_channel_caches = r600_count_pipe_bits(tmp);
1676 rdev->config.si.mem_max_burst_length_bytes = 256; 1594 rdev->config.si.mem_max_burst_length_bytes = 256;
1677 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT; 1595 tmp = (mc_arb_ramcfg & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT;
1678 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024; 1596 rdev->config.si.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
@@ -1683,55 +1601,8 @@ static void si_gpu_init(struct radeon_device *rdev)
1683 rdev->config.si.num_gpus = 1; 1601 rdev->config.si.num_gpus = 1;
1684 rdev->config.si.multi_gpu_tile_size = 64; 1602 rdev->config.si.multi_gpu_tile_size = 64;
1685 1603
1686 gb_addr_config = 0; 1604 /* fix up row size */
1687 switch (rdev->config.si.num_tile_pipes) { 1605 gb_addr_config &= ~ROW_SIZE_MASK;
1688 case 1:
1689 gb_addr_config |= NUM_PIPES(0);
1690 break;
1691 case 2:
1692 gb_addr_config |= NUM_PIPES(1);
1693 break;
1694 case 4:
1695 gb_addr_config |= NUM_PIPES(2);
1696 break;
1697 case 8:
1698 default:
1699 gb_addr_config |= NUM_PIPES(3);
1700 break;
1701 }
1702
1703 tmp = (rdev->config.si.mem_max_burst_length_bytes / 256) - 1;
1704 gb_addr_config |= PIPE_INTERLEAVE_SIZE(tmp);
1705 gb_addr_config |= NUM_SHADER_ENGINES(rdev->config.si.num_shader_engines - 1);
1706 tmp = (rdev->config.si.shader_engine_tile_size / 16) - 1;
1707 gb_addr_config |= SHADER_ENGINE_TILE_SIZE(tmp);
1708 switch (rdev->config.si.num_gpus) {
1709 case 1:
1710 default:
1711 gb_addr_config |= NUM_GPUS(0);
1712 break;
1713 case 2:
1714 gb_addr_config |= NUM_GPUS(1);
1715 break;
1716 case 4:
1717 gb_addr_config |= NUM_GPUS(2);
1718 break;
1719 }
1720 switch (rdev->config.si.multi_gpu_tile_size) {
1721 case 16:
1722 gb_addr_config |= MULTI_GPU_TILE_SIZE(0);
1723 break;
1724 case 32:
1725 default:
1726 gb_addr_config |= MULTI_GPU_TILE_SIZE(1);
1727 break;
1728 case 64:
1729 gb_addr_config |= MULTI_GPU_TILE_SIZE(2);
1730 break;
1731 case 128:
1732 gb_addr_config |= MULTI_GPU_TILE_SIZE(3);
1733 break;
1734 }
1735 switch (rdev->config.si.mem_row_size_in_kb) { 1606 switch (rdev->config.si.mem_row_size_in_kb) {
1736 case 1: 1607 case 1:
1737 default: 1608 default:
@@ -1745,26 +1616,6 @@ static void si_gpu_init(struct radeon_device *rdev)
1745 break; 1616 break;
1746 } 1617 }
1747 1618
1748 tmp = (gb_addr_config & NUM_PIPES_MASK) >> NUM_PIPES_SHIFT;
1749 rdev->config.si.num_tile_pipes = (1 << tmp);
1750 tmp = (gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT;
1751 rdev->config.si.mem_max_burst_length_bytes = (tmp + 1) * 256;
1752 tmp = (gb_addr_config & NUM_SHADER_ENGINES_MASK) >> NUM_SHADER_ENGINES_SHIFT;
1753 rdev->config.si.num_shader_engines = tmp + 1;
1754 tmp = (gb_addr_config & NUM_GPUS_MASK) >> NUM_GPUS_SHIFT;
1755 rdev->config.si.num_gpus = tmp + 1;
1756 tmp = (gb_addr_config & MULTI_GPU_TILE_SIZE_MASK) >> MULTI_GPU_TILE_SIZE_SHIFT;
1757 rdev->config.si.multi_gpu_tile_size = 1 << tmp;
1758 tmp = (gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT;
1759 rdev->config.si.mem_row_size_in_kb = 1 << tmp;
1760
1761 gb_backend_map =
1762 si_get_tile_pipe_to_backend_map(rdev, rdev->config.si.num_tile_pipes,
1763 rdev->config.si.num_backends_per_se *
1764 rdev->config.si.num_shader_engines,
1765 &rdev->config.si.backend_disable_mask_per_asic,
1766 rdev->config.si.num_shader_engines);
1767
1768 /* setup tiling info dword. gb_addr_config is not adequate since it does 1619 /* setup tiling info dword. gb_addr_config is not adequate since it does
1769 * not have bank info, so create a custom tiling dword. 1620 * not have bank info, so create a custom tiling dword.
1770 * bits 3:0 num_pipes 1621 * bits 3:0 num_pipes
@@ -1789,33 +1640,29 @@ static void si_gpu_init(struct radeon_device *rdev)
1789 rdev->config.si.tile_config |= (3 << 0); 1640 rdev->config.si.tile_config |= (3 << 0);
1790 break; 1641 break;
1791 } 1642 }
1792 rdev->config.si.tile_config |= 1643 if ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT)
1793 ((mc_arb_ramcfg & NOOFBANK_MASK) >> NOOFBANK_SHIFT) << 4; 1644 rdev->config.si.tile_config |= 1 << 4;
1645 else
1646 rdev->config.si.tile_config |= 0 << 4;
1794 rdev->config.si.tile_config |= 1647 rdev->config.si.tile_config |=
1795 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8; 1648 ((gb_addr_config & PIPE_INTERLEAVE_SIZE_MASK) >> PIPE_INTERLEAVE_SIZE_SHIFT) << 8;
1796 rdev->config.si.tile_config |= 1649 rdev->config.si.tile_config |=
1797 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12; 1650 ((gb_addr_config & ROW_SIZE_MASK) >> ROW_SIZE_SHIFT) << 12;
1798 1651
1799 rdev->config.si.backend_map = gb_backend_map;
1800 WREG32(GB_ADDR_CONFIG, gb_addr_config); 1652 WREG32(GB_ADDR_CONFIG, gb_addr_config);
1801 WREG32(DMIF_ADDR_CONFIG, gb_addr_config); 1653 WREG32(DMIF_ADDR_CONFIG, gb_addr_config);
1802 WREG32(HDP_ADDR_CONFIG, gb_addr_config); 1654 WREG32(HDP_ADDR_CONFIG, gb_addr_config);
1803 1655
1804 /* primary versions */ 1656 si_tiling_mode_table_init(rdev);
1805 WREG32(CC_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1806 WREG32(CC_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable);
1807 WREG32(CC_GC_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1808
1809 WREG32(CGTS_TCC_DISABLE, cgts_tcc_disable);
1810 1657
1811 /* user versions */ 1658 si_setup_rb(rdev, rdev->config.si.max_shader_engines,
1812 WREG32(GC_USER_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1659 rdev->config.si.max_sh_per_se,
1813 WREG32(GC_USER_SYS_RB_BACKEND_DISABLE, cc_rb_backend_disable); 1660 rdev->config.si.max_backends_per_se);
1814 WREG32(GC_USER_SHADER_ARRAY_CONFIG, cc_gc_shader_array_config);
1815 1661
1816 WREG32(CGTS_USER_TCC_DISABLE, cgts_tcc_disable); 1662 si_setup_spi(rdev, rdev->config.si.max_shader_engines,
1663 rdev->config.si.max_sh_per_se,
1664 rdev->config.si.max_cu_per_sh);
1817 1665
1818 si_tiling_mode_table_init(rdev);
1819 1666
1820 /* set HW defaults for 3D engine */ 1667 /* set HW defaults for 3D engine */
1821 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) | 1668 WREG32(CP_QUEUE_THRESHOLDS, (ROQ_IB1_START(0x16) |
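Note on the new si_create_bitmask() helper above: it builds a mask of bit_width low-order ones one bit at a time; outside the kernel the same result is the usual (1 << n) - 1 idiom. A small standalone sketch (plain C, not part of the patch):

#include <assert.h>
#include <stdint.h>

/* Equivalent of the loop in si_create_bitmask(): n low-order bits set. */
static uint32_t bitmask(uint32_t n)
{
	return (n >= 32) ? 0xffffffffu : ((1u << n) - 1u);
}

int main(void)
{
	/* e.g. max_rb_num / se_num / sh_per_se == 2 gives a two-bit mask */
	assert(bitmask(2) == 0x3);
	assert(bitmask(16) == 0xffff);
	return 0;
}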
diff --git a/drivers/gpu/drm/radeon/si_reg.h b/drivers/gpu/drm/radeon/si_reg.h
index eda938a7cb6e..501f9d431d57 100644
--- a/drivers/gpu/drm/radeon/si_reg.h
+++ b/drivers/gpu/drm/radeon/si_reg.h
@@ -30,4 +30,76 @@
30#define SI_DC_GPIO_HPD_EN 0x65b8 30#define SI_DC_GPIO_HPD_EN 0x65b8
31#define SI_DC_GPIO_HPD_Y 0x65bc 31#define SI_DC_GPIO_HPD_Y 0x65bc
32 32
33#define SI_GRPH_CONTROL 0x6804
34# define SI_GRPH_DEPTH(x) (((x) & 0x3) << 0)
35# define SI_GRPH_DEPTH_8BPP 0
36# define SI_GRPH_DEPTH_16BPP 1
37# define SI_GRPH_DEPTH_32BPP 2
38# define SI_GRPH_NUM_BANKS(x) (((x) & 0x3) << 2)
39# define SI_ADDR_SURF_2_BANK 0
40# define SI_ADDR_SURF_4_BANK 1
41# define SI_ADDR_SURF_8_BANK 2
42# define SI_ADDR_SURF_16_BANK 3
43# define SI_GRPH_Z(x) (((x) & 0x3) << 4)
44# define SI_GRPH_BANK_WIDTH(x) (((x) & 0x3) << 6)
45# define SI_ADDR_SURF_BANK_WIDTH_1 0
46# define SI_ADDR_SURF_BANK_WIDTH_2 1
47# define SI_ADDR_SURF_BANK_WIDTH_4 2
48# define SI_ADDR_SURF_BANK_WIDTH_8 3
49# define SI_GRPH_FORMAT(x) (((x) & 0x7) << 8)
50/* 8 BPP */
51# define SI_GRPH_FORMAT_INDEXED 0
52/* 16 BPP */
53# define SI_GRPH_FORMAT_ARGB1555 0
54# define SI_GRPH_FORMAT_ARGB565 1
55# define SI_GRPH_FORMAT_ARGB4444 2
56# define SI_GRPH_FORMAT_AI88 3
57# define SI_GRPH_FORMAT_MONO16 4
58# define SI_GRPH_FORMAT_BGRA5551 5
59/* 32 BPP */
60# define SI_GRPH_FORMAT_ARGB8888 0
61# define SI_GRPH_FORMAT_ARGB2101010 1
62# define SI_GRPH_FORMAT_32BPP_DIG 2
63# define SI_GRPH_FORMAT_8B_ARGB2101010 3
64# define SI_GRPH_FORMAT_BGRA1010102 4
65# define SI_GRPH_FORMAT_8B_BGRA1010102 5
66# define SI_GRPH_FORMAT_RGB111110 6
67# define SI_GRPH_FORMAT_BGR101111 7
68# define SI_GRPH_BANK_HEIGHT(x) (((x) & 0x3) << 11)
69# define SI_ADDR_SURF_BANK_HEIGHT_1 0
70# define SI_ADDR_SURF_BANK_HEIGHT_2 1
71# define SI_ADDR_SURF_BANK_HEIGHT_4 2
72# define SI_ADDR_SURF_BANK_HEIGHT_8 3
73# define SI_GRPH_TILE_SPLIT(x) (((x) & 0x7) << 13)
74# define SI_ADDR_SURF_TILE_SPLIT_64B 0
75# define SI_ADDR_SURF_TILE_SPLIT_128B 1
76# define SI_ADDR_SURF_TILE_SPLIT_256B 2
77# define SI_ADDR_SURF_TILE_SPLIT_512B 3
78# define SI_ADDR_SURF_TILE_SPLIT_1KB 4
79# define SI_ADDR_SURF_TILE_SPLIT_2KB 5
80# define SI_ADDR_SURF_TILE_SPLIT_4KB 6
81# define SI_GRPH_MACRO_TILE_ASPECT(x) (((x) & 0x3) << 18)
82# define SI_ADDR_SURF_MACRO_TILE_ASPECT_1 0
83# define SI_ADDR_SURF_MACRO_TILE_ASPECT_2 1
84# define SI_ADDR_SURF_MACRO_TILE_ASPECT_4 2
85# define SI_ADDR_SURF_MACRO_TILE_ASPECT_8 3
86# define SI_GRPH_ARRAY_MODE(x) (((x) & 0x7) << 20)
87# define SI_GRPH_ARRAY_LINEAR_GENERAL 0
88# define SI_GRPH_ARRAY_LINEAR_ALIGNED 1
89# define SI_GRPH_ARRAY_1D_TILED_THIN1 2
90# define SI_GRPH_ARRAY_2D_TILED_THIN1 4
91# define SI_GRPH_PIPE_CONFIG(x) (((x) & 0x1f) << 24)
92# define SI_ADDR_SURF_P2 0
93# define SI_ADDR_SURF_P4_8x16 4
94# define SI_ADDR_SURF_P4_16x16 5
95# define SI_ADDR_SURF_P4_16x32 6
96# define SI_ADDR_SURF_P4_32x32 7
97# define SI_ADDR_SURF_P8_16x16_8x16 8
98# define SI_ADDR_SURF_P8_16x32_8x16 9
99# define SI_ADDR_SURF_P8_32x32_8x16 10
100# define SI_ADDR_SURF_P8_16x32_16x16 11
101# define SI_ADDR_SURF_P8_32x32_16x16 12
102# define SI_ADDR_SURF_P8_32x32_16x32 13
103# define SI_ADDR_SURF_P8_32x64_32x32 14
104
33#endif 105#endif
diff --git a/drivers/gpu/drm/radeon/sid.h b/drivers/gpu/drm/radeon/sid.h
index 53ea2c42dbd6..db4067962868 100644
--- a/drivers/gpu/drm/radeon/sid.h
+++ b/drivers/gpu/drm/radeon/sid.h
@@ -24,6 +24,11 @@
24#ifndef SI_H 24#ifndef SI_H
25#define SI_H 25#define SI_H
26 26
27#define TAHITI_RB_BITMAP_WIDTH_PER_SH 2
28
29#define TAHITI_GB_ADDR_CONFIG_GOLDEN 0x12011003
30#define VERDE_GB_ADDR_CONFIG_GOLDEN 0x12010002
31
27#define CG_MULT_THERMAL_STATUS 0x714 32#define CG_MULT_THERMAL_STATUS 0x714
28#define ASIC_MAX_TEMP(x) ((x) << 0) 33#define ASIC_MAX_TEMP(x) ((x) << 0)
29#define ASIC_MAX_TEMP_MASK 0x000001ff 34#define ASIC_MAX_TEMP_MASK 0x000001ff
@@ -408,6 +413,12 @@
408#define SOFT_RESET_IA (1 << 15) 413#define SOFT_RESET_IA (1 << 15)
409 414
410#define GRBM_GFX_INDEX 0x802C 415#define GRBM_GFX_INDEX 0x802C
416#define INSTANCE_INDEX(x) ((x) << 0)
417#define SH_INDEX(x) ((x) << 8)
418#define SE_INDEX(x) ((x) << 16)
419#define SH_BROADCAST_WRITES (1 << 29)
420#define INSTANCE_BROADCAST_WRITES (1 << 30)
421#define SE_BROADCAST_WRITES (1 << 31)
411 422
412#define GRBM_INT_CNTL 0x8060 423#define GRBM_INT_CNTL 0x8060
413# define RDERR_INT_ENABLE (1 << 0) 424# define RDERR_INT_ENABLE (1 << 0)
@@ -480,6 +491,8 @@
480#define VGT_TF_MEMORY_BASE 0x89B8 491#define VGT_TF_MEMORY_BASE 0x89B8
481 492
482#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc 493#define CC_GC_SHADER_ARRAY_CONFIG 0x89bc
494#define INACTIVE_CUS_MASK 0xFFFF0000
495#define INACTIVE_CUS_SHIFT 16
483#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0 496#define GC_USER_SHADER_ARRAY_CONFIG 0x89c0
484 497
485#define PA_CL_ENHANCE 0x8A14 498#define PA_CL_ENHANCE 0x8A14
@@ -688,6 +701,12 @@
688#define RLC_MC_CNTL 0xC344 701#define RLC_MC_CNTL 0xC344
689#define RLC_UCODE_CNTL 0xC348 702#define RLC_UCODE_CNTL 0xC348
690 703
704#define PA_SC_RASTER_CONFIG 0x28350
705# define RASTER_CONFIG_RB_MAP_0 0
706# define RASTER_CONFIG_RB_MAP_1 1
707# define RASTER_CONFIG_RB_MAP_2 2
708# define RASTER_CONFIG_RB_MAP_3 3
709
691#define VGT_EVENT_INITIATOR 0x28a90 710#define VGT_EVENT_INITIATOR 0x28a90
692# define SAMPLE_STREAMOUTSTATS1 (1 << 0) 711# define SAMPLE_STREAMOUTSTATS1 (1 << 0)
693# define SAMPLE_STREAMOUTSTATS2 (2 << 0) 712# define SAMPLE_STREAMOUTSTATS2 (2 << 0)
diff --git a/drivers/gpu/drm/sis/sis_drv.c b/drivers/gpu/drm/sis/sis_drv.c
index 30d98d14b5c5..dd14cd1a0033 100644
--- a/drivers/gpu/drm/sis/sis_drv.c
+++ b/drivers/gpu/drm/sis/sis_drv.c
@@ -47,9 +47,9 @@ static int sis_driver_load(struct drm_device *dev, unsigned long chipset)
47 if (dev_priv == NULL) 47 if (dev_priv == NULL)
48 return -ENOMEM; 48 return -ENOMEM;
49 49
50 idr_init(&dev_priv->object_idr);
50 dev->dev_private = (void *)dev_priv; 51 dev->dev_private = (void *)dev_priv;
51 dev_priv->chipset = chipset; 52 dev_priv->chipset = chipset;
52 idr_init(&dev->object_name_idr);
53 53
54 return 0; 54 return 0;
55} 55}
diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c
index 36792bd4da77..36f4b28c1b90 100644
--- a/drivers/gpu/drm/ttm/ttm_bo.c
+++ b/drivers/gpu/drm/ttm/ttm_bo.c
@@ -1204,6 +1204,7 @@ int ttm_bo_init(struct ttm_bo_device *bdev,
1204 (*destroy)(bo); 1204 (*destroy)(bo);
1205 else 1205 else
1206 kfree(bo); 1206 kfree(bo);
1207 ttm_mem_global_free(mem_glob, acc_size);
1207 return -EINVAL; 1208 return -EINVAL;
1208 } 1209 }
1209 bo->destroy = destroy; 1210 bo->destroy = destroy;
@@ -1307,22 +1308,14 @@ int ttm_bo_create(struct ttm_bo_device *bdev,
1307 struct ttm_buffer_object **p_bo) 1308 struct ttm_buffer_object **p_bo)
1308{ 1309{
1309 struct ttm_buffer_object *bo; 1310 struct ttm_buffer_object *bo;
1310 struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
1311 size_t acc_size; 1311 size_t acc_size;
1312 int ret; 1312 int ret;
1313 1313
1314 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1315 ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
1316 if (unlikely(ret != 0))
1317 return ret;
1318
1319 bo = kzalloc(sizeof(*bo), GFP_KERNEL); 1314 bo = kzalloc(sizeof(*bo), GFP_KERNEL);
1320 1315 if (unlikely(bo == NULL))
1321 if (unlikely(bo == NULL)) {
1322 ttm_mem_global_free(mem_glob, acc_size);
1323 return -ENOMEM; 1316 return -ENOMEM;
1324 }
1325 1317
1318 acc_size = ttm_bo_acc_size(bdev, size, sizeof(struct ttm_buffer_object));
1326 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment, 1319 ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
1327 buffer_start, interruptible, 1320 buffer_start, interruptible,
1328 persistent_swap_storage, acc_size, NULL, NULL); 1321 persistent_swap_storage, acc_size, NULL, NULL);
@@ -1834,6 +1827,7 @@ static int ttm_bo_swapout(struct ttm_mem_shrink *shrink)
1834 spin_unlock(&glob->lru_lock); 1827 spin_unlock(&glob->lru_lock);
1835 (void) ttm_bo_cleanup_refs(bo, false, false, false); 1828 (void) ttm_bo_cleanup_refs(bo, false, false, false);
1836 kref_put(&bo->list_kref, ttm_bo_release_list); 1829 kref_put(&bo->list_kref, ttm_bo_release_list);
1830 spin_lock(&glob->lru_lock);
1837 continue; 1831 continue;
1838 } 1832 }
1839 1833
diff --git a/drivers/gpu/drm/udl/udl_drv.c b/drivers/gpu/drm/udl/udl_drv.c
index 4d02c46a9420..6e52069894b3 100644
--- a/drivers/gpu/drm/udl/udl_drv.c
+++ b/drivers/gpu/drm/udl/udl_drv.c
@@ -13,8 +13,21 @@
13 13
14static struct drm_driver driver; 14static struct drm_driver driver;
15 15
16/*
17 * There are many DisplayLink-based graphics products, all with unique PIDs.
18 * So we match on DisplayLink's VID + Vendor-Defined Interface Class (0xff).
19 * We also require a match on SubClass (0x00) and Protocol (0x00),
20 * which is compatible with all known USB 2.0 era graphics chips and firmware,
21 * but allows DisplayLink to increment those for any future incompatible chips
22 */
16static struct usb_device_id id_table[] = { 23static struct usb_device_id id_table[] = {
17 {.idVendor = 0x17e9, .match_flags = USB_DEVICE_ID_MATCH_VENDOR,}, 24 {.idVendor = 0x17e9, .bInterfaceClass = 0xff,
25 .bInterfaceSubClass = 0x00,
26 .bInterfaceProtocol = 0x00,
27 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
28 USB_DEVICE_ID_MATCH_INT_CLASS |
29 USB_DEVICE_ID_MATCH_INT_SUBCLASS |
30 USB_DEVICE_ID_MATCH_INT_PROTOCOL,},
18 {}, 31 {},
19}; 32};
20MODULE_DEVICE_TABLE(usb, id_table); 33MODULE_DEVICE_TABLE(usb, id_table);
diff --git a/drivers/gpu/drm/via/via_map.c b/drivers/gpu/drm/via/via_map.c
index 1f182254e81e..c126182ac07e 100644
--- a/drivers/gpu/drm/via/via_map.c
+++ b/drivers/gpu/drm/via/via_map.c
@@ -100,12 +100,11 @@ int via_driver_load(struct drm_device *dev, unsigned long chipset)
100 if (dev_priv == NULL) 100 if (dev_priv == NULL)
101 return -ENOMEM; 101 return -ENOMEM;
102 102
103 idr_init(&dev_priv->object_idr);
103 dev->dev_private = (void *)dev_priv; 104 dev->dev_private = (void *)dev_priv;
104 105
105 dev_priv->chipset = chipset; 106 dev_priv->chipset = chipset;
106 107
107 idr_init(&dev->object_name_idr);
108
109 pci_set_master(dev->pdev); 108 pci_set_master(dev->pdev);
110 109
111 ret = drm_vblank_init(dev, 1); 110 ret = drm_vblank_init(dev, 1);
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
index 51c9ba5cd2fb..21ee78226560 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c
@@ -66,7 +66,7 @@ static int vmw_gmr2_bind(struct vmw_private *dev_priv,
66 cmd += sizeof(remap_cmd) / sizeof(uint32); 66 cmd += sizeof(remap_cmd) / sizeof(uint32);
67 67
68 for (i = 0; i < num_pages; ++i) { 68 for (i = 0; i < num_pages; ++i) {
69 if (VMW_PPN_SIZE > 4) 69 if (VMW_PPN_SIZE <= 4)
70 *cmd = page_to_pfn(*pages++); 70 *cmd = page_to_pfn(*pages++);
71 else 71 else
72 *((uint64_t *)cmd) = page_to_pfn(*pages++); 72 *((uint64_t *)cmd) = page_to_pfn(*pages++);
diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c
index 38f9534ac513..5b3c7d135dc9 100644
--- a/drivers/gpu/vga/vga_switcheroo.c
+++ b/drivers/gpu/vga/vga_switcheroo.c
@@ -190,6 +190,19 @@ find_active_client(struct list_head *head)
190 return NULL; 190 return NULL;
191} 191}
192 192
193int vga_switcheroo_get_client_state(struct pci_dev *pdev)
194{
195 struct vga_switcheroo_client *client;
196
197 client = find_client_from_pci(&vgasr_priv.clients, pdev);
198 if (!client)
199 return VGA_SWITCHEROO_NOT_FOUND;
200 if (!vgasr_priv.active)
201 return VGA_SWITCHEROO_INIT;
202 return client->pwr_state;
203}
204EXPORT_SYMBOL(vga_switcheroo_get_client_state);
205
193void vga_switcheroo_unregister_client(struct pci_dev *pdev) 206void vga_switcheroo_unregister_client(struct pci_dev *pdev)
194{ 207{
195 struct vga_switcheroo_client *client; 208 struct vga_switcheroo_client *client;
@@ -291,8 +304,6 @@ static int vga_switchto_stage1(struct vga_switcheroo_client *new_client)
291 vga_switchon(new_client); 304 vga_switchon(new_client);
292 305
293 vga_set_default_device(new_client->pdev); 306 vga_set_default_device(new_client->pdev);
294 set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
295
296 return 0; 307 return 0;
297} 308}
298 309
@@ -308,6 +319,8 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
308 319
309 active->active = false; 320 active->active = false;
310 321
322 set_audio_state(active->id, VGA_SWITCHEROO_OFF);
323
311 if (new_client->fb_info) { 324 if (new_client->fb_info) {
312 struct fb_event event; 325 struct fb_event event;
313 event.info = new_client->fb_info; 326 event.info = new_client->fb_info;
@@ -321,11 +334,11 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client)
321 if (new_client->ops->reprobe) 334 if (new_client->ops->reprobe)
322 new_client->ops->reprobe(new_client->pdev); 335 new_client->ops->reprobe(new_client->pdev);
323 336
324 set_audio_state(active->id, VGA_SWITCHEROO_OFF);
325
326 if (active->pwr_state == VGA_SWITCHEROO_ON) 337 if (active->pwr_state == VGA_SWITCHEROO_ON)
327 vga_switchoff(active); 338 vga_switchoff(active);
328 339
340 set_audio_state(new_client->id, VGA_SWITCHEROO_ON);
341
329 new_client->active = true; 342 new_client->active = true;
330 return 0; 343 return 0;
331} 344}
@@ -371,8 +384,9 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
371 /* pwr off the device not in use */ 384 /* pwr off the device not in use */
372 if (strncmp(usercmd, "OFF", 3) == 0) { 385 if (strncmp(usercmd, "OFF", 3) == 0) {
373 list_for_each_entry(client, &vgasr_priv.clients, list) { 386 list_for_each_entry(client, &vgasr_priv.clients, list) {
374 if (client->active) 387 if (client->active || client_is_audio(client))
375 continue; 388 continue;
389 set_audio_state(client->id, VGA_SWITCHEROO_OFF);
376 if (client->pwr_state == VGA_SWITCHEROO_ON) 390 if (client->pwr_state == VGA_SWITCHEROO_ON)
377 vga_switchoff(client); 391 vga_switchoff(client);
378 } 392 }
@@ -381,10 +395,11 @@ vga_switcheroo_debugfs_write(struct file *filp, const char __user *ubuf,
381 /* pwr on the device not in use */ 395 /* pwr on the device not in use */
382 if (strncmp(usercmd, "ON", 2) == 0) { 396 if (strncmp(usercmd, "ON", 2) == 0) {
383 list_for_each_entry(client, &vgasr_priv.clients, list) { 397 list_for_each_entry(client, &vgasr_priv.clients, list) {
384 if (client->active) 398 if (client->active || client_is_audio(client))
385 continue; 399 continue;
386 if (client->pwr_state == VGA_SWITCHEROO_OFF) 400 if (client->pwr_state == VGA_SWITCHEROO_OFF)
387 vga_switchon(client); 401 vga_switchon(client);
402 set_audio_state(client->id, VGA_SWITCHEROO_ON);
388 } 403 }
389 goto out; 404 goto out;
390 } 405 }
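Usage illustration only (the caller below is hypothetical, not part of this patch): the newly exported vga_switcheroo_get_client_state() lets a GPU driver ask for its current power state, with VGA_SWITCHEROO_INIT returned while vga_switcheroo is not yet active and VGA_SWITCHEROO_NOT_FOUND for unregistered devices.

#include <linux/pci.h>
#include <linux/vga_switcheroo.h>

/* Hypothetical helper: treat "switcheroo not active yet" as powered on. */
static bool my_gpu_is_powered_on(struct pci_dev *pdev)
{
	int state = vga_switcheroo_get_client_state(pdev);

	return state == VGA_SWITCHEROO_ON || state == VGA_SWITCHEROO_INIT;
}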
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index f082e48ab113..70d62f5bc909 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -215,7 +215,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
215 int i; 215 int i;
216 216
217 if (send_command(cmd) || send_argument(key)) { 217 if (send_command(cmd) || send_argument(key)) {
218 pr_warn("%s: read arg fail\n", key); 218 pr_warn("%.4s: read arg fail\n", key);
219 return -EIO; 219 return -EIO;
220 } 220 }
221 221
@@ -223,7 +223,7 @@ static int read_smc(u8 cmd, const char *key, u8 *buffer, u8 len)
223 223
224 for (i = 0; i < len; i++) { 224 for (i = 0; i < len; i++) {
225 if (__wait_status(0x05)) { 225 if (__wait_status(0x05)) {
226 pr_warn("%s: read data fail\n", key); 226 pr_warn("%.4s: read data fail\n", key);
227 return -EIO; 227 return -EIO;
228 } 228 }
229 buffer[i] = inb(APPLESMC_DATA_PORT); 229 buffer[i] = inb(APPLESMC_DATA_PORT);
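The applesmc change above narrows "%s" to "%.4s", presumably because SMC keys are fixed four-character identifiers that may not be NUL-terminated. A standalone illustration of the precision specifier (plain C, not from the driver):

#include <stdio.h>

int main(void)
{
	/* Four key bytes with no terminating NUL. */
	char key[4] = { 'T', 'C', '0', 'P' };

	/* "%.4s" prints at most four bytes, so the missing NUL is harmless. */
	printf("%.4s: read arg fail\n", key);
	return 0;
}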
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index b9d512331ed4..7f1feb2f467a 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -191,6 +191,24 @@ static ssize_t show_temp(struct device *dev,
191 return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN; 191 return tdata->valid ? sprintf(buf, "%d\n", tdata->temp) : -EAGAIN;
192} 192}
193 193
194struct tjmax {
195 char const *id;
196 int tjmax;
197};
198
199static struct tjmax __cpuinitconst tjmax_table[] = {
200 { "CPU D410", 100000 },
201 { "CPU D425", 100000 },
202 { "CPU D510", 100000 },
203 { "CPU D525", 100000 },
204 { "CPU N450", 100000 },
205 { "CPU N455", 100000 },
206 { "CPU N470", 100000 },
207 { "CPU N475", 100000 },
208 { "CPU 230", 100000 },
209 { "CPU 330", 125000 },
210};
211
194static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id, 212static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
195 struct device *dev) 213 struct device *dev)
196{ 214{
@@ -202,6 +220,13 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
202 int err; 220 int err;
203 u32 eax, edx; 221 u32 eax, edx;
204 struct pci_dev *host_bridge; 222 struct pci_dev *host_bridge;
223 int i;
224
225 /* explicit tjmax table entries override heuristics */
226 for (i = 0; i < ARRAY_SIZE(tjmax_table); i++) {
227 if (strstr(c->x86_model_id, tjmax_table[i].id))
228 return tjmax_table[i].tjmax;
229 }
205 230
206 /* Early chips have no MSR for TjMax */ 231 /* Early chips have no MSR for TjMax */
207 232
@@ -210,7 +235,8 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
210 235
211 /* Atom CPUs */ 236 /* Atom CPUs */
212 237
213 if (c->x86_model == 0x1c) { 238 if (c->x86_model == 0x1c || c->x86_model == 0x26
239 || c->x86_model == 0x27) {
214 usemsr_ee = 0; 240 usemsr_ee = 0;
215 241
216 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0)); 242 host_bridge = pci_get_bus_and_slot(0, PCI_DEVFN(0, 0));
@@ -223,6 +249,9 @@ static int __cpuinit adjust_tjmax(struct cpuinfo_x86 *c, u32 id,
223 tjmax = 90000; 249 tjmax = 90000;
224 250
225 pci_dev_put(host_bridge); 251 pci_dev_put(host_bridge);
252 } else if (c->x86_model == 0x36) {
253 usemsr_ee = 0;
254 tjmax = 100000;
226 } 255 }
227 256
228 if (c->x86_model > 0xe && usemsr_ee) { 257 if (c->x86_model > 0xe && usemsr_ee) {
@@ -772,7 +801,7 @@ MODULE_DEVICE_TABLE(x86cpu, coretemp_ids);
772 801
773static int __init coretemp_init(void) 802static int __init coretemp_init(void)
774{ 803{
775 int i, err = -ENODEV; 804 int i, err;
776 805
777 /* 806 /*
778 * CPUID.06H.EAX[0] indicates whether the CPU has thermal 807 * CPUID.06H.EAX[0] indicates whether the CPU has thermal
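The coretemp change adds a model-string table that is consulted before any heuristics; matching is a plain substring search against the CPU model string. A standalone sketch of that lookup (entries copied from the table above, helper names invented):

#include <stdio.h>
#include <string.h>

struct tjmax { const char *id; int tjmax; };

static const struct tjmax table[] = {
	{ "CPU N450", 100000 },
	{ "CPU 330",  125000 },
};

static int lookup_tjmax(const char *model)
{
	size_t i;

	for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
		if (strstr(model, table[i].id))
			return table[i].tjmax;
	return -1;	/* no explicit entry: fall back to heuristics */
}

int main(void)
{
	printf("%d\n", lookup_tjmax("Intel(R) Atom(TM) CPU N450   @ 1.66GHz"));
	return 0;
}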
diff --git a/drivers/hwmon/emc2103.c b/drivers/hwmon/emc2103.c
index 9691f664c76e..e7d234b59312 100644
--- a/drivers/hwmon/emc2103.c
+++ b/drivers/hwmon/emc2103.c
@@ -451,11 +451,15 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
451 data->fan_rpm_control = true; 451 data->fan_rpm_control = true;
452 break; 452 break;
453 default: 453 default:
454 mutex_unlock(&data->update_lock); 454 count = -EINVAL;
455 return -EINVAL; 455 goto err;
456 } 456 }
457 457
458 read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg); 458 result = read_u8_from_i2c(client, REG_FAN_CONF1, &conf_reg);
459 if (result) {
460 count = result;
461 goto err;
462 }
459 463
460 if (data->fan_rpm_control) 464 if (data->fan_rpm_control)
461 conf_reg |= 0x80; 465 conf_reg |= 0x80;
@@ -463,7 +467,7 @@ static ssize_t set_pwm_enable(struct device *dev, struct device_attribute *da,
463 conf_reg &= ~0x80; 467 conf_reg &= ~0x80;
464 468
465 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg); 469 i2c_smbus_write_byte_data(client, REG_FAN_CONF1, conf_reg);
466 470err:
467 mutex_unlock(&data->update_lock); 471 mutex_unlock(&data->update_lock);
468 return count; 472 return count;
469} 473}
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index beb2491db274..a0edd9854218 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -37,4 +37,16 @@ config I2C_MUX_PCA954x
37 This driver can also be built as a module. If so, the module 37 This driver can also be built as a module. If so, the module
38 will be called i2c-mux-pca954x. 38 will be called i2c-mux-pca954x.
39 39
40config I2C_MUX_PINCTRL
41 tristate "pinctrl-based I2C multiplexer"
42 depends on PINCTRL
43 help
44 If you say yes to this option, support will be included for an I2C
45 multiplexer that uses the pinctrl subsystem, i.e. pin multiplexing.
46 This is useful for SoCs whose I2C module's signals can be routed to
47 different sets of pins at run-time.
48
49 This driver can also be built as a module. If so, the module will be
50 called i2c-mux-pinctrl.
51
40endmenu 52endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 5826249b29ca..76da8692afff 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -4,5 +4,6 @@
4obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o 4obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
5obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o 5obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
6obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o 6obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
7obj-$(CONFIG_I2C_MUX_PINCTRL) += i2c-mux-pinctrl.o
7 8
8ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG 9ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/i2c-mux-pinctrl.c b/drivers/i2c/muxes/i2c-mux-pinctrl.c
new file mode 100644
index 000000000000..46a669763476
--- /dev/null
+++ b/drivers/i2c/muxes/i2c-mux-pinctrl.c
@@ -0,0 +1,279 @@
1/*
2 * I2C multiplexer using pinctrl API
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#include <linux/i2c.h>
20#include <linux/i2c-mux.h>
21#include <linux/init.h>
22#include <linux/module.h>
23#include <linux/of_i2c.h>
24#include <linux/pinctrl/consumer.h>
25#include <linux/i2c-mux-pinctrl.h>
26#include <linux/platform_device.h>
27#include <linux/slab.h>
28
29struct i2c_mux_pinctrl {
30 struct device *dev;
31 struct i2c_mux_pinctrl_platform_data *pdata;
32 struct pinctrl *pinctrl;
33 struct pinctrl_state **states;
34 struct pinctrl_state *state_idle;
35 struct i2c_adapter *parent;
36 struct i2c_adapter **busses;
37};
38
39static int i2c_mux_pinctrl_select(struct i2c_adapter *adap, void *data,
40 u32 chan)
41{
42 struct i2c_mux_pinctrl *mux = data;
43
44 return pinctrl_select_state(mux->pinctrl, mux->states[chan]);
45}
46
47static int i2c_mux_pinctrl_deselect(struct i2c_adapter *adap, void *data,
48 u32 chan)
49{
50 struct i2c_mux_pinctrl *mux = data;
51
52 return pinctrl_select_state(mux->pinctrl, mux->state_idle);
53}
54
55#ifdef CONFIG_OF
56static int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
57 struct platform_device *pdev)
58{
59 struct device_node *np = pdev->dev.of_node;
60 int num_names, i, ret;
61 struct device_node *adapter_np;
62 struct i2c_adapter *adapter;
63
64 if (!np)
65 return 0;
66
67 mux->pdata = devm_kzalloc(&pdev->dev, sizeof(*mux->pdata), GFP_KERNEL);
68 if (!mux->pdata) {
69 dev_err(mux->dev,
70 "Cannot allocate i2c_mux_pinctrl_platform_data\n");
71 return -ENOMEM;
72 }
73
74 num_names = of_property_count_strings(np, "pinctrl-names");
75 if (num_names < 0) {
76 dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
77 num_names);
78 return num_names;
79 }
80
81 mux->pdata->pinctrl_states = devm_kzalloc(&pdev->dev,
82 sizeof(*mux->pdata->pinctrl_states) * num_names,
83 GFP_KERNEL);
84 if (!mux->pdata->pinctrl_states) {
85 dev_err(mux->dev, "Cannot allocate pinctrl_states\n");
86 return -ENOMEM;
87 }
88
89 for (i = 0; i < num_names; i++) {
90 ret = of_property_read_string_index(np, "pinctrl-names", i,
91 &mux->pdata->pinctrl_states[mux->pdata->bus_count]);
92 if (ret < 0) {
93 dev_err(mux->dev, "Cannot parse pinctrl-names: %d\n",
94 ret);
95 return ret;
96 }
97 if (!strcmp(mux->pdata->pinctrl_states[mux->pdata->bus_count],
98 "idle")) {
99 if (i != num_names - 1) {
100 dev_err(mux->dev, "idle state must be last\n");
101 return -EINVAL;
102 }
103 mux->pdata->pinctrl_state_idle = "idle";
104 } else {
105 mux->pdata->bus_count++;
106 }
107 }
108
109 adapter_np = of_parse_phandle(np, "i2c-parent", 0);
110 if (!adapter_np) {
111 dev_err(mux->dev, "Cannot parse i2c-parent\n");
112 return -ENODEV;
113 }
114 adapter = of_find_i2c_adapter_by_node(adapter_np);
115 if (!adapter) {
116 dev_err(mux->dev, "Cannot find parent bus\n");
117 return -ENODEV;
118 }
119 mux->pdata->parent_bus_num = i2c_adapter_id(adapter);
120 put_device(&adapter->dev);
121
122 return 0;
123}
124#else
125static inline int i2c_mux_pinctrl_parse_dt(struct i2c_mux_pinctrl *mux,
126 struct platform_device *pdev)
127{
128 return 0;
129}
130#endif
131
132static int __devinit i2c_mux_pinctrl_probe(struct platform_device *pdev)
133{
134 struct i2c_mux_pinctrl *mux;
135 int (*deselect)(struct i2c_adapter *, void *, u32);
136 int i, ret;
137
138 mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL);
139 if (!mux) {
140 dev_err(&pdev->dev, "Cannot allocate i2c_mux_pinctrl\n");
141 ret = -ENOMEM;
142 goto err;
143 }
144 platform_set_drvdata(pdev, mux);
145
146 mux->dev = &pdev->dev;
147
148 mux->pdata = pdev->dev.platform_data;
149 if (!mux->pdata) {
150 ret = i2c_mux_pinctrl_parse_dt(mux, pdev);
151 if (ret < 0)
152 goto err;
153 }
154 if (!mux->pdata) {
155 dev_err(&pdev->dev, "Missing platform data\n");
156 ret = -ENODEV;
157 goto err;
158 }
159
160 mux->states = devm_kzalloc(&pdev->dev,
161 sizeof(*mux->states) * mux->pdata->bus_count,
162 GFP_KERNEL);
163 if (!mux->states) {
164 dev_err(&pdev->dev, "Cannot allocate states\n");
165 ret = -ENOMEM;
166 goto err;
167 }
168
169 mux->busses = devm_kzalloc(&pdev->dev,
170 sizeof(mux->busses) * mux->pdata->bus_count,
171 GFP_KERNEL);
172	if (!mux->busses) {
173 dev_err(&pdev->dev, "Cannot allocate busses\n");
174 ret = -ENOMEM;
175 goto err;
176 }
177
178 mux->pinctrl = devm_pinctrl_get(&pdev->dev);
179 if (IS_ERR(mux->pinctrl)) {
180 ret = PTR_ERR(mux->pinctrl);
181 dev_err(&pdev->dev, "Cannot get pinctrl: %d\n", ret);
182 goto err;
183 }
184 for (i = 0; i < mux->pdata->bus_count; i++) {
185 mux->states[i] = pinctrl_lookup_state(mux->pinctrl,
186 mux->pdata->pinctrl_states[i]);
187 if (IS_ERR(mux->states[i])) {
188 ret = PTR_ERR(mux->states[i]);
189 dev_err(&pdev->dev,
190 "Cannot look up pinctrl state %s: %d\n",
191 mux->pdata->pinctrl_states[i], ret);
192 goto err;
193 }
194 }
195 if (mux->pdata->pinctrl_state_idle) {
196 mux->state_idle = pinctrl_lookup_state(mux->pinctrl,
197 mux->pdata->pinctrl_state_idle);
198 if (IS_ERR(mux->state_idle)) {
199 ret = PTR_ERR(mux->state_idle);
200 dev_err(&pdev->dev,
201 "Cannot look up pinctrl state %s: %d\n",
202 mux->pdata->pinctrl_state_idle, ret);
203 goto err;
204 }
205
206 deselect = i2c_mux_pinctrl_deselect;
207 } else {
208 deselect = NULL;
209 }
210
211 mux->parent = i2c_get_adapter(mux->pdata->parent_bus_num);
212 if (!mux->parent) {
213 dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
214 mux->pdata->parent_bus_num);
215 ret = -ENODEV;
216 goto err;
217 }
218
219 for (i = 0; i < mux->pdata->bus_count; i++) {
220 u32 bus = mux->pdata->base_bus_num ?
221 (mux->pdata->base_bus_num + i) : 0;
222
223 mux->busses[i] = i2c_add_mux_adapter(mux->parent, &pdev->dev,
224 mux, bus, i,
225 i2c_mux_pinctrl_select,
226 deselect);
227 if (!mux->busses[i]) {
228 ret = -ENODEV;
229 dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
230 goto err_del_adapter;
231 }
232 }
233
234 return 0;
235
236err_del_adapter:
237 for (; i > 0; i--)
238 i2c_del_mux_adapter(mux->busses[i - 1]);
239 i2c_put_adapter(mux->parent);
240err:
241 return ret;
242}
243
244static int __devexit i2c_mux_pinctrl_remove(struct platform_device *pdev)
245{
246 struct i2c_mux_pinctrl *mux = platform_get_drvdata(pdev);
247 int i;
248
249 for (i = 0; i < mux->pdata->bus_count; i++)
250 i2c_del_mux_adapter(mux->busses[i]);
251
252 i2c_put_adapter(mux->parent);
253
254 return 0;
255}
256
257#ifdef CONFIG_OF
258static const struct of_device_id i2c_mux_pinctrl_of_match[] __devinitconst = {
259 { .compatible = "i2c-mux-pinctrl", },
260 {},
261};
262MODULE_DEVICE_TABLE(of, i2c_mux_pinctrl_of_match);
263#endif
264
265static struct platform_driver i2c_mux_pinctrl_driver = {
266 .driver = {
267 .name = "i2c-mux-pinctrl",
268 .owner = THIS_MODULE,
269 .of_match_table = of_match_ptr(i2c_mux_pinctrl_of_match),
270 },
271 .probe = i2c_mux_pinctrl_probe,
272 .remove = __devexit_p(i2c_mux_pinctrl_remove),
273};
274module_platform_driver(i2c_mux_pinctrl_driver);
275
276MODULE_DESCRIPTION("pinctrl-based I2C multiplexer driver");
277MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
278MODULE_LICENSE("GPL v2");
279MODULE_ALIAS("platform:i2c-mux-pinctrl");
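Usage sketch only: the driver above dereferences platform data fields parent_bus_num, base_bus_num, bus_count, pinctrl_states and pinctrl_state_idle. A hypothetical board-file registration, with state names and bus numbers invented for illustration and the header layout assumed from those accesses, might look like this:

#include <linux/i2c-mux-pinctrl.h>
#include <linux/platform_device.h>

static const char *board_i2cmux_states[] = { "i2c-a", "i2c-b" };

static struct i2c_mux_pinctrl_platform_data board_i2cmux_pdata = {
	.parent_bus_num		= 0,
	.base_bus_num		= 3,		/* child adapters become i2c-3, i2c-4 */
	.bus_count		= 2,
	.pinctrl_states		= board_i2cmux_states,
	.pinctrl_state_idle	= "idle",	/* optional; enables deselect */
};

static struct platform_device board_i2cmux_device = {
	.name			= "i2c-mux-pinctrl",
	.id			= -1,
	.dev.platform_data	= &board_i2cmux_pdata,
};

The child bus numbering in the comment follows the base_bus_num handling in the probe loop above; with base_bus_num left at 0 the adapters are numbered dynamically.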
diff --git a/drivers/ide/icside.c b/drivers/ide/icside.c
index 8716066a2f2b..bcb507b0cfd4 100644
--- a/drivers/ide/icside.c
+++ b/drivers/ide/icside.c
@@ -236,7 +236,7 @@ static const struct ide_port_ops icside_v6_no_dma_port_ops = {
236 */ 236 */
237static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) 237static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
238{ 238{
239 unsigned long cycle_time; 239 unsigned long cycle_time = 0;
240 int use_dma_info = 0; 240 int use_dma_info = 0;
241 const u8 xfer_mode = drive->dma_mode; 241 const u8 xfer_mode = drive->dma_mode;
242 242
@@ -271,9 +271,9 @@ static void icside_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
271 271
272 ide_set_drivedata(drive, (void *)cycle_time); 272 ide_set_drivedata(drive, (void *)cycle_time);
273 273
274 printk("%s: %s selected (peak %dMB/s)\n", drive->name, 274 printk(KERN_INFO "%s: %s selected (peak %luMB/s)\n",
275 ide_xfer_verbose(xfer_mode), 275 drive->name, ide_xfer_verbose(xfer_mode),
276 2000 / (unsigned long)ide_get_drivedata(drive)); 276 2000 / (cycle_time ? cycle_time : (unsigned long) -1));
277} 277}
278 278
279static const struct ide_port_ops icside_v6_port_ops = { 279static const struct ide_port_ops icside_v6_port_ops = {
@@ -375,8 +375,6 @@ static const struct ide_dma_ops icside_v6_dma_ops = {
375 .dma_test_irq = icside_dma_test_irq, 375 .dma_test_irq = icside_dma_test_irq,
376 .dma_lost_irq = ide_dma_lost_irq, 376 .dma_lost_irq = ide_dma_lost_irq,
377}; 377};
378#else
379#define icside_v6_dma_ops NULL
380#endif 378#endif
381 379
382static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d) 380static int icside_dma_off_init(ide_hwif_t *hwif, const struct ide_port_info *d)
@@ -456,7 +454,6 @@ err_free:
456static const struct ide_port_info icside_v6_port_info __initdata = { 454static const struct ide_port_info icside_v6_port_info __initdata = {
457 .init_dma = icside_dma_off_init, 455 .init_dma = icside_dma_off_init,
458 .port_ops = &icside_v6_no_dma_port_ops, 456 .port_ops = &icside_v6_no_dma_port_ops,
459 .dma_ops = &icside_v6_dma_ops,
460 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO, 457 .host_flags = IDE_HFLAG_SERIALIZE | IDE_HFLAG_MMIO,
461 .mwdma_mask = ATA_MWDMA2, 458 .mwdma_mask = ATA_MWDMA2,
462 .swdma_mask = ATA_SWDMA2, 459 .swdma_mask = ATA_SWDMA2,
@@ -518,11 +515,13 @@ icside_register_v6(struct icside_state *state, struct expansion_card *ec)
518 515
519 ecard_set_drvdata(ec, state); 516 ecard_set_drvdata(ec, state);
520 517
518#ifdef CONFIG_BLK_DEV_IDEDMA_ICS
521 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) { 519 if (ec->dma != NO_DMA && !request_dma(ec->dma, DRV_NAME)) {
522 d.init_dma = icside_dma_init; 520 d.init_dma = icside_dma_init;
523 d.port_ops = &icside_v6_port_ops; 521 d.port_ops = &icside_v6_port_ops;
524 } else 522 d.dma_ops = &icside_v6_dma_ops;
525 d.dma_ops = NULL; 523 }
524#endif
526 525
527 ret = ide_host_register(host, &d, hws); 526 ret = ide_host_register(host, &d, hws);
528 if (ret) 527 if (ret)
diff --git a/drivers/ide/ide-cs.c b/drivers/ide/ide-cs.c
index 28e344ea514c..f1e922e2479a 100644
--- a/drivers/ide/ide-cs.c
+++ b/drivers/ide/ide-cs.c
@@ -167,7 +167,8 @@ static int pcmcia_check_one_config(struct pcmcia_device *pdev, void *priv_data)
167{ 167{
168 int *is_kme = priv_data; 168 int *is_kme = priv_data;
169 169
170 if (!(pdev->resource[0]->flags & IO_DATA_PATH_WIDTH_8)) { 170 if ((pdev->resource[0]->flags & IO_DATA_PATH_WIDTH)
171 != IO_DATA_PATH_WIDTH_8) {
171 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; 172 pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
172 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO; 173 pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_AUTO;
173 } 174 }
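The ide-cs change above replaces a single-bit test with a masked field comparison: if the 8-bit width is one encoding of a multi-bit field rather than an independent flag, "flags & IO_DATA_PATH_WIDTH_8" can match other encodings too. A standalone sketch with made-up field values (assumed, not taken from the pcmcia headers):

#include <stdio.h>

/* Assumed illustrative encodings of a two-bit "data path width" field. */
#define WIDTH_MASK	0x18
#define WIDTH_8		0x08
#define WIDTH_16	0x10
#define WIDTH_AUTO	0x18

int main(void)
{
	unsigned int flags = WIDTH_AUTO;

	/* Wrong: WIDTH_AUTO shares a bit with WIDTH_8, so this reports true. */
	printf("bit test:  %d\n", !!(flags & WIDTH_8));
	/* Right: isolate the whole field, then compare against the encoding. */
	printf("mask test: %d\n", (flags & WIDTH_MASK) == WIDTH_8);
	return 0;
}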
diff --git a/drivers/iio/Kconfig b/drivers/iio/Kconfig
index 56eecefcec75..2ec93da41e2c 100644
--- a/drivers/iio/Kconfig
+++ b/drivers/iio/Kconfig
@@ -8,8 +8,7 @@ menuconfig IIO
8 help 8 help
9 The industrial I/O subsystem provides a unified framework for 9 The industrial I/O subsystem provides a unified framework for
10 drivers for many different types of embedded sensors using a 10 drivers for many different types of embedded sensors using a
11 number of different physical interfaces (i2c, spi, etc). See 11 number of different physical interfaces (i2c, spi, etc).
12 Documentation/iio for more information.
13 12
14if IIO 13if IIO
15 14
diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
index 1ddd8861c71b..4f947e4377ef 100644
--- a/drivers/iio/industrialio-core.c
+++ b/drivers/iio/industrialio-core.c
@@ -661,7 +661,6 @@ static int iio_device_register_sysfs(struct iio_dev *indio_dev)
661 * New channel registration method - relies on the fact a group does 661 * New channel registration method - relies on the fact a group does
662 * not need to be initialized if its name is NULL. 662 * not need to be initialized if its name is NULL.
663 */ 663 */
664 INIT_LIST_HEAD(&indio_dev->channel_attr_list);
665 if (indio_dev->channels) 664 if (indio_dev->channels)
666 for (i = 0; i < indio_dev->num_channels; i++) { 665 for (i = 0; i < indio_dev->num_channels; i++) {
667 ret = iio_device_add_channel_sysfs(indio_dev, 666 ret = iio_device_add_channel_sysfs(indio_dev,
@@ -725,12 +724,16 @@ static void iio_device_unregister_sysfs(struct iio_dev *indio_dev)
725static void iio_dev_release(struct device *device) 724static void iio_dev_release(struct device *device)
726{ 725{
727 struct iio_dev *indio_dev = dev_to_iio_dev(device); 726 struct iio_dev *indio_dev = dev_to_iio_dev(device);
728 cdev_del(&indio_dev->chrdev); 727 if (indio_dev->chrdev.dev)
728 cdev_del(&indio_dev->chrdev);
729 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED) 729 if (indio_dev->modes & INDIO_BUFFER_TRIGGERED)
730 iio_device_unregister_trigger_consumer(indio_dev); 730 iio_device_unregister_trigger_consumer(indio_dev);
731 iio_device_unregister_eventset(indio_dev); 731 iio_device_unregister_eventset(indio_dev);
732 iio_device_unregister_sysfs(indio_dev); 732 iio_device_unregister_sysfs(indio_dev);
733 iio_device_unregister_debugfs(indio_dev); 733 iio_device_unregister_debugfs(indio_dev);
734
735 ida_simple_remove(&iio_ida, indio_dev->id);
736 kfree(indio_dev);
734} 737}
735 738
736static struct device_type iio_dev_type = { 739static struct device_type iio_dev_type = {
@@ -761,6 +764,7 @@ struct iio_dev *iio_device_alloc(int sizeof_priv)
761 dev_set_drvdata(&dev->dev, (void *)dev); 764 dev_set_drvdata(&dev->dev, (void *)dev);
762 mutex_init(&dev->mlock); 765 mutex_init(&dev->mlock);
763 mutex_init(&dev->info_exist_lock); 766 mutex_init(&dev->info_exist_lock);
767 INIT_LIST_HEAD(&dev->channel_attr_list);
764 768
765 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL); 769 dev->id = ida_simple_get(&iio_ida, 0, 0, GFP_KERNEL);
766 if (dev->id < 0) { 770 if (dev->id < 0) {
@@ -778,10 +782,8 @@ EXPORT_SYMBOL(iio_device_alloc);
778 782
779void iio_device_free(struct iio_dev *dev) 783void iio_device_free(struct iio_dev *dev)
780{ 784{
781 if (dev) { 785 if (dev)
782 ida_simple_remove(&iio_ida, dev->id); 786 put_device(&dev->dev);
783 kfree(dev);
784 }
785} 787}
786EXPORT_SYMBOL(iio_device_free); 788EXPORT_SYMBOL(iio_device_free);
787 789
@@ -902,7 +904,7 @@ void iio_device_unregister(struct iio_dev *indio_dev)
902 mutex_lock(&indio_dev->info_exist_lock); 904 mutex_lock(&indio_dev->info_exist_lock);
903 indio_dev->info = NULL; 905 indio_dev->info = NULL;
904 mutex_unlock(&indio_dev->info_exist_lock); 906 mutex_unlock(&indio_dev->info_exist_lock);
905 device_unregister(&indio_dev->dev); 907 device_del(&indio_dev->dev);
906} 908}
907EXPORT_SYMBOL(iio_device_unregister); 909EXPORT_SYMBOL(iio_device_unregister);
908subsys_initcall(iio_init); 910subsys_initcall(iio_init);
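The iio change above moves the final kfree() and ida removal into the struct device release callback, turns iio_device_free() into a put_device(), and makes iio_device_unregister() call device_del() instead of device_unregister(). A generic sketch of that ownership pattern (names invented, not iio code):

#include <linux/device.h>
#include <linux/slab.h>

struct foo {
	struct device dev;
	/* driver-private fields ... */
};

static void foo_release(struct device *dev)
{
	/* last reference dropped: now it is safe to free the memory */
	kfree(container_of(dev, struct foo, dev));
}

/* setup side (not shown): f->dev.release = foo_release; device_initialize();
 * device_add(); */

static void foo_unregister(struct foo *f)
{
	device_del(&f->dev);		/* tear down sysfs, keep memory */
}

static void foo_free(struct foo *f)
{
	if (f)
		put_device(&f->dev);	/* final put invokes foo_release() */
}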
diff --git a/drivers/infiniband/core/cma.c b/drivers/infiniband/core/cma.c
index 55d5642eb10a..2e826f9702c6 100644
--- a/drivers/infiniband/core/cma.c
+++ b/drivers/infiniband/core/cma.c
@@ -1184,7 +1184,7 @@ static void cma_set_req_event_data(struct rdma_cm_event *event,
1184 1184
1185static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event) 1185static int cma_check_req_qp_type(struct rdma_cm_id *id, struct ib_cm_event *ib_event)
1186{ 1186{
1187 return (((ib_event->event == IB_CM_REQ_RECEIVED) || 1187 return (((ib_event->event == IB_CM_REQ_RECEIVED) &&
1188 (ib_event->param.req_rcvd.qp_type == id->qp_type)) || 1188 (ib_event->param.req_rcvd.qp_type == id->qp_type)) ||
1189 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) && 1189 ((ib_event->event == IB_CM_SIDR_REQ_RECEIVED) &&
1190 (id->qp_type == IB_QPT_UD)) || 1190 (id->qp_type == IB_QPT_UD)) ||
diff --git a/drivers/infiniband/hw/cxgb4/cm.c b/drivers/infiniband/hw/cxgb4/cm.c
index 55ab284e22f2..b18870c455ad 100644
--- a/drivers/infiniband/hw/cxgb4/cm.c
+++ b/drivers/infiniband/hw/cxgb4/cm.c
@@ -1593,6 +1593,10 @@ static int import_ep(struct c4iw_ep *ep, __be32 peer_ip, struct dst_entry *dst,
1593 struct net_device *pdev; 1593 struct net_device *pdev;
1594 1594
1595 pdev = ip_dev_find(&init_net, peer_ip); 1595 pdev = ip_dev_find(&init_net, peer_ip);
1596 if (!pdev) {
1597 err = -ENODEV;
1598 goto out;
1599 }
1596 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t, 1600 ep->l2t = cxgb4_l2t_get(cdev->rdev.lldi.l2t,
1597 n, pdev, 0); 1601 n, pdev, 0);
1598 if (!ep->l2t) 1602 if (!ep->l2t)
diff --git a/drivers/infiniband/hw/mlx4/main.c b/drivers/infiniband/hw/mlx4/main.c
index ee1c577238f7..3530c41fcd1f 100644
--- a/drivers/infiniband/hw/mlx4/main.c
+++ b/drivers/infiniband/hw/mlx4/main.c
@@ -140,7 +140,7 @@ static int mlx4_ib_query_device(struct ib_device *ibdev,
140 props->max_mr_size = ~0ull; 140 props->max_mr_size = ~0ull;
141 props->page_size_cap = dev->dev->caps.page_size_cap; 141 props->page_size_cap = dev->dev->caps.page_size_cap;
142 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps; 142 props->max_qp = dev->dev->caps.num_qps - dev->dev->caps.reserved_qps;
143 props->max_qp_wr = dev->dev->caps.max_wqes; 143 props->max_qp_wr = dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE;
144 props->max_sge = min(dev->dev->caps.max_sq_sg, 144 props->max_sge = min(dev->dev->caps.max_sq_sg,
145 dev->dev->caps.max_rq_sg); 145 dev->dev->caps.max_rq_sg);
146 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs; 146 props->max_cq = dev->dev->caps.num_cqs - dev->dev->caps.reserved_cqs;
@@ -1084,12 +1084,9 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1084 int total_eqs = 0; 1084 int total_eqs = 0;
1085 int i, j, eq; 1085 int i, j, eq;
1086 1086
1087 /* Init eq table */ 1087 /* Legacy mode or comp_pool is not large enough */
1088 ibdev->eq_table = NULL; 1088 if (dev->caps.comp_pool == 0 ||
1089 ibdev->eq_added = 0; 1089 dev->caps.num_ports > dev->caps.comp_pool)
1090
1091 /* Legacy mode? */
1092 if (dev->caps.comp_pool == 0)
1093 return; 1090 return;
1094 1091
1095 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/ 1092 eq_per_port = rounddown_pow_of_two(dev->caps.comp_pool/
@@ -1135,7 +1132,10 @@ static void mlx4_ib_alloc_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1135static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev) 1132static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1136{ 1133{
1137 int i; 1134 int i;
1138 int total_eqs; 1135
1136 /* no additional eqs were added */
1137 if (!ibdev->eq_table)
1138 return;
1139 1139
1140 /* Reset the advertised EQ number */ 1140 /* Reset the advertised EQ number */
1141 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors; 1141 ibdev->ib_dev.num_comp_vectors = dev->caps.num_comp_vectors;
@@ -1148,12 +1148,7 @@ static void mlx4_ib_free_eqs(struct mlx4_dev *dev, struct mlx4_ib_dev *ibdev)
1148 mlx4_release_eq(dev, ibdev->eq_table[i]); 1148 mlx4_release_eq(dev, ibdev->eq_table[i]);
1149 } 1149 }
1150 1150
1151 total_eqs = dev->caps.num_comp_vectors + ibdev->eq_added;
1152 memset(ibdev->eq_table, 0, total_eqs * sizeof(int));
1153 kfree(ibdev->eq_table); 1151 kfree(ibdev->eq_table);
1154
1155 ibdev->eq_table = NULL;
1156 ibdev->eq_added = 0;
1157} 1152}
1158 1153
1159static void *mlx4_ib_add(struct mlx4_dev *dev) 1154static void *mlx4_ib_add(struct mlx4_dev *dev)
diff --git a/drivers/infiniband/hw/mlx4/mlx4_ib.h b/drivers/infiniband/hw/mlx4/mlx4_ib.h
index e62297cc77cc..ff36655d23d3 100644
--- a/drivers/infiniband/hw/mlx4/mlx4_ib.h
+++ b/drivers/infiniband/hw/mlx4/mlx4_ib.h
@@ -44,6 +44,14 @@
44#include <linux/mlx4/device.h> 44#include <linux/mlx4/device.h>
45#include <linux/mlx4/doorbell.h> 45#include <linux/mlx4/doorbell.h>
46 46
47enum {
48 MLX4_IB_SQ_MIN_WQE_SHIFT = 6,
49 MLX4_IB_MAX_HEADROOM = 2048
50};
51
52#define MLX4_IB_SQ_HEADROOM(shift) ((MLX4_IB_MAX_HEADROOM >> (shift)) + 1)
53#define MLX4_IB_SQ_MAX_SPARE (MLX4_IB_SQ_HEADROOM(MLX4_IB_SQ_MIN_WQE_SHIFT))
54
47struct mlx4_ib_ucontext { 55struct mlx4_ib_ucontext {
48 struct ib_ucontext ibucontext; 56 struct ib_ucontext ibucontext;
49 struct mlx4_uar uar; 57 struct mlx4_uar uar;
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index ceb33327091a..8d4ed24aef93 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -310,8 +310,8 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
310 int is_user, int has_rq, struct mlx4_ib_qp *qp) 310 int is_user, int has_rq, struct mlx4_ib_qp *qp)
311{ 311{
312 /* Sanity check RQ size before proceeding */ 312 /* Sanity check RQ size before proceeding */
313 if (cap->max_recv_wr > dev->dev->caps.max_wqes || 313 if (cap->max_recv_wr > dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE ||
314 cap->max_recv_sge > dev->dev->caps.max_rq_sg) 314 cap->max_recv_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg))
315 return -EINVAL; 315 return -EINVAL;
316 316
317 if (!has_rq) { 317 if (!has_rq) {
@@ -329,8 +329,17 @@ static int set_rq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
329 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg)); 329 qp->rq.wqe_shift = ilog2(qp->rq.max_gs * sizeof (struct mlx4_wqe_data_seg));
330 } 330 }
331 331
332 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt; 332 /* leave userspace return values as they were, so as not to break ABI */
333 cap->max_recv_sge = qp->rq.max_gs; 333 if (is_user) {
334 cap->max_recv_wr = qp->rq.max_post = qp->rq.wqe_cnt;
335 cap->max_recv_sge = qp->rq.max_gs;
336 } else {
337 cap->max_recv_wr = qp->rq.max_post =
338 min(dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE, qp->rq.wqe_cnt);
339 cap->max_recv_sge = min(qp->rq.max_gs,
340 min(dev->dev->caps.max_sq_sg,
341 dev->dev->caps.max_rq_sg));
342 }
334 343
335 return 0; 344 return 0;
336} 345}
@@ -341,8 +350,8 @@ static int set_kernel_sq_size(struct mlx4_ib_dev *dev, struct ib_qp_cap *cap,
341 int s; 350 int s;
342 351
343 /* Sanity check SQ size before proceeding */ 352 /* Sanity check SQ size before proceeding */
344 if (cap->max_send_wr > dev->dev->caps.max_wqes || 353 if (cap->max_send_wr > (dev->dev->caps.max_wqes - MLX4_IB_SQ_MAX_SPARE) ||
345 cap->max_send_sge > dev->dev->caps.max_sq_sg || 354 cap->max_send_sge > min(dev->dev->caps.max_sq_sg, dev->dev->caps.max_rq_sg) ||
346 cap->max_inline_data + send_wqe_overhead(type, qp->flags) + 355 cap->max_inline_data + send_wqe_overhead(type, qp->flags) +
347 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz) 356 sizeof (struct mlx4_wqe_inline_seg) > dev->dev->caps.max_sq_desc_sz)
348 return -EINVAL; 357 return -EINVAL;
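With that headroom reserved, set_rq_size() and set_kernel_sq_size() now reject requests that would leave no room for the spare entries, and for kernel QPs the reported capabilities are clamped instead of echoing the raw ring size back. A hedged sketch of the clamping rule for the receive side (the parameter names are simplified stand-ins for the driver's caps fields):

#define MLX4_IB_SQ_MAX_SPARE 33     /* value computed from the header hunk above */

static int min_int(int a, int b) { return a < b ? a : b; }

/* What a kernel consumer should be told it can post, given the ring it got. */
static int clamp_max_recv_wr(int hw_max_wqes, int rq_wqe_cnt)
{
	return min_int(hw_max_wqes - MLX4_IB_SQ_MAX_SPARE, rq_wqe_cnt);
}

Userspace consumers keep seeing the untouched values, so the existing ABI behaviour is preserved, as the comment in the hunk notes.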
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 85a69c958559..48970af23679 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -61,6 +61,7 @@ struct ocrdma_dev_attr {
61 u32 max_inline_data; 61 u32 max_inline_data;
62 int max_send_sge; 62 int max_send_sge;
63 int max_recv_sge; 63 int max_recv_sge;
64 int max_srq_sge;
64 int max_mr; 65 int max_mr;
65 u64 max_mr_size; 66 u64 max_mr_size;
66 u32 max_num_mr_pbl; 67 u32 max_num_mr_pbl;
@@ -231,7 +232,6 @@ struct ocrdma_qp_hwq_info {
231 u32 entry_size; 232 u32 entry_size;
232 u32 max_cnt; 233 u32 max_cnt;
233 u32 max_wqe_idx; 234 u32 max_wqe_idx;
234 u32 free_delta;
235 u16 dbid; /* qid, where to ring the doorbell. */ 235 u16 dbid; /* qid, where to ring the doorbell. */
236 u32 len; 236 u32 len;
237 dma_addr_t pa; 237 dma_addr_t pa;
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
index a411a4e3193d..517ab20b727c 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_abi.h
@@ -101,8 +101,6 @@ struct ocrdma_create_qp_uresp {
101 u32 rsvd1; 101 u32 rsvd1;
102 u32 num_wqe_allocated; 102 u32 num_wqe_allocated;
103 u32 num_rqe_allocated; 103 u32 num_rqe_allocated;
104 u32 free_wqe_delta;
105 u32 free_rqe_delta;
106 u32 db_sq_offset; 104 u32 db_sq_offset;
107 u32 db_rq_offset; 105 u32 db_rq_offset;
108 u32 db_shift; 106 u32 db_shift;
@@ -126,8 +124,7 @@ struct ocrdma_create_srq_uresp {
126 u32 db_rq_offset; 124 u32 db_rq_offset;
127 u32 db_shift; 125 u32 db_shift;
128 126
129 u32 free_rqe_delta; 127 u64 rsvd2;
130 u32 rsvd2;
131 u64 rsvd3; 128 u64 rsvd3;
132} __packed; 129} __packed;
133 130
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
index 9b204b1ba336..71942af4fce9 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_hw.c
@@ -732,7 +732,7 @@ static void ocrdma_dispatch_ibevent(struct ocrdma_dev *dev,
732 break; 732 break;
733 case OCRDMA_SRQ_LIMIT_EVENT: 733 case OCRDMA_SRQ_LIMIT_EVENT:
734 ib_evt.element.srq = &qp->srq->ibsrq; 734 ib_evt.element.srq = &qp->srq->ibsrq;
735 ib_evt.event = IB_EVENT_QP_LAST_WQE_REACHED; 735 ib_evt.event = IB_EVENT_SRQ_LIMIT_REACHED;
736 srq_event = 1; 736 srq_event = 1;
737 qp_event = 0; 737 qp_event = 0;
738 break; 738 break;
@@ -990,8 +990,6 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
990 struct ocrdma_dev_attr *attr, 990 struct ocrdma_dev_attr *attr,
991 struct ocrdma_mbx_query_config *rsp) 991 struct ocrdma_mbx_query_config *rsp)
992{ 992{
993 int max_q_mem;
994
995 attr->max_pd = 993 attr->max_pd =
996 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >> 994 (rsp->max_pd_ca_ack_delay & OCRDMA_MBX_QUERY_CFG_MAX_PD_MASK) >>
997 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT; 995 OCRDMA_MBX_QUERY_CFG_MAX_PD_SHIFT;
@@ -1004,6 +1002,9 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1004 attr->max_recv_sge = (rsp->max_write_send_sge & 1002 attr->max_recv_sge = (rsp->max_write_send_sge &
1005 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >> 1003 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK) >>
1006 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT; 1004 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT;
1005 attr->max_srq_sge = (rsp->max_srq_rqe_sge &
1006 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_MASK) >>
1007 OCRDMA_MBX_QUERY_CFG_MAX_SRQ_SGE_OFFSET;
1007 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp & 1008 attr->max_ord_per_qp = (rsp->max_ird_ord_per_qp &
1008 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >> 1009 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK) >>
1009 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT; 1010 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT;
@@ -1037,18 +1038,15 @@ static void ocrdma_get_attr(struct ocrdma_dev *dev,
1037 attr->max_inline_data = 1038 attr->max_inline_data =
1038 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) + 1039 attr->wqe_size - (sizeof(struct ocrdma_hdr_wqe) +
1039 sizeof(struct ocrdma_sge)); 1040 sizeof(struct ocrdma_sge));
1040 max_q_mem = OCRDMA_Q_PAGE_BASE_SIZE << (OCRDMA_MAX_Q_PAGE_SIZE_CNT - 1);
1041 /* hw can queue one less than the configured size,
1042 * so publish one less to the stack.
1043 */
1044 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1041 if (dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1045 dev->attr.max_wqe = max_q_mem / dev->attr.wqe_size;
1046 attr->ird = 1; 1042 attr->ird = 1;
1047 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE; 1043 attr->ird_page_size = OCRDMA_MIN_Q_PAGE_SIZE;
1048 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES; 1044 attr->num_ird_pages = MAX_OCRDMA_IRD_PAGES;
1049 } else 1045 }
1050 dev->attr.max_wqe = (max_q_mem / dev->attr.wqe_size) - 1; 1046 dev->attr.max_wqe = rsp->max_wqes_rqes_per_q >>
1051 dev->attr.max_rqe = (max_q_mem / dev->attr.rqe_size) - 1; 1047 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET;
1048 dev->attr.max_rqe = rsp->max_wqes_rqes_per_q &
1049 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK;
1052} 1050}
1053 1051
1054static int ocrdma_check_fw_config(struct ocrdma_dev *dev, 1052static int ocrdma_check_fw_config(struct ocrdma_dev *dev,
@@ -1990,19 +1988,12 @@ static void ocrdma_get_create_qp_rsp(struct ocrdma_create_qp_rsp *rsp,
1990 max_wqe_allocated = 1 << max_wqe_allocated; 1988 max_wqe_allocated = 1 << max_wqe_allocated;
1991 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe); 1989 max_rqe_allocated = 1 << ((u16)rsp->max_wqe_rqe);
1992 1990
1993 if (qp->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1994 qp->sq.free_delta = 0;
1995 qp->rq.free_delta = 1;
1996 } else
1997 qp->sq.free_delta = 1;
1998
1999 qp->sq.max_cnt = max_wqe_allocated; 1991 qp->sq.max_cnt = max_wqe_allocated;
2000 qp->sq.max_wqe_idx = max_wqe_allocated - 1; 1992 qp->sq.max_wqe_idx = max_wqe_allocated - 1;
2001 1993
2002 if (!attrs->srq) { 1994 if (!attrs->srq) {
2003 qp->rq.max_cnt = max_rqe_allocated; 1995 qp->rq.max_cnt = max_rqe_allocated;
2004 qp->rq.max_wqe_idx = max_rqe_allocated - 1; 1996 qp->rq.max_wqe_idx = max_rqe_allocated - 1;
2005 qp->rq.free_delta = 1;
2006 } 1997 }
2007} 1998}
2008 1999
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index a20d16eaae71..b050e629e9c3 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -26,7 +26,6 @@
26 *******************************************************************/ 26 *******************************************************************/
27 27
28#include <linux/module.h> 28#include <linux/module.h>
29#include <linux/version.h>
30#include <linux/idr.h> 29#include <linux/idr.h>
31#include <rdma/ib_verbs.h> 30#include <rdma/ib_verbs.h>
32#include <rdma/ib_user_verbs.h> 31#include <rdma/ib_user_verbs.h>
@@ -98,13 +97,11 @@ static void ocrdma_build_sgid_mac(union ib_gid *sgid, unsigned char *mac_addr,
98 sgid->raw[15] = mac_addr[5]; 97 sgid->raw[15] = mac_addr[5];
99} 98}
100 99
101static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 100static bool ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
102 bool is_vlan, u16 vlan_id) 101 bool is_vlan, u16 vlan_id)
103{ 102{
104 int i; 103 int i;
105 bool found = false;
106 union ib_gid new_sgid; 104 union ib_gid new_sgid;
107 int free_idx = OCRDMA_MAX_SGID;
108 unsigned long flags; 105 unsigned long flags;
109 106
110 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid)); 107 memset(&ocrdma_zero_sgid, 0, sizeof(union ib_gid));
@@ -116,23 +113,19 @@ static void ocrdma_add_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
116 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid, 113 if (!memcmp(&dev->sgid_tbl[i], &ocrdma_zero_sgid,
117 sizeof(union ib_gid))) { 114 sizeof(union ib_gid))) {
118 /* found free entry */ 115 /* found free entry */
119 if (!found) { 116 memcpy(&dev->sgid_tbl[i], &new_sgid,
120 free_idx = i; 117 sizeof(union ib_gid));
121 found = true; 118 spin_unlock_irqrestore(&dev->sgid_lock, flags);
122 break; 119 return true;
123 }
124 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid, 120 } else if (!memcmp(&dev->sgid_tbl[i], &new_sgid,
125 sizeof(union ib_gid))) { 121 sizeof(union ib_gid))) {
126 /* entry already present, no addition is required. */ 122 /* entry already present, no addition is required. */
127 spin_unlock_irqrestore(&dev->sgid_lock, flags); 123 spin_unlock_irqrestore(&dev->sgid_lock, flags);
128 return; 124 return false;
129 } 125 }
130 } 126 }
131 /* if entry doesn't exist and if table has some space, add entry */
132 if (found)
133 memcpy(&dev->sgid_tbl[free_idx], &new_sgid,
134 sizeof(union ib_gid));
135 spin_unlock_irqrestore(&dev->sgid_lock, flags); 127 spin_unlock_irqrestore(&dev->sgid_lock, flags);
128 return false;
136} 129}
137 130
138static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr, 131static bool ocrdma_del_sgid(struct ocrdma_dev *dev, unsigned char *mac_addr,
@@ -168,7 +161,8 @@ static void ocrdma_add_default_sgid(struct ocrdma_dev *dev)
168 ocrdma_get_guid(dev, &sgid->raw[8]); 161 ocrdma_get_guid(dev, &sgid->raw[8]);
169} 162}
170 163
171static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev) 164#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
165static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
172{ 166{
173 struct net_device *netdev, *tmp; 167 struct net_device *netdev, *tmp;
174 u16 vlan_id; 168 u16 vlan_id;
@@ -176,8 +170,6 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
176 170
177 netdev = dev->nic_info.netdev; 171 netdev = dev->nic_info.netdev;
178 172
179 ocrdma_add_default_sgid(dev);
180
181 rcu_read_lock(); 173 rcu_read_lock();
182 for_each_netdev_rcu(&init_net, tmp) { 174 for_each_netdev_rcu(&init_net, tmp) {
183 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) { 175 if (netdev == tmp || vlan_dev_real_dev(tmp) == netdev) {
@@ -195,10 +187,23 @@ static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
195 } 187 }
196 } 188 }
197 rcu_read_unlock(); 189 rcu_read_unlock();
190}
191#else
192static void ocrdma_add_vlan_sgids(struct ocrdma_dev *dev)
193{
194
195}
196#endif /* VLAN */
197
198static int ocrdma_build_sgid_tbl(struct ocrdma_dev *dev)
199{
200 ocrdma_add_default_sgid(dev);
201 ocrdma_add_vlan_sgids(dev);
198 return 0; 202 return 0;
199} 203}
200 204
201#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 205#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) || \
206defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
202 207
203static int ocrdma_inet6addr_event(struct notifier_block *notifier, 208static int ocrdma_inet6addr_event(struct notifier_block *notifier,
204 unsigned long event, void *ptr) 209 unsigned long event, void *ptr)
@@ -209,6 +214,7 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
209 struct ib_event gid_event; 214 struct ib_event gid_event;
210 struct ocrdma_dev *dev; 215 struct ocrdma_dev *dev;
211 bool found = false; 216 bool found = false;
217 bool updated = false;
212 bool is_vlan = false; 218 bool is_vlan = false;
213 u16 vid = 0; 219 u16 vid = 0;
214 220
@@ -234,23 +240,21 @@ static int ocrdma_inet6addr_event(struct notifier_block *notifier,
234 mutex_lock(&dev->dev_lock); 240 mutex_lock(&dev->dev_lock);
235 switch (event) { 241 switch (event) {
236 case NETDEV_UP: 242 case NETDEV_UP:
237 ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid); 243 updated = ocrdma_add_sgid(dev, netdev->dev_addr, is_vlan, vid);
238 break; 244 break;
239 case NETDEV_DOWN: 245 case NETDEV_DOWN:
240 found = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid); 246 updated = ocrdma_del_sgid(dev, netdev->dev_addr, is_vlan, vid);
241 if (found) {
242 /* found the matching entry, notify
243 * the consumers about it
244 */
245 gid_event.device = &dev->ibdev;
246 gid_event.element.port_num = 1;
247 gid_event.event = IB_EVENT_GID_CHANGE;
248 ib_dispatch_event(&gid_event);
249 }
250 break; 247 break;
251 default: 248 default:
252 break; 249 break;
253 } 250 }
251 if (updated) {
252 /* GID table updated, notify the consumers about it */
253 gid_event.device = &dev->ibdev;
254 gid_event.element.port_num = 1;
255 gid_event.event = IB_EVENT_GID_CHANGE;
256 ib_dispatch_event(&gid_event);
257 }
254 mutex_unlock(&dev->dev_lock); 258 mutex_unlock(&dev->dev_lock);
255 return NOTIFY_OK; 259 return NOTIFY_OK;
256} 260}
@@ -259,7 +263,7 @@ static struct notifier_block ocrdma_inet6addr_notifier = {
259 .notifier_call = ocrdma_inet6addr_event 263 .notifier_call = ocrdma_inet6addr_event
260}; 264};
261 265
262#endif /* IPV6 */ 266#endif /* IPV6 and VLAN */
263 267
264static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device, 268static enum rdma_link_layer ocrdma_link_layer(struct ib_device *device,
265 u8 port_num) 269 u8 port_num)
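ocrdma_add_sgid() and ocrdma_del_sgid() now report whether they actually changed the SGID table, and the inet6addr notifier raises a single IB_EVENT_GID_CHANGE after the switch statement only when one of them did. A reduced userspace-shaped sketch of that return-bool-and-notify-once pattern (all names below are placeholders, not the driver's):

#include <stdbool.h>
#include <stdio.h>

static bool table_add(int id) { (void)id; /* insert into table */ return true;  }
static bool table_del(int id) { (void)id; /* remove from table */ return false; }
static void notify_gid_change(void) { puts("GID_CHANGE"); }

static void on_netdev_event(int event, int id)
{
	bool updated = false;

	switch (event) {
	case 1: /* NETDEV_UP   */ updated = table_add(id); break;
	case 2: /* NETDEV_DOWN */ updated = table_del(id); break;
	default: break;
	}
	if (updated)                 /* one notification, shared by both paths */
		notify_gid_change();
}

int main(void) { on_netdev_event(1, 0); return 0; }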
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
index 7fd80cc0f037..c75cbdfa87e7 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_sli.h
@@ -418,6 +418,9 @@ enum {
418 418
419 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0, 419 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_SHIFT = 0,
420 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF, 420 OCRDMA_MBX_QUERY_CFG_MAX_SEND_SGE_MASK = 0xFFFF,
421 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT = 16,
422 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_MASK = 0xFFFF <<
423 OCRDMA_MBX_QUERY_CFG_MAX_WRITE_SGE_SHIFT,
421 424
422 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0, 425 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_SHIFT = 0,
423 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF, 426 OCRDMA_MBX_QUERY_CFG_MAX_ORD_PER_QP_MASK = 0xFFFF,
@@ -458,7 +461,7 @@ enum {
458 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 461 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET,
459 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0, 462 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET = 0,
460 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF << 463 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_MASK = 0xFFFF <<
461 OCRDMA_MBX_QUERY_CFG_MAX_WQES_PER_WQ_OFFSET, 464 OCRDMA_MBX_QUERY_CFG_MAX_RQES_PER_RQ_OFFSET,
462 465
463 OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16, 466 OCRDMA_MBX_QUERY_CFG_MAX_CQ_OFFSET = 16,
464 OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF << 467 OCRDMA_MBX_QUERY_CFG_MAX_CQ_MASK = 0xFFFF <<
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index e9f74d1b48f6..2e2e7aecc990 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -53,7 +53,7 @@ int ocrdma_query_gid(struct ib_device *ibdev, u8 port,
53 53
54 dev = get_ocrdma_dev(ibdev); 54 dev = get_ocrdma_dev(ibdev);
55 memset(sgid, 0, sizeof(*sgid)); 55 memset(sgid, 0, sizeof(*sgid));
56 if (index > OCRDMA_MAX_SGID) 56 if (index >= OCRDMA_MAX_SGID)
57 return -EINVAL; 57 return -EINVAL;
58 58
59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid)); 59 memcpy(sgid, &dev->sgid_tbl[index], sizeof(*sgid));
@@ -83,8 +83,8 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
83 IB_DEVICE_SHUTDOWN_PORT | 83 IB_DEVICE_SHUTDOWN_PORT |
84 IB_DEVICE_SYS_IMAGE_GUID | 84 IB_DEVICE_SYS_IMAGE_GUID |
85 IB_DEVICE_LOCAL_DMA_LKEY; 85 IB_DEVICE_LOCAL_DMA_LKEY;
86 attr->max_sge = dev->attr.max_send_sge; 86 attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
87 attr->max_sge_rd = dev->attr.max_send_sge; 87 attr->max_sge_rd = 0;
88 attr->max_cq = dev->attr.max_cq; 88 attr->max_cq = dev->attr.max_cq;
89 attr->max_cqe = dev->attr.max_cqe; 89 attr->max_cqe = dev->attr.max_cqe;
90 attr->max_mr = dev->attr.max_mr; 90 attr->max_mr = dev->attr.max_mr;
@@ -97,7 +97,7 @@ int ocrdma_query_device(struct ib_device *ibdev, struct ib_device_attr *attr)
97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp); 97 min(dev->attr.max_ord_per_qp, dev->attr.max_ird_per_qp);
98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp; 98 attr->max_qp_init_rd_atom = dev->attr.max_ord_per_qp;
99 attr->max_srq = (dev->attr.max_qp - 1); 99 attr->max_srq = (dev->attr.max_qp - 1);
100 attr->max_srq_sge = attr->max_sge; 100 attr->max_srq_sge = attr->max_srq_sge;
101 attr->max_srq_wr = dev->attr.max_rqe; 101 attr->max_srq_wr = dev->attr.max_rqe;
102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay; 102 attr->local_ca_ack_delay = dev->attr.local_ca_ack_delay;
103 attr->max_fast_reg_page_list_len = 0; 103 attr->max_fast_reg_page_list_len = 0;
@@ -940,8 +940,6 @@ static int ocrdma_copy_qp_uresp(struct ocrdma_qp *qp,
940 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET; 940 uresp.db_rq_offset = OCRDMA_DB_RQ_OFFSET;
941 uresp.db_shift = 16; 941 uresp.db_shift = 16;
942 } 942 }
943 uresp.free_wqe_delta = qp->sq.free_delta;
944 uresp.free_rqe_delta = qp->rq.free_delta;
945 943
946 if (qp->dpp_enabled) { 944 if (qp->dpp_enabled) {
947 uresp.dpp_credit = dpp_credit_lmt; 945 uresp.dpp_credit = dpp_credit_lmt;
@@ -1307,8 +1305,6 @@ static int ocrdma_hwq_free_cnt(struct ocrdma_qp_hwq_info *q)
1307 free_cnt = (q->max_cnt - q->head) + q->tail; 1305 free_cnt = (q->max_cnt - q->head) + q->tail;
1308 else 1306 else
1309 free_cnt = q->tail - q->head; 1307 free_cnt = q->tail - q->head;
1310 if (q->free_delta)
1311 free_cnt -= q->free_delta;
1312 return free_cnt; 1308 return free_cnt;
1313} 1309}
1314 1310
@@ -1501,7 +1497,6 @@ static int ocrdma_copy_srq_uresp(struct ocrdma_srq *srq, struct ib_udata *udata)
1501 (srq->pd->id * srq->dev->nic_info.db_page_size); 1497 (srq->pd->id * srq->dev->nic_info.db_page_size);
1502 uresp.db_page_size = srq->dev->nic_info.db_page_size; 1498 uresp.db_page_size = srq->dev->nic_info.db_page_size;
1503 uresp.num_rqe_allocated = srq->rq.max_cnt; 1499 uresp.num_rqe_allocated = srq->rq.max_cnt;
1504 uresp.free_rqe_delta = 1;
1505 if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) { 1500 if (srq->dev->nic_info.dev_family == OCRDMA_GEN2_FAMILY) {
1506 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET; 1501 uresp.db_rq_offset = OCRDMA_DB_GEN2_RQ1_OFFSET;
1507 uresp.db_shift = 24; 1502 uresp.db_shift = 24;
@@ -2306,8 +2301,10 @@ static bool ocrdma_poll_err_rcqe(struct ocrdma_qp *qp, struct ocrdma_cqe *cqe,
2306 *stop = true; 2301 *stop = true;
2307 expand = false; 2302 expand = false;
2308 } 2303 }
2309 } else 2304 } else {
2305 *polled = true;
2310 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status); 2306 expand = ocrdma_update_err_rcqe(ibwc, cqe, qp, status);
2307 }
2311 return expand; 2308 return expand;
2312} 2309}
2313 2310
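The bounds check in ocrdma_query_gid() moves from '>' to '>=': a table of OCRDMA_MAX_SGID entries only has valid indices 0..OCRDMA_MAX_SGID-1, so index == OCRDMA_MAX_SGID must be rejected before it is used to read sgid_tbl[]. A one-function illustration of the idiom (the table size here is arbitrary):

#include <errno.h>

#define MAX_SGID 16
static int sgid_tbl[MAX_SGID];

static int query_gid(unsigned int index, int *out)
{
	if (index >= MAX_SGID)   /* '>' would let index == MAX_SGID read past the end */
		return -EINVAL;
	*out = sgid_tbl[index];
	return 0;
}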
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
index e6483439f25f..633f03d80274 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.h
@@ -28,7 +28,6 @@
28#ifndef __OCRDMA_VERBS_H__ 28#ifndef __OCRDMA_VERBS_H__
29#define __OCRDMA_VERBS_H__ 29#define __OCRDMA_VERBS_H__
30 30
31#include <linux/version.h>
32int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *, 31int ocrdma_post_send(struct ib_qp *, struct ib_send_wr *,
33 struct ib_send_wr **bad_wr); 32 struct ib_send_wr **bad_wr);
34int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *, 33int ocrdma_post_recv(struct ib_qp *, struct ib_recv_wr *,
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index d90a421e9cac..a2e418cba0ff 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -547,26 +547,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
547 spin_unlock_irqrestore(&iommu->lock, flags); 547 spin_unlock_irqrestore(&iommu->lock, flags);
548} 548}
549 549
550static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head) 550static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
551{ 551{
552 struct amd_iommu_fault fault; 552 struct amd_iommu_fault fault;
553 volatile u64 *raw;
554 int i;
555 553
556 INC_STATS_COUNTER(pri_requests); 554 INC_STATS_COUNTER(pri_requests);
557 555
558 raw = (u64 *)(iommu->ppr_log + head);
559
560 /*
561 * Hardware bug: Interrupt may arrive before the entry is written to
562 * memory. If this happens we need to wait for the entry to arrive.
563 */
564 for (i = 0; i < LOOP_TIMEOUT; ++i) {
565 if (PPR_REQ_TYPE(raw[0]) != 0)
566 break;
567 udelay(1);
568 }
569
570 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) { 556 if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
571 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n"); 557 pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
572 return; 558 return;
@@ -578,12 +564,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
578 fault.tag = PPR_TAG(raw[0]); 564 fault.tag = PPR_TAG(raw[0]);
579 fault.flags = PPR_FLAGS(raw[0]); 565 fault.flags = PPR_FLAGS(raw[0]);
580 566
581 /*
582 * To detect the hardware bug we need to clear the entry
583 * back to zero.
584 */
585 raw[0] = raw[1] = 0;
586
587 atomic_notifier_call_chain(&ppr_notifier, 0, &fault); 567 atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
588} 568}
589 569
@@ -595,25 +575,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
595 if (iommu->ppr_log == NULL) 575 if (iommu->ppr_log == NULL)
596 return; 576 return;
597 577
578 /* enable ppr interrupts again */
579 writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
580
598 spin_lock_irqsave(&iommu->lock, flags); 581 spin_lock_irqsave(&iommu->lock, flags);
599 582
600 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 583 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
601 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 584 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
602 585
603 while (head != tail) { 586 while (head != tail) {
587 volatile u64 *raw;
588 u64 entry[2];
589 int i;
604 590
605 /* Handle PPR entry */ 591 raw = (u64 *)(iommu->ppr_log + head);
606 iommu_handle_ppr_entry(iommu, head); 592
593 /*
594 * Hardware bug: Interrupt may arrive before the entry is
595 * written to memory. If this happens we need to wait for the
596 * entry to arrive.
597 */
598 for (i = 0; i < LOOP_TIMEOUT; ++i) {
599 if (PPR_REQ_TYPE(raw[0]) != 0)
600 break;
601 udelay(1);
602 }
603
604 /* Avoid memcpy function-call overhead */
605 entry[0] = raw[0];
606 entry[1] = raw[1];
607 607
608 /* Update and refresh ring-buffer state */ 608 /*
609 * To detect the hardware bug we need to clear the entry
610 * back to zero.
611 */
612 raw[0] = raw[1] = 0UL;
613
614 /* Update head pointer of hardware ring-buffer */
609 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE; 615 head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
610 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET); 616 writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
617
618 /*
619 * Release iommu->lock because ppr-handling might need to
620 * re-acquire it
621 */
622 spin_unlock_irqrestore(&iommu->lock, flags);
623
624 /* Handle PPR entry */
625 iommu_handle_ppr_entry(iommu, entry);
626
627 spin_lock_irqsave(&iommu->lock, flags);
628
629 /* Refresh ring-buffer information */
630 head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
611 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET); 631 tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
612 } 632 }
613 633
614 /* enable ppr interrupts again */
615 writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
616
617 spin_unlock_irqrestore(&iommu->lock, flags); 634 spin_unlock_irqrestore(&iommu->lock, flags);
618} 635}
619 636
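The reworked PPR poll loop copies each log entry out of the hardware ring, clears the slot, publishes the new head pointer, and only then drops iommu->lock around the call into the fault handler; re-enabling the PPR interrupt moves to the top of the function so requests that arrive while polling still raise an interrupt. A userspace approximation of that consume-then-unlock ring pattern (a pthread mutex stands in for the spinlock and a plain array for the MMIO log):

#include <pthread.h>
#include <stdint.h>
#include <string.h>

#define RING_ENTRIES 8

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static uint64_t ring[RING_ENTRIES][2];
static unsigned int head, tail;

static void handle_entry(const uint64_t entry[2])
{
	(void)entry;   /* may sleep or take other locks, so must run unlocked */
}

static void poll_ring(void)
{
	pthread_mutex_lock(&lock);
	while (head != tail) {
		uint64_t entry[2];

		memcpy(entry, ring[head], sizeof(entry));  /* snapshot the slot      */
		memset(ring[head], 0, sizeof(entry));      /* hand it back to the HW */
		head = (head + 1) % RING_ENTRIES;          /* publish the new head   */

		pthread_mutex_unlock(&lock);               /* never call the handler */
		handle_entry(entry);                       /* with the lock held     */
		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void) { poll_ring(); return 0; }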
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c56790375e0f..542024ba6dba 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
1029 if (!iommu->dev) 1029 if (!iommu->dev)
1030 return 1; 1030 return 1;
1031 1031
1032 iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
1033 PCI_DEVFN(0, 0));
1034
1032 iommu->cap_ptr = h->cap_ptr; 1035 iommu->cap_ptr = h->cap_ptr;
1033 iommu->pci_seg = h->pci_seg; 1036 iommu->pci_seg = h->pci_seg;
1034 iommu->mmio_phys = h->mmio_phys; 1037 iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1323{ 1326{
1324 int i, j; 1327 int i, j;
1325 u32 ioc_feature_control; 1328 u32 ioc_feature_control;
1326 struct pci_dev *pdev = NULL; 1329 struct pci_dev *pdev = iommu->root_pdev;
1327 1330
1328 /* RD890 BIOSes may not have completely reconfigured the iommu */ 1331 /* RD890 BIOSes may not have completely reconfigured the iommu */
1329 if (!is_rd890_iommu(iommu->dev)) 1332 if (!is_rd890_iommu(iommu->dev) || !pdev)
1330 return; 1333 return;
1331 1334
1332 /* 1335 /*
1333 * First, we need to ensure that the iommu is enabled. This is 1336 * First, we need to ensure that the iommu is enabled. This is
1334 * controlled by a register in the northbridge 1337 * controlled by a register in the northbridge
1335 */ 1338 */
1336 pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
1337
1338 if (!pdev)
1339 return;
1340 1339
1341 /* Select Northbridge indirect register 0x75 and enable writing */ 1340 /* Select Northbridge indirect register 0x75 and enable writing */
1342 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7)); 1341 pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
1346 if (!(ioc_feature_control & 0x1)) 1345 if (!(ioc_feature_control & 0x1))
1347 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1); 1346 pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
1348 1347
1349 pci_dev_put(pdev);
1350
1351 /* Restore the iommu BAR */ 1348 /* Restore the iommu BAR */
1352 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4, 1349 pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
1353 iommu->stored_addr_lo); 1350 iommu->stored_addr_lo);
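Rather than looking up (and referencing) the bus/devfn(0,0) PCI device every time the resume quirk runs, init_iommu_one() now resolves it once and caches it in the new root_pdev field; the quirk simply bails out if it was never found. A rough kernel-flavoured sketch of the shape of the check (the struct below is reduced to the bare minimum, not the driver's real type):

struct iommu_like {
	struct pci_dev *dev;        /* the IOMMU function itself               */
	struct pci_dev *root_pdev;  /* bus/devfn(0,0) device, resolved at init */
};

static void resume_quirk(struct iommu_like *iommu)
{
	if (!iommu->root_pdev)      /* lookup failed at init: nothing to poke */
		return;
	/* ... program the northbridge registers through iommu->root_pdev ... */
}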
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b71736..24355559a2ad 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
481 /* Pointer to PCI device of this IOMMU */ 481 /* Pointer to PCI device of this IOMMU */
482 struct pci_dev *dev; 482 struct pci_dev *dev;
483 483
484 /* Cache pdev to root device for resume quirks */
485 struct pci_dev *root_pdev;
486
484 /* physical address of MMIO space */ 487 /* physical address of MMIO space */
485 u64 mmio_phys; 488 u64 mmio_phys;
486 /* virtual address of MMIO space */ 489 /* virtual address of MMIO space */
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 04cb8c88d74b..12b2b55c519e 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -379,7 +379,7 @@ config LEDS_NETXBIG
379 379
380config LEDS_ASIC3 380config LEDS_ASIC3
381 bool "LED support for the HTC ASIC3" 381 bool "LED support for the HTC ASIC3"
382 depends on LEDS_CLASS 382 depends on LEDS_CLASS=y
383 depends on MFD_ASIC3 383 depends on MFD_ASIC3
384 default y 384 default y
385 help 385 help
@@ -390,7 +390,7 @@ config LEDS_ASIC3
390 390
391config LEDS_RENESAS_TPU 391config LEDS_RENESAS_TPU
392 bool "LED support for Renesas TPU" 392 bool "LED support for Renesas TPU"
393 depends on LEDS_CLASS && HAVE_CLK && GENERIC_GPIO 393 depends on LEDS_CLASS=y && HAVE_CLK && GENERIC_GPIO
394 help 394 help
395 This option enables build of the LED TPU platform driver, 395 This option enables build of the LED TPU platform driver,
396 suitable to drive any TPU channel on newer Renesas SoCs. 396 suitable to drive any TPU channel on newer Renesas SoCs.
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 8ee92c81aec2..e663e6f413e9 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -29,7 +29,7 @@ static void led_update_brightness(struct led_classdev *led_cdev)
29 led_cdev->brightness = led_cdev->brightness_get(led_cdev); 29 led_cdev->brightness = led_cdev->brightness_get(led_cdev);
30} 30}
31 31
32static ssize_t led_brightness_show(struct device *dev, 32static ssize_t led_brightness_show(struct device *dev,
33 struct device_attribute *attr, char *buf) 33 struct device_attribute *attr, char *buf)
34{ 34{
35 struct led_classdev *led_cdev = dev_get_drvdata(dev); 35 struct led_classdev *led_cdev = dev_get_drvdata(dev);
diff --git a/drivers/leds/led-core.c b/drivers/leds/led-core.c
index d6860043f6f9..d65353d8d3fc 100644
--- a/drivers/leds/led-core.c
+++ b/drivers/leds/led-core.c
@@ -44,13 +44,6 @@ static void led_set_software_blink(struct led_classdev *led_cdev,
44 if (!led_cdev->blink_brightness) 44 if (!led_cdev->blink_brightness)
45 led_cdev->blink_brightness = led_cdev->max_brightness; 45 led_cdev->blink_brightness = led_cdev->max_brightness;
46 46
47 if (led_get_trigger_data(led_cdev) &&
48 delay_on == led_cdev->blink_delay_on &&
49 delay_off == led_cdev->blink_delay_off)
50 return;
51
52 led_stop_software_blink(led_cdev);
53
54 led_cdev->blink_delay_on = delay_on; 47 led_cdev->blink_delay_on = delay_on;
55 led_cdev->blink_delay_off = delay_off; 48 led_cdev->blink_delay_off = delay_off;
56 49
diff --git a/drivers/md/dm-mpath.c b/drivers/md/dm-mpath.c
index 754f38f8a692..638dae048b4f 100644
--- a/drivers/md/dm-mpath.c
+++ b/drivers/md/dm-mpath.c
@@ -18,6 +18,7 @@
18#include <linux/slab.h> 18#include <linux/slab.h>
19#include <linux/time.h> 19#include <linux/time.h>
20#include <linux/workqueue.h> 20#include <linux/workqueue.h>
21#include <linux/delay.h>
21#include <scsi/scsi_dh.h> 22#include <scsi/scsi_dh.h>
22#include <linux/atomic.h> 23#include <linux/atomic.h>
23 24
@@ -61,11 +62,11 @@ struct multipath {
61 struct list_head list; 62 struct list_head list;
62 struct dm_target *ti; 63 struct dm_target *ti;
63 64
64 spinlock_t lock;
65
66 const char *hw_handler_name; 65 const char *hw_handler_name;
67 char *hw_handler_params; 66 char *hw_handler_params;
68 67
68 spinlock_t lock;
69
69 unsigned nr_priority_groups; 70 unsigned nr_priority_groups;
70 struct list_head priority_groups; 71 struct list_head priority_groups;
71 72
@@ -81,16 +82,17 @@ struct multipath {
81 struct priority_group *next_pg; /* Switch to this PG if set */ 82 struct priority_group *next_pg; /* Switch to this PG if set */
82 unsigned repeat_count; /* I/Os left before calling PS again */ 83 unsigned repeat_count; /* I/Os left before calling PS again */
83 84
84 unsigned queue_io; /* Must we queue all I/O? */ 85 unsigned queue_io:1; /* Must we queue all I/O? */
85 unsigned queue_if_no_path; /* Queue I/O if last path fails? */ 86 unsigned queue_if_no_path:1; /* Queue I/O if last path fails? */
86 unsigned saved_queue_if_no_path;/* Saved state during suspension */ 87 unsigned saved_queue_if_no_path:1; /* Saved state during suspension */
88
87 unsigned pg_init_retries; /* Number of times to retry pg_init */ 89 unsigned pg_init_retries; /* Number of times to retry pg_init */
88 unsigned pg_init_count; /* Number of times pg_init called */ 90 unsigned pg_init_count; /* Number of times pg_init called */
89 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */ 91 unsigned pg_init_delay_msecs; /* Number of msecs before pg_init retry */
90 92
93 unsigned queue_size;
91 struct work_struct process_queued_ios; 94 struct work_struct process_queued_ios;
92 struct list_head queued_ios; 95 struct list_head queued_ios;
93 unsigned queue_size;
94 96
95 struct work_struct trigger_event; 97 struct work_struct trigger_event;
96 98
@@ -328,14 +330,18 @@ static void __choose_pgpath(struct multipath *m, size_t nr_bytes)
328 /* 330 /*
329 * Loop through priority groups until we find a valid path. 331 * Loop through priority groups until we find a valid path.
330 * First time we skip PGs marked 'bypassed'. 332 * First time we skip PGs marked 'bypassed'.
331 * Second time we only try the ones we skipped. 333 * Second time we only try the ones we skipped, but set
334 * pg_init_delay_retry so we do not hammer controllers.
332 */ 335 */
333 do { 336 do {
334 list_for_each_entry(pg, &m->priority_groups, list) { 337 list_for_each_entry(pg, &m->priority_groups, list) {
335 if (pg->bypassed == bypassed) 338 if (pg->bypassed == bypassed)
336 continue; 339 continue;
337 if (!__choose_path_in_pg(m, pg, nr_bytes)) 340 if (!__choose_path_in_pg(m, pg, nr_bytes)) {
341 if (!bypassed)
342 m->pg_init_delay_retry = 1;
338 return; 343 return;
344 }
339 } 345 }
340 } while (bypassed--); 346 } while (bypassed--);
341 347
@@ -481,9 +487,6 @@ static void process_queued_ios(struct work_struct *work)
481 487
482 spin_lock_irqsave(&m->lock, flags); 488 spin_lock_irqsave(&m->lock, flags);
483 489
484 if (!m->queue_size)
485 goto out;
486
487 if (!m->current_pgpath) 490 if (!m->current_pgpath)
488 __choose_pgpath(m, 0); 491 __choose_pgpath(m, 0);
489 492
@@ -496,7 +499,6 @@ static void process_queued_ios(struct work_struct *work)
496 if (m->pg_init_required && !m->pg_init_in_progress && pgpath) 499 if (m->pg_init_required && !m->pg_init_in_progress && pgpath)
497 __pg_init_all_paths(m); 500 __pg_init_all_paths(m);
498 501
499out:
500 spin_unlock_irqrestore(&m->lock, flags); 502 spin_unlock_irqrestore(&m->lock, flags);
501 if (!must_queue) 503 if (!must_queue)
502 dispatch_queued_ios(m); 504 dispatch_queued_ios(m);
@@ -1517,11 +1519,16 @@ out:
1517static int multipath_ioctl(struct dm_target *ti, unsigned int cmd, 1519static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1518 unsigned long arg) 1520 unsigned long arg)
1519{ 1521{
1520 struct multipath *m = (struct multipath *) ti->private; 1522 struct multipath *m = ti->private;
1521 struct block_device *bdev = NULL; 1523 struct block_device *bdev;
1522 fmode_t mode = 0; 1524 fmode_t mode;
1523 unsigned long flags; 1525 unsigned long flags;
1524 int r = 0; 1526 int r;
1527
1528again:
1529 bdev = NULL;
1530 mode = 0;
1531 r = 0;
1525 1532
1526 spin_lock_irqsave(&m->lock, flags); 1533 spin_lock_irqsave(&m->lock, flags);
1527 1534
@@ -1546,6 +1553,12 @@ static int multipath_ioctl(struct dm_target *ti, unsigned int cmd,
1546 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT) 1553 if (!r && ti->len != i_size_read(bdev->bd_inode) >> SECTOR_SHIFT)
1547 r = scsi_verify_blk_ioctl(NULL, cmd); 1554 r = scsi_verify_blk_ioctl(NULL, cmd);
1548 1555
1556 if (r == -EAGAIN && !fatal_signal_pending(current)) {
1557 queue_work(kmultipathd, &m->process_queued_ios);
1558 msleep(10);
1559 goto again;
1560 }
1561
1549 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg); 1562 return r ? : __blkdev_driver_ioctl(bdev, mode, cmd, arg);
1550} 1563}
1551 1564
@@ -1643,7 +1656,7 @@ out:
1643 *---------------------------------------------------------------*/ 1656 *---------------------------------------------------------------*/
1644static struct target_type multipath_target = { 1657static struct target_type multipath_target = {
1645 .name = "multipath", 1658 .name = "multipath",
1646 .version = {1, 3, 0}, 1659 .version = {1, 4, 0},
1647 .module = THIS_MODULE, 1660 .module = THIS_MODULE,
1648 .ctr = multipath_ctr, 1661 .ctr = multipath_ctr,
1649 .dtr = multipath_dtr, 1662 .dtr = multipath_dtr,
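multipath_ioctl() used to return -EAGAIN straight away when all paths were down but I/O was being queued; the new loop kicks the process_queued_ios worker, sleeps for roughly 10ms, and retries until a path becomes usable or a fatal signal is pending. A userspace-shaped sketch of that signal-bounded retry loop (the helper names are invented for illustration):

#include <errno.h>
#include <stdbool.h>
#include <unistd.h>

static int  try_get_path(void)      { return -EAGAIN; }  /* stub: no usable path yet     */
static void kick_path_worker(void)  { /* schedule path activation */ }
static bool fatal_signal(void)      { return true; }     /* stub: pretend we were killed */

static int ioctl_with_retry(void)
{
	int r;

	for (;;) {
		r = try_get_path();
		if (r != -EAGAIN || fatal_signal())
			return r;
		kick_path_worker();   /* give the worker a chance to bring a path up */
		usleep(10 * 1000);    /* ~10ms, mirroring msleep(10) in the hunk     */
	}
}

int main(void) { return ioctl_with_retry() == -EAGAIN ? 0 : 1; }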
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index 737d38865b69..3e2907f0bc46 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -1082,12 +1082,89 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
1082 return 0; 1082 return 0;
1083} 1083}
1084 1084
1085static int __get_held_metadata_root(struct dm_pool_metadata *pmd, 1085static int __reserve_metadata_snap(struct dm_pool_metadata *pmd)
1086 dm_block_t *result) 1086{
1087 int r, inc;
1088 struct thin_disk_superblock *disk_super;
1089 struct dm_block *copy, *sblock;
1090 dm_block_t held_root;
1091
1092 /*
1093 * Copy the superblock.
1094 */
1095 dm_sm_inc_block(pmd->metadata_sm, THIN_SUPERBLOCK_LOCATION);
1096 r = dm_tm_shadow_block(pmd->tm, THIN_SUPERBLOCK_LOCATION,
1097 &sb_validator, &copy, &inc);
1098 if (r)
1099 return r;
1100
1101 BUG_ON(!inc);
1102
1103 held_root = dm_block_location(copy);
1104 disk_super = dm_block_data(copy);
1105
1106 if (le64_to_cpu(disk_super->held_root)) {
1107 DMWARN("Pool metadata snapshot already exists: release this before taking another.");
1108
1109 dm_tm_dec(pmd->tm, held_root);
1110 dm_tm_unlock(pmd->tm, copy);
1111 pmd->need_commit = 1;
1112
1113 return -EBUSY;
1114 }
1115
1116 /*
1117 * Wipe the spacemap since we're not publishing this.
1118 */
1119 memset(&disk_super->data_space_map_root, 0,
1120 sizeof(disk_super->data_space_map_root));
1121 memset(&disk_super->metadata_space_map_root, 0,
1122 sizeof(disk_super->metadata_space_map_root));
1123
1124 /*
1125 * Increment the data structures that need to be preserved.
1126 */
1127 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->data_mapping_root));
1128 dm_tm_inc(pmd->tm, le64_to_cpu(disk_super->device_details_root));
1129 dm_tm_unlock(pmd->tm, copy);
1130
1131 /*
1132 * Write the held root into the superblock.
1133 */
1134 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1135 &sb_validator, &sblock);
1136 if (r) {
1137 dm_tm_dec(pmd->tm, held_root);
1138 pmd->need_commit = 1;
1139 return r;
1140 }
1141
1142 disk_super = dm_block_data(sblock);
1143 disk_super->held_root = cpu_to_le64(held_root);
1144 dm_bm_unlock(sblock);
1145
1146 pmd->need_commit = 1;
1147
1148 return 0;
1149}
1150
1151int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd)
1152{
1153 int r;
1154
1155 down_write(&pmd->root_lock);
1156 r = __reserve_metadata_snap(pmd);
1157 up_write(&pmd->root_lock);
1158
1159 return r;
1160}
1161
1162static int __release_metadata_snap(struct dm_pool_metadata *pmd)
1087{ 1163{
1088 int r; 1164 int r;
1089 struct thin_disk_superblock *disk_super; 1165 struct thin_disk_superblock *disk_super;
1090 struct dm_block *sblock; 1166 struct dm_block *sblock, *copy;
1167 dm_block_t held_root;
1091 1168
1092 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION, 1169 r = dm_bm_write_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1093 &sb_validator, &sblock); 1170 &sb_validator, &sblock);
@@ -1095,18 +1172,65 @@ static int __get_held_metadata_root(struct dm_pool_metadata *pmd,
1095 return r; 1172 return r;
1096 1173
1097 disk_super = dm_block_data(sblock); 1174 disk_super = dm_block_data(sblock);
1175 held_root = le64_to_cpu(disk_super->held_root);
1176 disk_super->held_root = cpu_to_le64(0);
1177 pmd->need_commit = 1;
1178
1179 dm_bm_unlock(sblock);
1180
1181 if (!held_root) {
1182 DMWARN("No pool metadata snapshot found: nothing to release.");
1183 return -EINVAL;
1184 }
1185
1186 r = dm_tm_read_lock(pmd->tm, held_root, &sb_validator, &copy);
1187 if (r)
1188 return r;
1189
1190 disk_super = dm_block_data(copy);
1191 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->data_mapping_root));
1192 dm_sm_dec_block(pmd->metadata_sm, le64_to_cpu(disk_super->device_details_root));
1193 dm_sm_dec_block(pmd->metadata_sm, held_root);
1194
1195 return dm_tm_unlock(pmd->tm, copy);
1196}
1197
1198int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd)
1199{
1200 int r;
1201
1202 down_write(&pmd->root_lock);
1203 r = __release_metadata_snap(pmd);
1204 up_write(&pmd->root_lock);
1205
1206 return r;
1207}
1208
1209static int __get_metadata_snap(struct dm_pool_metadata *pmd,
1210 dm_block_t *result)
1211{
1212 int r;
1213 struct thin_disk_superblock *disk_super;
1214 struct dm_block *sblock;
1215
1216 r = dm_bm_read_lock(pmd->bm, THIN_SUPERBLOCK_LOCATION,
1217 &sb_validator, &sblock);
1218 if (r)
1219 return r;
1220
1221 disk_super = dm_block_data(sblock);
1098 *result = le64_to_cpu(disk_super->held_root); 1222 *result = le64_to_cpu(disk_super->held_root);
1099 1223
1100 return dm_bm_unlock(sblock); 1224 return dm_bm_unlock(sblock);
1101} 1225}
1102 1226
1103int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, 1227int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
1104 dm_block_t *result) 1228 dm_block_t *result)
1105{ 1229{
1106 int r; 1230 int r;
1107 1231
1108 down_read(&pmd->root_lock); 1232 down_read(&pmd->root_lock);
1109 r = __get_held_metadata_root(pmd, result); 1233 r = __get_metadata_snap(pmd, result);
1110 up_read(&pmd->root_lock); 1234 up_read(&pmd->root_lock);
1111 1235
1112 return r; 1236 return r;
diff --git a/drivers/md/dm-thin-metadata.h b/drivers/md/dm-thin-metadata.h
index ed4725e67c96..b88918ccdaf6 100644
--- a/drivers/md/dm-thin-metadata.h
+++ b/drivers/md/dm-thin-metadata.h
@@ -90,11 +90,18 @@ int dm_pool_get_metadata_transaction_id(struct dm_pool_metadata *pmd,
90 90
91/* 91/*
92 * Hold/get root for userspace transaction. 92 * Hold/get root for userspace transaction.
93 *
94 * The metadata snapshot is a copy of the current superblock (minus the
95 * space maps). Userland can access the data structures for READ
96 * operations only. A small performance hit is incurred by providing this
97 * copy of the metadata to userland due to extra copy-on-write operations
98 * on the metadata nodes. Release this as soon as you finish with it.
93 */ 99 */
94int dm_pool_hold_metadata_root(struct dm_pool_metadata *pmd); 100int dm_pool_reserve_metadata_snap(struct dm_pool_metadata *pmd);
101int dm_pool_release_metadata_snap(struct dm_pool_metadata *pmd);
95 102
96int dm_pool_get_held_metadata_root(struct dm_pool_metadata *pmd, 103int dm_pool_get_metadata_snap(struct dm_pool_metadata *pmd,
97 dm_block_t *result); 104 dm_block_t *result);
98 105
99/* 106/*
100 * Actions on a single virtual device. 107 * Actions on a single virtual device.
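The comment block above describes the new pool metadata snapshot API: reserve takes a copy-on-write reference on the current superblock for read-only use by userland, get reports the block where that copy lives, and release must be called as soon as possible because holding the snapshot makes later metadata updates pay extra copy-on-write costs. A hedged sketch of how a caller might drive the three entry points (the wrapper function is invented; pmd is assumed to come from dm_pool_metadata_open(), and error handling is trimmed):

/* Hypothetical caller inside the pool target code. */
static int example_inspect_metadata_snap(struct dm_pool_metadata *pmd)
{
	dm_block_t snap_root;
	int r;

	r = dm_pool_reserve_metadata_snap(pmd);      /* take the COW snapshot       */
	if (r)
		return r;                            /* e.g. -EBUSY if one is held  */

	r = dm_pool_get_metadata_snap(pmd, &snap_root);
	if (!r) {
		/* hand snap_root to a userspace tool for read-only inspection */
	}

	return dm_pool_release_metadata_snap(pmd);   /* drop it as soon as possible */
}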
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index eb3d138ff55a..37fdaf81bd1f 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -111,7 +111,7 @@ struct cell_key {
111 dm_block_t block; 111 dm_block_t block;
112}; 112};
113 113
114struct cell { 114struct dm_bio_prison_cell {
115 struct hlist_node list; 115 struct hlist_node list;
116 struct bio_prison *prison; 116 struct bio_prison *prison;
117 struct cell_key key; 117 struct cell_key key;
@@ -141,6 +141,8 @@ static uint32_t calc_nr_buckets(unsigned nr_cells)
141 return n; 141 return n;
142} 142}
143 143
144static struct kmem_cache *_cell_cache;
145
144/* 146/*
145 * @nr_cells should be the number of cells you want in use _concurrently_. 147 * @nr_cells should be the number of cells you want in use _concurrently_.
146 * Don't confuse it with the number of distinct keys. 148 * Don't confuse it with the number of distinct keys.
@@ -157,8 +159,7 @@ static struct bio_prison *prison_create(unsigned nr_cells)
157 return NULL; 159 return NULL;
158 160
159 spin_lock_init(&prison->lock); 161 spin_lock_init(&prison->lock);
160 prison->cell_pool = mempool_create_kmalloc_pool(nr_cells, 162 prison->cell_pool = mempool_create_slab_pool(nr_cells, _cell_cache);
161 sizeof(struct cell));
162 if (!prison->cell_pool) { 163 if (!prison->cell_pool) {
163 kfree(prison); 164 kfree(prison);
164 return NULL; 165 return NULL;
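prison_create() now draws cells from a mempool backed by a dedicated kmem_cache (_cell_cache) instead of a kmalloc pool, so every allocation is exactly sizeof(struct dm_bio_prison_cell) and the objects are visible as their own slab. The cache itself is expected to be created once at module init, along the lines of the sketch below (placement and error handling are illustrative, not taken from this hunk):

/* Illustrative only: create the cache before any prison can be constructed. */
static int __init example_create_cell_cache(void)
{
	_cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);  /* name and size taken from the struct */
	return _cell_cache ? 0 : -ENOMEM;
}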
@@ -194,10 +195,10 @@ static int keys_equal(struct cell_key *lhs, struct cell_key *rhs)
194 (lhs->block == rhs->block); 195 (lhs->block == rhs->block);
195} 196}
196 197
197static struct cell *__search_bucket(struct hlist_head *bucket, 198static struct dm_bio_prison_cell *__search_bucket(struct hlist_head *bucket,
198 struct cell_key *key) 199 struct cell_key *key)
199{ 200{
200 struct cell *cell; 201 struct dm_bio_prison_cell *cell;
201 struct hlist_node *tmp; 202 struct hlist_node *tmp;
202 203
203 hlist_for_each_entry(cell, tmp, bucket, list) 204 hlist_for_each_entry(cell, tmp, bucket, list)
@@ -214,12 +215,12 @@ static struct cell *__search_bucket(struct hlist_head *bucket,
214 * Returns 1 if the cell was already held, 0 if @inmate is the new holder. 215 * Returns 1 if the cell was already held, 0 if @inmate is the new holder.
215 */ 216 */
216static int bio_detain(struct bio_prison *prison, struct cell_key *key, 217static int bio_detain(struct bio_prison *prison, struct cell_key *key,
217 struct bio *inmate, struct cell **ref) 218 struct bio *inmate, struct dm_bio_prison_cell **ref)
218{ 219{
219 int r = 1; 220 int r = 1;
220 unsigned long flags; 221 unsigned long flags;
221 uint32_t hash = hash_key(prison, key); 222 uint32_t hash = hash_key(prison, key);
222 struct cell *cell, *cell2; 223 struct dm_bio_prison_cell *cell, *cell2;
223 224
224 BUG_ON(hash > prison->nr_buckets); 225 BUG_ON(hash > prison->nr_buckets);
225 226
@@ -273,7 +274,7 @@ out:
273/* 274/*
274 * @inmates must have been initialised prior to this call 275 * @inmates must have been initialised prior to this call
275 */ 276 */
276static void __cell_release(struct cell *cell, struct bio_list *inmates) 277static void __cell_release(struct dm_bio_prison_cell *cell, struct bio_list *inmates)
277{ 278{
278 struct bio_prison *prison = cell->prison; 279 struct bio_prison *prison = cell->prison;
279 280
@@ -287,7 +288,7 @@ static void __cell_release(struct cell *cell, struct bio_list *inmates)
287 mempool_free(cell, prison->cell_pool); 288 mempool_free(cell, prison->cell_pool);
288} 289}
289 290
290static void cell_release(struct cell *cell, struct bio_list *bios) 291static void cell_release(struct dm_bio_prison_cell *cell, struct bio_list *bios)
291{ 292{
292 unsigned long flags; 293 unsigned long flags;
293 struct bio_prison *prison = cell->prison; 294 struct bio_prison *prison = cell->prison;
@@ -303,7 +304,7 @@ static void cell_release(struct cell *cell, struct bio_list *bios)
303 * bio may be in the cell. This function releases the cell, and also does 304 * bio may be in the cell. This function releases the cell, and also does
304 * a sanity check. 305 * a sanity check.
305 */ 306 */
306static void __cell_release_singleton(struct cell *cell, struct bio *bio) 307static void __cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
307{ 308{
308 BUG_ON(cell->holder != bio); 309 BUG_ON(cell->holder != bio);
309 BUG_ON(!bio_list_empty(&cell->bios)); 310 BUG_ON(!bio_list_empty(&cell->bios));
@@ -311,7 +312,7 @@ static void __cell_release_singleton(struct cell *cell, struct bio *bio)
311 __cell_release(cell, NULL); 312 __cell_release(cell, NULL);
312} 313}
313 314
314static void cell_release_singleton(struct cell *cell, struct bio *bio) 315static void cell_release_singleton(struct dm_bio_prison_cell *cell, struct bio *bio)
315{ 316{
316 unsigned long flags; 317 unsigned long flags;
317 struct bio_prison *prison = cell->prison; 318 struct bio_prison *prison = cell->prison;
@@ -324,7 +325,8 @@ static void cell_release_singleton(struct cell *cell, struct bio *bio)
324/* 325/*
325 * Sometimes we don't want the holder, just the additional bios. 326 * Sometimes we don't want the holder, just the additional bios.
326 */ 327 */
327static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates) 328static void __cell_release_no_holder(struct dm_bio_prison_cell *cell,
329 struct bio_list *inmates)
328{ 330{
329 struct bio_prison *prison = cell->prison; 331 struct bio_prison *prison = cell->prison;
330 332
@@ -334,7 +336,8 @@ static void __cell_release_no_holder(struct cell *cell, struct bio_list *inmates
334 mempool_free(cell, prison->cell_pool); 336 mempool_free(cell, prison->cell_pool);
335} 337}
336 338
337static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates) 339static void cell_release_no_holder(struct dm_bio_prison_cell *cell,
340 struct bio_list *inmates)
338{ 341{
339 unsigned long flags; 342 unsigned long flags;
340 struct bio_prison *prison = cell->prison; 343 struct bio_prison *prison = cell->prison;
@@ -344,7 +347,7 @@ static void cell_release_no_holder(struct cell *cell, struct bio_list *inmates)
344 spin_unlock_irqrestore(&prison->lock, flags); 347 spin_unlock_irqrestore(&prison->lock, flags);
345} 348}
346 349
347static void cell_error(struct cell *cell) 350static void cell_error(struct dm_bio_prison_cell *cell)
348{ 351{
349 struct bio_prison *prison = cell->prison; 352 struct bio_prison *prison = cell->prison;
350 struct bio_list bios; 353 struct bio_list bios;
@@ -491,7 +494,7 @@ static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
491 * also provides the interface for creating and destroying internal 494 * also provides the interface for creating and destroying internal
492 * devices. 495 * devices.
493 */ 496 */
494struct new_mapping; 497struct dm_thin_new_mapping;
495 498
496struct pool_features { 499struct pool_features {
497 unsigned zero_new_blocks:1; 500 unsigned zero_new_blocks:1;
@@ -537,7 +540,7 @@ struct pool {
537 struct deferred_set shared_read_ds; 540 struct deferred_set shared_read_ds;
538 struct deferred_set all_io_ds; 541 struct deferred_set all_io_ds;
539 542
540 struct new_mapping *next_mapping; 543 struct dm_thin_new_mapping *next_mapping;
541 mempool_t *mapping_pool; 544 mempool_t *mapping_pool;
542 mempool_t *endio_hook_pool; 545 mempool_t *endio_hook_pool;
543}; 546};
@@ -630,11 +633,11 @@ static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev
630 633
631/*----------------------------------------------------------------*/ 634/*----------------------------------------------------------------*/
632 635
633struct endio_hook { 636struct dm_thin_endio_hook {
634 struct thin_c *tc; 637 struct thin_c *tc;
635 struct deferred_entry *shared_read_entry; 638 struct deferred_entry *shared_read_entry;
636 struct deferred_entry *all_io_entry; 639 struct deferred_entry *all_io_entry;
637 struct new_mapping *overwrite_mapping; 640 struct dm_thin_new_mapping *overwrite_mapping;
638}; 641};
639 642
640static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master) 643static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
@@ -647,7 +650,8 @@ static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
647 bio_list_init(master); 650 bio_list_init(master);
648 651
649 while ((bio = bio_list_pop(&bios))) { 652 while ((bio = bio_list_pop(&bios))) {
650 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 653 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
654
651 if (h->tc == tc) 655 if (h->tc == tc)
652 bio_endio(bio, DM_ENDIO_REQUEUE); 656 bio_endio(bio, DM_ENDIO_REQUEUE);
653 else 657 else
@@ -736,7 +740,7 @@ static void wake_worker(struct pool *pool)
736/* 740/*
737 * Bio endio functions. 741 * Bio endio functions.
738 */ 742 */
739struct new_mapping { 743struct dm_thin_new_mapping {
740 struct list_head list; 744 struct list_head list;
741 745
742 unsigned quiesced:1; 746 unsigned quiesced:1;
@@ -746,7 +750,7 @@ struct new_mapping {
746 struct thin_c *tc; 750 struct thin_c *tc;
747 dm_block_t virt_block; 751 dm_block_t virt_block;
748 dm_block_t data_block; 752 dm_block_t data_block;
749 struct cell *cell, *cell2; 753 struct dm_bio_prison_cell *cell, *cell2;
750 int err; 754 int err;
751 755
752 /* 756 /*
@@ -759,7 +763,7 @@ struct new_mapping {
759 bio_end_io_t *saved_bi_end_io; 763 bio_end_io_t *saved_bi_end_io;
760}; 764};
761 765
762static void __maybe_add_mapping(struct new_mapping *m) 766static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
763{ 767{
764 struct pool *pool = m->tc->pool; 768 struct pool *pool = m->tc->pool;
765 769
@@ -772,7 +776,7 @@ static void __maybe_add_mapping(struct new_mapping *m)
772static void copy_complete(int read_err, unsigned long write_err, void *context) 776static void copy_complete(int read_err, unsigned long write_err, void *context)
773{ 777{
774 unsigned long flags; 778 unsigned long flags;
775 struct new_mapping *m = context; 779 struct dm_thin_new_mapping *m = context;
776 struct pool *pool = m->tc->pool; 780 struct pool *pool = m->tc->pool;
777 781
778 m->err = read_err || write_err ? -EIO : 0; 782 m->err = read_err || write_err ? -EIO : 0;
@@ -786,8 +790,8 @@ static void copy_complete(int read_err, unsigned long write_err, void *context)
786static void overwrite_endio(struct bio *bio, int err) 790static void overwrite_endio(struct bio *bio, int err)
787{ 791{
788 unsigned long flags; 792 unsigned long flags;
789 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 793 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
790 struct new_mapping *m = h->overwrite_mapping; 794 struct dm_thin_new_mapping *m = h->overwrite_mapping;
791 struct pool *pool = m->tc->pool; 795 struct pool *pool = m->tc->pool;
792 796
793 m->err = err; 797 m->err = err;
@@ -811,7 +815,7 @@ static void overwrite_endio(struct bio *bio, int err)
811/* 815/*
812 * This sends the bios in the cell back to the deferred_bios list. 816 * This sends the bios in the cell back to the deferred_bios list.
813 */ 817 */
814static void cell_defer(struct thin_c *tc, struct cell *cell, 818static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell,
815 dm_block_t data_block) 819 dm_block_t data_block)
816{ 820{
817 struct pool *pool = tc->pool; 821 struct pool *pool = tc->pool;
@@ -828,7 +832,7 @@ static void cell_defer(struct thin_c *tc, struct cell *cell,
828 * Same as cell_defer above, except it omits one particular detainee, 832 * Same as cell_defer above, except it omits one particular detainee,
829 * a write bio that covers the block and has already been processed. 833 * a write bio that covers the block and has already been processed.
830 */ 834 */
831static void cell_defer_except(struct thin_c *tc, struct cell *cell) 835static void cell_defer_except(struct thin_c *tc, struct dm_bio_prison_cell *cell)
832{ 836{
833 struct bio_list bios; 837 struct bio_list bios;
834 struct pool *pool = tc->pool; 838 struct pool *pool = tc->pool;
@@ -843,7 +847,7 @@ static void cell_defer_except(struct thin_c *tc, struct cell *cell)
843 wake_worker(pool); 847 wake_worker(pool);
844} 848}
845 849
846static void process_prepared_mapping(struct new_mapping *m) 850static void process_prepared_mapping(struct dm_thin_new_mapping *m)
847{ 851{
848 struct thin_c *tc = m->tc; 852 struct thin_c *tc = m->tc;
849 struct bio *bio; 853 struct bio *bio;
@@ -886,7 +890,7 @@ static void process_prepared_mapping(struct new_mapping *m)
886 mempool_free(m, tc->pool->mapping_pool); 890 mempool_free(m, tc->pool->mapping_pool);
887} 891}
888 892
889static void process_prepared_discard(struct new_mapping *m) 893static void process_prepared_discard(struct dm_thin_new_mapping *m)
890{ 894{
891 int r; 895 int r;
892 struct thin_c *tc = m->tc; 896 struct thin_c *tc = m->tc;
@@ -909,11 +913,11 @@ static void process_prepared_discard(struct new_mapping *m)
909} 913}
910 914
911static void process_prepared(struct pool *pool, struct list_head *head, 915static void process_prepared(struct pool *pool, struct list_head *head,
912 void (*fn)(struct new_mapping *)) 916 void (*fn)(struct dm_thin_new_mapping *))
913{ 917{
914 unsigned long flags; 918 unsigned long flags;
915 struct list_head maps; 919 struct list_head maps;
916 struct new_mapping *m, *tmp; 920 struct dm_thin_new_mapping *m, *tmp;
917 921
918 INIT_LIST_HEAD(&maps); 922 INIT_LIST_HEAD(&maps);
919 spin_lock_irqsave(&pool->lock, flags); 923 spin_lock_irqsave(&pool->lock, flags);
@@ -957,9 +961,9 @@ static int ensure_next_mapping(struct pool *pool)
957 return pool->next_mapping ? 0 : -ENOMEM; 961 return pool->next_mapping ? 0 : -ENOMEM;
958} 962}
959 963
960static struct new_mapping *get_next_mapping(struct pool *pool) 964static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
961{ 965{
962 struct new_mapping *r = pool->next_mapping; 966 struct dm_thin_new_mapping *r = pool->next_mapping;
963 967
964 BUG_ON(!pool->next_mapping); 968 BUG_ON(!pool->next_mapping);
965 969
@@ -971,11 +975,11 @@ static struct new_mapping *get_next_mapping(struct pool *pool)
971static void schedule_copy(struct thin_c *tc, dm_block_t virt_block, 975static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
972 struct dm_dev *origin, dm_block_t data_origin, 976 struct dm_dev *origin, dm_block_t data_origin,
973 dm_block_t data_dest, 977 dm_block_t data_dest,
974 struct cell *cell, struct bio *bio) 978 struct dm_bio_prison_cell *cell, struct bio *bio)
975{ 979{
976 int r; 980 int r;
977 struct pool *pool = tc->pool; 981 struct pool *pool = tc->pool;
978 struct new_mapping *m = get_next_mapping(pool); 982 struct dm_thin_new_mapping *m = get_next_mapping(pool);
979 983
980 INIT_LIST_HEAD(&m->list); 984 INIT_LIST_HEAD(&m->list);
981 m->quiesced = 0; 985 m->quiesced = 0;
@@ -997,7 +1001,8 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
997 * bio immediately. Otherwise we use kcopyd to clone the data first. 1001 * bio immediately. Otherwise we use kcopyd to clone the data first.
998 */ 1002 */
999 if (io_overwrites_block(pool, bio)) { 1003 if (io_overwrites_block(pool, bio)) {
1000 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1004 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1005
1001 h->overwrite_mapping = m; 1006 h->overwrite_mapping = m;
1002 m->bio = bio; 1007 m->bio = bio;
1003 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 1008 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
@@ -1025,7 +1030,7 @@ static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
1025 1030
1026static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block, 1031static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1027 dm_block_t data_origin, dm_block_t data_dest, 1032 dm_block_t data_origin, dm_block_t data_dest,
1028 struct cell *cell, struct bio *bio) 1033 struct dm_bio_prison_cell *cell, struct bio *bio)
1029{ 1034{
1030 schedule_copy(tc, virt_block, tc->pool_dev, 1035 schedule_copy(tc, virt_block, tc->pool_dev,
1031 data_origin, data_dest, cell, bio); 1036 data_origin, data_dest, cell, bio);
@@ -1033,18 +1038,18 @@ static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
1033 1038
1034static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block, 1039static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
1035 dm_block_t data_dest, 1040 dm_block_t data_dest,
1036 struct cell *cell, struct bio *bio) 1041 struct dm_bio_prison_cell *cell, struct bio *bio)
1037{ 1042{
1038 schedule_copy(tc, virt_block, tc->origin_dev, 1043 schedule_copy(tc, virt_block, tc->origin_dev,
1039 virt_block, data_dest, cell, bio); 1044 virt_block, data_dest, cell, bio);
1040} 1045}
1041 1046
1042static void schedule_zero(struct thin_c *tc, dm_block_t virt_block, 1047static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1043 dm_block_t data_block, struct cell *cell, 1048 dm_block_t data_block, struct dm_bio_prison_cell *cell,
1044 struct bio *bio) 1049 struct bio *bio)
1045{ 1050{
1046 struct pool *pool = tc->pool; 1051 struct pool *pool = tc->pool;
1047 struct new_mapping *m = get_next_mapping(pool); 1052 struct dm_thin_new_mapping *m = get_next_mapping(pool);
1048 1053
1049 INIT_LIST_HEAD(&m->list); 1054 INIT_LIST_HEAD(&m->list);
1050 m->quiesced = 1; 1055 m->quiesced = 1;
@@ -1065,12 +1070,12 @@ static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
1065 process_prepared_mapping(m); 1070 process_prepared_mapping(m);
1066 1071
1067 else if (io_overwrites_block(pool, bio)) { 1072 else if (io_overwrites_block(pool, bio)) {
1068 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1073 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1074
1069 h->overwrite_mapping = m; 1075 h->overwrite_mapping = m;
1070 m->bio = bio; 1076 m->bio = bio;
1071 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio); 1077 save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
1072 remap_and_issue(tc, bio, data_block); 1078 remap_and_issue(tc, bio, data_block);
1073
1074 } else { 1079 } else {
1075 int r; 1080 int r;
1076 struct dm_io_region to; 1081 struct dm_io_region to;
@@ -1155,7 +1160,7 @@ static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
1155 */ 1160 */
1156static void retry_on_resume(struct bio *bio) 1161static void retry_on_resume(struct bio *bio)
1157{ 1162{
1158 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1163 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1159 struct thin_c *tc = h->tc; 1164 struct thin_c *tc = h->tc;
1160 struct pool *pool = tc->pool; 1165 struct pool *pool = tc->pool;
1161 unsigned long flags; 1166 unsigned long flags;
@@ -1165,7 +1170,7 @@ static void retry_on_resume(struct bio *bio)
1165 spin_unlock_irqrestore(&pool->lock, flags); 1170 spin_unlock_irqrestore(&pool->lock, flags);
1166} 1171}
1167 1172
1168static void no_space(struct cell *cell) 1173static void no_space(struct dm_bio_prison_cell *cell)
1169{ 1174{
1170 struct bio *bio; 1175 struct bio *bio;
1171 struct bio_list bios; 1176 struct bio_list bios;
@@ -1182,11 +1187,11 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1182 int r; 1187 int r;
1183 unsigned long flags; 1188 unsigned long flags;
1184 struct pool *pool = tc->pool; 1189 struct pool *pool = tc->pool;
1185 struct cell *cell, *cell2; 1190 struct dm_bio_prison_cell *cell, *cell2;
1186 struct cell_key key, key2; 1191 struct cell_key key, key2;
1187 dm_block_t block = get_bio_block(tc, bio); 1192 dm_block_t block = get_bio_block(tc, bio);
1188 struct dm_thin_lookup_result lookup_result; 1193 struct dm_thin_lookup_result lookup_result;
1189 struct new_mapping *m; 1194 struct dm_thin_new_mapping *m;
1190 1195
1191 build_virtual_key(tc->td, block, &key); 1196 build_virtual_key(tc->td, block, &key);
1192 if (bio_detain(tc->pool->prison, &key, bio, &cell)) 1197 if (bio_detain(tc->pool->prison, &key, bio, &cell))
@@ -1263,7 +1268,7 @@ static void process_discard(struct thin_c *tc, struct bio *bio)
1263static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block, 1268static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
1264 struct cell_key *key, 1269 struct cell_key *key,
1265 struct dm_thin_lookup_result *lookup_result, 1270 struct dm_thin_lookup_result *lookup_result,
1266 struct cell *cell) 1271 struct dm_bio_prison_cell *cell)
1267{ 1272{
1268 int r; 1273 int r;
1269 dm_block_t data_block; 1274 dm_block_t data_block;
@@ -1290,7 +1295,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1290 dm_block_t block, 1295 dm_block_t block,
1291 struct dm_thin_lookup_result *lookup_result) 1296 struct dm_thin_lookup_result *lookup_result)
1292{ 1297{
1293 struct cell *cell; 1298 struct dm_bio_prison_cell *cell;
1294 struct pool *pool = tc->pool; 1299 struct pool *pool = tc->pool;
1295 struct cell_key key; 1300 struct cell_key key;
1296 1301
@@ -1305,7 +1310,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1305 if (bio_data_dir(bio) == WRITE) 1310 if (bio_data_dir(bio) == WRITE)
1306 break_sharing(tc, bio, block, &key, lookup_result, cell); 1311 break_sharing(tc, bio, block, &key, lookup_result, cell);
1307 else { 1312 else {
1308 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1313 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1309 1314
1310 h->shared_read_entry = ds_inc(&pool->shared_read_ds); 1315 h->shared_read_entry = ds_inc(&pool->shared_read_ds);
1311 1316
@@ -1315,7 +1320,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1315} 1320}
1316 1321
1317static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block, 1322static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
1318 struct cell *cell) 1323 struct dm_bio_prison_cell *cell)
1319{ 1324{
1320 int r; 1325 int r;
1321 dm_block_t data_block; 1326 dm_block_t data_block;
@@ -1363,7 +1368,7 @@ static void process_bio(struct thin_c *tc, struct bio *bio)
1363{ 1368{
1364 int r; 1369 int r;
1365 dm_block_t block = get_bio_block(tc, bio); 1370 dm_block_t block = get_bio_block(tc, bio);
1366 struct cell *cell; 1371 struct dm_bio_prison_cell *cell;
1367 struct cell_key key; 1372 struct cell_key key;
1368 struct dm_thin_lookup_result lookup_result; 1373 struct dm_thin_lookup_result lookup_result;
1369 1374
@@ -1432,7 +1437,7 @@ static void process_deferred_bios(struct pool *pool)
1432 spin_unlock_irqrestore(&pool->lock, flags); 1437 spin_unlock_irqrestore(&pool->lock, flags);
1433 1438
1434 while ((bio = bio_list_pop(&bios))) { 1439 while ((bio = bio_list_pop(&bios))) {
1435 struct endio_hook *h = dm_get_mapinfo(bio)->ptr; 1440 struct dm_thin_endio_hook *h = dm_get_mapinfo(bio)->ptr;
1436 struct thin_c *tc = h->tc; 1441 struct thin_c *tc = h->tc;
1437 1442
1438 /* 1443 /*
@@ -1522,10 +1527,10 @@ static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
1522 wake_worker(pool); 1527 wake_worker(pool);
1523} 1528}
1524 1529
1525static struct endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio) 1530static struct dm_thin_endio_hook *thin_hook_bio(struct thin_c *tc, struct bio *bio)
1526{ 1531{
1527 struct pool *pool = tc->pool; 1532 struct pool *pool = tc->pool;
1528 struct endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO); 1533 struct dm_thin_endio_hook *h = mempool_alloc(pool->endio_hook_pool, GFP_NOIO);
1529 1534
1530 h->tc = tc; 1535 h->tc = tc;
1531 h->shared_read_entry = NULL; 1536 h->shared_read_entry = NULL;
@@ -1687,6 +1692,9 @@ static void __pool_destroy(struct pool *pool)
1687 kfree(pool); 1692 kfree(pool);
1688} 1693}
1689 1694
1695static struct kmem_cache *_new_mapping_cache;
1696static struct kmem_cache *_endio_hook_cache;
1697
1690static struct pool *pool_create(struct mapped_device *pool_md, 1698static struct pool *pool_create(struct mapped_device *pool_md,
1691 struct block_device *metadata_dev, 1699 struct block_device *metadata_dev,
1692 unsigned long block_size, char **error) 1700 unsigned long block_size, char **error)
@@ -1755,16 +1763,16 @@ static struct pool *pool_create(struct mapped_device *pool_md,
1755 ds_init(&pool->all_io_ds); 1763 ds_init(&pool->all_io_ds);
1756 1764
1757 pool->next_mapping = NULL; 1765 pool->next_mapping = NULL;
1758 pool->mapping_pool = 1766 pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
1759 mempool_create_kmalloc_pool(MAPPING_POOL_SIZE, sizeof(struct new_mapping)); 1767 _new_mapping_cache);
1760 if (!pool->mapping_pool) { 1768 if (!pool->mapping_pool) {
1761 *error = "Error creating pool's mapping mempool"; 1769 *error = "Error creating pool's mapping mempool";
1762 err_p = ERR_PTR(-ENOMEM); 1770 err_p = ERR_PTR(-ENOMEM);
1763 goto bad_mapping_pool; 1771 goto bad_mapping_pool;
1764 } 1772 }
1765 1773
1766 pool->endio_hook_pool = 1774 pool->endio_hook_pool = mempool_create_slab_pool(ENDIO_HOOK_POOL_SIZE,
1767 mempool_create_kmalloc_pool(ENDIO_HOOK_POOL_SIZE, sizeof(struct endio_hook)); 1775 _endio_hook_cache);
1768 if (!pool->endio_hook_pool) { 1776 if (!pool->endio_hook_pool) {
1769 *error = "Error creating pool's endio_hook mempool"; 1777 *error = "Error creating pool's endio_hook mempool";
1770 err_p = ERR_PTR(-ENOMEM); 1778 err_p = ERR_PTR(-ENOMEM);
@@ -2276,6 +2284,36 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
2276 return 0; 2284 return 0;
2277} 2285}
2278 2286
2287static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2288{
2289 int r;
2290
2291 r = check_arg_count(argc, 1);
2292 if (r)
2293 return r;
2294
2295 r = dm_pool_reserve_metadata_snap(pool->pmd);
2296 if (r)
2297 DMWARN("reserve_metadata_snap message failed.");
2298
2299 return r;
2300}
2301
2302static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2303{
2304 int r;
2305
2306 r = check_arg_count(argc, 1);
2307 if (r)
2308 return r;
2309
2310 r = dm_pool_release_metadata_snap(pool->pmd);
2311 if (r)
2312 DMWARN("release_metadata_snap message failed.");
2313
2314 return r;
2315}
2316
2279/* 2317/*
2280 * Messages supported: 2318 * Messages supported:
2281 * create_thin <dev_id> 2319 * create_thin <dev_id>
@@ -2283,6 +2321,8 @@ static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct po
2283 * delete <dev_id> 2321 * delete <dev_id>
2284 * trim <dev_id> <new_size_in_sectors> 2322 * trim <dev_id> <new_size_in_sectors>
2285 * set_transaction_id <current_trans_id> <new_trans_id> 2323 * set_transaction_id <current_trans_id> <new_trans_id>
2324 * reserve_metadata_snap
2325 * release_metadata_snap
2286 */ 2326 */
2287static int pool_message(struct dm_target *ti, unsigned argc, char **argv) 2327static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2288{ 2328{
@@ -2302,6 +2342,12 @@ static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2302 else if (!strcasecmp(argv[0], "set_transaction_id")) 2342 else if (!strcasecmp(argv[0], "set_transaction_id"))
2303 r = process_set_transaction_id_mesg(argc, argv, pool); 2343 r = process_set_transaction_id_mesg(argc, argv, pool);
2304 2344
2345 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2346 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2347
2348 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2349 r = process_release_metadata_snap_mesg(argc, argv, pool);
2350
2305 else 2351 else
2306 DMWARN("Unrecognised thin pool target message received: %s", argv[0]); 2352 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2307 2353
@@ -2361,7 +2407,7 @@ static int pool_status(struct dm_target *ti, status_type_t type,
2361 if (r) 2407 if (r)
2362 return r; 2408 return r;
2363 2409
2364 r = dm_pool_get_held_metadata_root(pool->pmd, &held_root); 2410 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2365 if (r) 2411 if (r)
2366 return r; 2412 return r;
2367 2413
@@ -2457,7 +2503,7 @@ static struct target_type pool_target = {
2457 .name = "thin-pool", 2503 .name = "thin-pool",
2458 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE | 2504 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2459 DM_TARGET_IMMUTABLE, 2505 DM_TARGET_IMMUTABLE,
2460 .version = {1, 1, 0}, 2506 .version = {1, 2, 0},
2461 .module = THIS_MODULE, 2507 .module = THIS_MODULE,
2462 .ctr = pool_ctr, 2508 .ctr = pool_ctr,
2463 .dtr = pool_dtr, 2509 .dtr = pool_dtr,
@@ -2613,9 +2659,9 @@ static int thin_endio(struct dm_target *ti,
2613 union map_info *map_context) 2659 union map_info *map_context)
2614{ 2660{
2615 unsigned long flags; 2661 unsigned long flags;
2616 struct endio_hook *h = map_context->ptr; 2662 struct dm_thin_endio_hook *h = map_context->ptr;
2617 struct list_head work; 2663 struct list_head work;
2618 struct new_mapping *m, *tmp; 2664 struct dm_thin_new_mapping *m, *tmp;
2619 struct pool *pool = h->tc->pool; 2665 struct pool *pool = h->tc->pool;
2620 2666
2621 if (h->shared_read_entry) { 2667 if (h->shared_read_entry) {
@@ -2755,7 +2801,32 @@ static int __init dm_thin_init(void)
2755 2801
2756 r = dm_register_target(&pool_target); 2802 r = dm_register_target(&pool_target);
2757 if (r) 2803 if (r)
2758 dm_unregister_target(&thin_target); 2804 goto bad_pool_target;
2805
2806 r = -ENOMEM;
2807
2808 _cell_cache = KMEM_CACHE(dm_bio_prison_cell, 0);
2809 if (!_cell_cache)
2810 goto bad_cell_cache;
2811
2812 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2813 if (!_new_mapping_cache)
2814 goto bad_new_mapping_cache;
2815
2816 _endio_hook_cache = KMEM_CACHE(dm_thin_endio_hook, 0);
2817 if (!_endio_hook_cache)
2818 goto bad_endio_hook_cache;
2819
2820 return 0;
2821
2822bad_endio_hook_cache:
2823 kmem_cache_destroy(_new_mapping_cache);
2824bad_new_mapping_cache:
2825 kmem_cache_destroy(_cell_cache);
2826bad_cell_cache:
2827 dm_unregister_target(&pool_target);
2828bad_pool_target:
2829 dm_unregister_target(&thin_target);
2759 2830
2760 return r; 2831 return r;
2761} 2832}
@@ -2764,6 +2835,10 @@ static void dm_thin_exit(void)
2764{ 2835{
2765 dm_unregister_target(&thin_target); 2836 dm_unregister_target(&thin_target);
2766 dm_unregister_target(&pool_target); 2837 dm_unregister_target(&pool_target);
2838
2839 kmem_cache_destroy(_cell_cache);
2840 kmem_cache_destroy(_new_mapping_cache);
2841 kmem_cache_destroy(_endio_hook_cache);
2767} 2842}
2768 2843
2769module_init(dm_thin_init); 2844module_init(dm_thin_init);
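The dm-thin.c hunks above switch the pool's mapping and endio-hook mempools from kmalloc-backed pools to slab-cache-backed ones (KMEM_CACHE() plus mempool_create_slab_pool()), create the caches in dm_thin_init() with goto-style unwinding, and destroy them in dm_thin_exit(). As a minimal, hedged sketch of that lifecycle, with an invented struct example_item and pool size rather than the driver's real types, the pattern looks roughly like this:

/* Sketch only: slab-backed mempool lifecycle as used by the hunks above.
 * "struct example_item" and EXAMPLE_POOL_SIZE are invented for this example. */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mempool.h>

struct example_item {
	int payload;
};

#define EXAMPLE_POOL_SIZE 16

static struct kmem_cache *example_cache;
static mempool_t *example_pool;

static int __init example_init(void)
{
	/* One slab cache per object type, created once at module init. */
	example_cache = KMEM_CACHE(example_item, 0);
	if (!example_cache)
		return -ENOMEM;

	/* The mempool keeps EXAMPLE_POOL_SIZE objects preallocated for forward progress. */
	example_pool = mempool_create_slab_pool(EXAMPLE_POOL_SIZE, example_cache);
	if (!example_pool) {
		kmem_cache_destroy(example_cache);
		return -ENOMEM;
	}
	return 0;
}

static void __exit example_exit(void)
{
	/* Destroy the pool before the cache that backs it. */
	mempool_destroy(example_pool);
	kmem_cache_destroy(example_cache);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");

Objects then come from mempool_alloc(example_pool, GFP_NOIO) and go back with mempool_free(), mirroring how the pool target hands out its dm_thin_new_mapping and dm_thin_endio_hook structures.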
diff --git a/drivers/md/persistent-data/dm-transaction-manager.c b/drivers/md/persistent-data/dm-transaction-manager.c
index 6f8d38747d7f..400fe144c0cd 100644
--- a/drivers/md/persistent-data/dm-transaction-manager.c
+++ b/drivers/md/persistent-data/dm-transaction-manager.c
@@ -249,6 +249,7 @@ int dm_tm_shadow_block(struct dm_transaction_manager *tm, dm_block_t orig,
249 249
250 return r; 250 return r;
251} 251}
252EXPORT_SYMBOL_GPL(dm_tm_shadow_block);
252 253
253int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b, 254int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
254 struct dm_block_validator *v, 255 struct dm_block_validator *v,
@@ -259,6 +260,7 @@ int dm_tm_read_lock(struct dm_transaction_manager *tm, dm_block_t b,
259 260
260 return dm_bm_read_lock(tm->bm, b, v, blk); 261 return dm_bm_read_lock(tm->bm, b, v, blk);
261} 262}
263EXPORT_SYMBOL_GPL(dm_tm_read_lock);
262 264
263int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b) 265int dm_tm_unlock(struct dm_transaction_manager *tm, struct dm_block *b)
264{ 266{
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 835de7168cd3..a9c7981ddd24 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -2550,6 +2550,7 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2550 err = -EINVAL; 2550 err = -EINVAL;
2551 spin_lock_init(&conf->device_lock); 2551 spin_lock_init(&conf->device_lock);
2552 rdev_for_each(rdev, mddev) { 2552 rdev_for_each(rdev, mddev) {
2553 struct request_queue *q;
2553 int disk_idx = rdev->raid_disk; 2554 int disk_idx = rdev->raid_disk;
2554 if (disk_idx >= mddev->raid_disks 2555 if (disk_idx >= mddev->raid_disks
2555 || disk_idx < 0) 2556 || disk_idx < 0)
@@ -2562,6 +2563,9 @@ static struct r1conf *setup_conf(struct mddev *mddev)
2562 if (disk->rdev) 2563 if (disk->rdev)
2563 goto abort; 2564 goto abort;
2564 disk->rdev = rdev; 2565 disk->rdev = rdev;
2566 q = bdev_get_queue(rdev->bdev);
2567 if (q->merge_bvec_fn)
2568 mddev->merge_check_needed = 1;
2565 2569
2566 disk->head_position = 0; 2570 disk->head_position = 0;
2567 } 2571 }
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 987db37cb875..99ae6068e456 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -3475,6 +3475,7 @@ static int run(struct mddev *mddev)
3475 3475
3476 rdev_for_each(rdev, mddev) { 3476 rdev_for_each(rdev, mddev) {
3477 long long diff; 3477 long long diff;
3478 struct request_queue *q;
3478 3479
3479 disk_idx = rdev->raid_disk; 3480 disk_idx = rdev->raid_disk;
3480 if (disk_idx < 0) 3481 if (disk_idx < 0)
@@ -3493,6 +3494,9 @@ static int run(struct mddev *mddev)
3493 goto out_free_conf; 3494 goto out_free_conf;
3494 disk->rdev = rdev; 3495 disk->rdev = rdev;
3495 } 3496 }
3497 q = bdev_get_queue(rdev->bdev);
3498 if (q->merge_bvec_fn)
3499 mddev->merge_check_needed = 1;
3496 diff = (rdev->new_data_offset - rdev->data_offset); 3500 diff = (rdev->new_data_offset - rdev->data_offset);
3497 if (!mddev->reshape_backwards) 3501 if (!mddev->reshape_backwards)
3498 diff = -diff; 3502 diff = -diff;
diff --git a/drivers/media/video/pms.c b/drivers/media/video/pms.c
index af2d9086d7e8..c370c2d87c17 100644
--- a/drivers/media/video/pms.c
+++ b/drivers/media/video/pms.c
@@ -29,6 +29,7 @@
29#include <linux/ioport.h> 29#include <linux/ioport.h>
30#include <linux/init.h> 30#include <linux/init.h>
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/slab.h>
32#include <linux/uaccess.h> 33#include <linux/uaccess.h>
33#include <linux/isa.h> 34#include <linux/isa.h>
34#include <asm/io.h> 35#include <asm/io.h>
diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
index a5c591ffe395..d99db5623acf 100644
--- a/drivers/message/fusion/mptbase.c
+++ b/drivers/message/fusion/mptbase.c
@@ -1653,7 +1653,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1653 unsigned long port; 1653 unsigned long port;
1654 u32 msize; 1654 u32 msize;
1655 u32 psize; 1655 u32 psize;
1656 u8 revision;
1657 int r = -ENODEV; 1656 int r = -ENODEV;
1658 struct pci_dev *pdev; 1657 struct pci_dev *pdev;
1659 1658
@@ -1670,8 +1669,6 @@ mpt_mapresources(MPT_ADAPTER *ioc)
1670 return r; 1669 return r;
1671 } 1670 }
1672 1671
1673 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision);
1674
1675 if (sizeof(dma_addr_t) > 4) { 1672 if (sizeof(dma_addr_t) > 4) {
1676 const uint64_t required_mask = dma_get_required_mask 1673 const uint64_t required_mask = dma_get_required_mask
1677 (&pdev->dev); 1674 (&pdev->dev);
@@ -1779,7 +1776,6 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1779 MPT_ADAPTER *ioc; 1776 MPT_ADAPTER *ioc;
1780 u8 cb_idx; 1777 u8 cb_idx;
1781 int r = -ENODEV; 1778 int r = -ENODEV;
1782 u8 revision;
1783 u8 pcixcmd; 1779 u8 pcixcmd;
1784 static int mpt_ids = 0; 1780 static int mpt_ids = 0;
1785#ifdef CONFIG_PROC_FS 1781#ifdef CONFIG_PROC_FS
@@ -1887,8 +1883,8 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1887 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n", 1883 dinitprintk(ioc, printk(MYIOC_s_INFO_FMT "facts @ %p, pfacts[0] @ %p\n",
1888 ioc->name, &ioc->facts, &ioc->pfacts[0])); 1884 ioc->name, &ioc->facts, &ioc->pfacts[0]));
1889 1885
1890 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1886 mpt_get_product_name(pdev->vendor, pdev->device, pdev->revision,
1891 mpt_get_product_name(pdev->vendor, pdev->device, revision, ioc->prod_name); 1887 ioc->prod_name);
1892 1888
1893 switch (pdev->device) 1889 switch (pdev->device)
1894 { 1890 {
@@ -1903,7 +1899,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1903 break; 1899 break;
1904 1900
1905 case MPI_MANUFACTPAGE_DEVICEID_FC929X: 1901 case MPI_MANUFACTPAGE_DEVICEID_FC929X:
1906 if (revision < XL_929) { 1902 if (pdev->revision < XL_929) {
1907 /* 929X Chip Fix. Set Split transactions level 1903 /* 929X Chip Fix. Set Split transactions level
1908 * for PCIX. Set MOST bits to zero. 1904 * for PCIX. Set MOST bits to zero.
1909 */ 1905 */
@@ -1934,7 +1930,7 @@ mpt_attach(struct pci_dev *pdev, const struct pci_device_id *id)
1934 /* 1030 Chip Fix. Disable Split transactions 1930 /* 1030 Chip Fix. Disable Split transactions
1935 * for PCIX. Set MOST bits to zero if Rev < C0( = 8). 1931 * for PCIX. Set MOST bits to zero if Rev < C0( = 8).
1936 */ 1932 */
1937 if (revision < C0_1030) { 1933 if (pdev->revision < C0_1030) {
1938 pci_read_config_byte(pdev, 0x6a, &pcixcmd); 1934 pci_read_config_byte(pdev, 0x6a, &pcixcmd);
1939 pcixcmd &= 0x8F; 1935 pcixcmd &= 0x8F;
1940 pci_write_config_byte(pdev, 0x6a, pcixcmd); 1936 pci_write_config_byte(pdev, 0x6a, pcixcmd);
@@ -6483,6 +6479,7 @@ mpt_config(MPT_ADAPTER *ioc, CONFIGPARMS *pCfg)
6483 printk(MYIOC_s_INFO_FMT "%s: host reset in" 6479 printk(MYIOC_s_INFO_FMT "%s: host reset in"
6484 " progress mpt_config timed out.!!\n", 6480 " progress mpt_config timed out.!!\n",
6485 __func__, ioc->name); 6481 __func__, ioc->name);
6482 mutex_unlock(&ioc->mptbase_cmds.mutex);
6486 return -EFAULT; 6483 return -EFAULT;
6487 } 6484 }
6488 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags); 6485 spin_unlock_irqrestore(&ioc->taskmgmt_lock, flags);
diff --git a/drivers/message/fusion/mptctl.c b/drivers/message/fusion/mptctl.c
index 6e6e16aab9da..b383b6961e59 100644
--- a/drivers/message/fusion/mptctl.c
+++ b/drivers/message/fusion/mptctl.c
@@ -1250,7 +1250,6 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1250 int iocnum; 1250 int iocnum;
1251 unsigned int port; 1251 unsigned int port;
1252 int cim_rev; 1252 int cim_rev;
1253 u8 revision;
1254 struct scsi_device *sdev; 1253 struct scsi_device *sdev;
1255 VirtDevice *vdevice; 1254 VirtDevice *vdevice;
1256 1255
@@ -1324,8 +1323,7 @@ mptctl_getiocinfo (unsigned long arg, unsigned int data_size)
1324 pdev = (struct pci_dev *) ioc->pcidev; 1323 pdev = (struct pci_dev *) ioc->pcidev;
1325 1324
1326 karg->pciId = pdev->device; 1325 karg->pciId = pdev->device;
1327 pci_read_config_byte(pdev, PCI_CLASS_REVISION, &revision); 1326 karg->hwRev = pdev->revision;
1328 karg->hwRev = revision;
1329 karg->subSystemDevice = pdev->subsystem_device; 1327 karg->subSystemDevice = pdev->subsystem_device;
1330 karg->subSystemVendor = pdev->subsystem_vendor; 1328 karg->subSystemVendor = pdev->subsystem_vendor;
1331 1329
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 671c8bc14bbc..50e83dc5dc49 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2735,6 +2735,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
2735 REGULATOR_SUPPLY("vcore", "uart2"), 2735 REGULATOR_SUPPLY("vcore", "uart2"),
2736 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"), 2736 REGULATOR_SUPPLY("v-ape", "nmk-ske-keypad.0"),
2737 REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"), 2737 REGULATOR_SUPPLY("v-hsi", "ste_hsi.0"),
2738 REGULATOR_SUPPLY("vddvario", "smsc911x.0"),
2738}; 2739};
2739 2740
2740static struct regulator_consumer_supply db8500_vsmps2_consumers[] = { 2741static struct regulator_consumer_supply db8500_vsmps2_consumers[] = {
diff --git a/drivers/mfd/stmpe-i2c.c b/drivers/mfd/stmpe-i2c.c
index 373f423b1181..947a06a1845f 100644
--- a/drivers/mfd/stmpe-i2c.c
+++ b/drivers/mfd/stmpe-i2c.c
@@ -6,7 +6,7 @@
6 * 6 *
7 * License Terms: GNU General Public License, version 2 7 * License Terms: GNU General Public License, version 2
8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson 8 * Author: Rabin Vincent <rabin.vincent@stericsson.com> for ST-Ericsson
9 * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics 9 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
10 */ 10 */
11 11
12#include <linux/i2c.h> 12#include <linux/i2c.h>
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index afd459013ecb..9edfe864cc05 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -4,7 +4,7 @@
4 * Copyright (C) ST Microelectronics SA 2011 4 * Copyright (C) ST Microelectronics SA 2011
5 * 5 *
6 * License Terms: GNU General Public License, version 2 6 * License Terms: GNU General Public License, version 2
7 * Author: Viresh Kumar <viresh.kumar@st.com> for ST Microelectronics 7 * Author: Viresh Kumar <viresh.linux@gmail.com> for ST Microelectronics
8 */ 8 */
9 9
10#include <linux/spi/spi.h> 10#include <linux/spi/spi.h>
@@ -146,4 +146,4 @@ module_exit(stmpe_exit);
146 146
147MODULE_LICENSE("GPL v2"); 147MODULE_LICENSE("GPL v2");
148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver"); 148MODULE_DESCRIPTION("STMPE MFD SPI Interface Driver");
149MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 149MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 93936f1b75eb..23f5463d4cae 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -835,7 +835,7 @@ static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
835 struct mei_cl *cl, 835 struct mei_cl *cl,
836 struct mei_io_list *cmpl_list) 836 struct mei_io_list *cmpl_list)
837{ 837{
838 if ((*slots * sizeof(u32)) >= (sizeof(struct mei_msg_hdr) + 838 if ((*slots * sizeof(u32)) < (sizeof(struct mei_msg_hdr) +
839 sizeof(struct hbm_flow_control))) { 839 sizeof(struct hbm_flow_control))) {
840 /* return the cancel routine */ 840 /* return the cancel routine */
841 list_del(&cb_pos->cb_list); 841 list_del(&cb_pos->cb_list);
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index c70333228337..7de13891e49e 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -982,7 +982,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
982 err = request_threaded_irq(pdev->irq, 982 err = request_threaded_irq(pdev->irq,
983 NULL, 983 NULL,
984 mei_interrupt_thread_handler, 984 mei_interrupt_thread_handler,
985 0, mei_driver_name, dev); 985 IRQF_ONESHOT, mei_driver_name, dev);
986 else 986 else
987 err = request_threaded_irq(pdev->irq, 987 err = request_threaded_irq(pdev->irq,
988 mei_interrupt_quick_handler, 988 mei_interrupt_quick_handler,
@@ -992,7 +992,7 @@ static int __devinit mei_probe(struct pci_dev *pdev,
992 if (err) { 992 if (err) {
993 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n", 993 dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
994 pdev->irq); 994 pdev->irq);
995 goto unmap_memory; 995 goto disable_msi;
996 } 996 }
997 INIT_DELAYED_WORK(&dev->timer_work, mei_timer); 997 INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
998 if (mei_hw_init(dev)) { 998 if (mei_hw_init(dev)) {
@@ -1023,8 +1023,8 @@ release_irq:
1023 mei_disable_interrupts(dev); 1023 mei_disable_interrupts(dev);
1024 flush_scheduled_work(); 1024 flush_scheduled_work();
1025 free_irq(pdev->irq, dev); 1025 free_irq(pdev->irq, dev);
1026disable_msi:
1026 pci_disable_msi(pdev); 1027 pci_disable_msi(pdev);
1027unmap_memory:
1028 pci_iounmap(pdev, dev->mem_addr); 1028 pci_iounmap(pdev, dev->mem_addr);
1029free_device: 1029free_device:
1030 kfree(dev); 1030 kfree(dev);
@@ -1101,6 +1101,8 @@ static void __devexit mei_remove(struct pci_dev *pdev)
1101 1101
1102 pci_release_regions(pdev); 1102 pci_release_regions(pdev);
1103 pci_disable_device(pdev); 1103 pci_disable_device(pdev);
1104
1105 misc_deregister(&mei_misc_device);
1104} 1106}
1105#ifdef CONFIG_PM 1107#ifdef CONFIG_PM
1106static int mei_pci_suspend(struct device *device) 1108static int mei_pci_suspend(struct device *device)
@@ -1216,7 +1218,6 @@ module_init(mei_init_module);
1216 */ 1218 */
1217static void __exit mei_exit_module(void) 1219static void __exit mei_exit_module(void)
1218{ 1220{
1219 misc_deregister(&mei_misc_device);
1220 pci_unregister_driver(&mei_driver); 1221 pci_unregister_driver(&mei_driver);
1221 1222
1222 pr_debug("unloaded successfully.\n"); 1223 pr_debug("unloaded successfully.\n");
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index 6be5605707b4..e2ec0505eb5c 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -341,7 +341,7 @@ static const struct watchdog_ops wd_ops = {
341}; 341};
342static const struct watchdog_info wd_info = { 342static const struct watchdog_info wd_info = {
343 .identity = INTEL_AMT_WATCHDOG_ID, 343 .identity = INTEL_AMT_WATCHDOG_ID,
344 .options = WDIOF_KEEPALIVEPING, 344 .options = WDIOF_KEEPALIVEPING | WDIOF_ALARMONLY,
345}; 345};
346 346
347static struct watchdog_device amt_wd_dev = { 347static struct watchdog_device amt_wd_dev = {
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 2d4a4b746750..258b203397aa 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1326,7 +1326,7 @@ static int mmc_suspend(struct mmc_host *host)
1326 if (!err) 1326 if (!err)
1327 mmc_card_set_sleep(host->card); 1327 mmc_card_set_sleep(host->card);
1328 } else if (!mmc_host_is_spi(host)) 1328 } else if (!mmc_host_is_spi(host))
1329 mmc_deselect_cards(host); 1329 err = mmc_deselect_cards(host);
1330 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200); 1330 host->card->state &= ~(MMC_STATE_HIGHSPEED | MMC_STATE_HIGHSPEED_200);
1331 mmc_release_host(host); 1331 mmc_release_host(host);
1332 1332
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index c272c6868ecf..b2b43f624b9e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1075,16 +1075,18 @@ static void mmc_sd_detect(struct mmc_host *host)
1075 */ 1075 */
1076static int mmc_sd_suspend(struct mmc_host *host) 1076static int mmc_sd_suspend(struct mmc_host *host)
1077{ 1077{
1078 int err = 0;
1079
1078 BUG_ON(!host); 1080 BUG_ON(!host);
1079 BUG_ON(!host->card); 1081 BUG_ON(!host->card);
1080 1082
1081 mmc_claim_host(host); 1083 mmc_claim_host(host);
1082 if (!mmc_host_is_spi(host)) 1084 if (!mmc_host_is_spi(host))
1083 mmc_deselect_cards(host); 1085 err = mmc_deselect_cards(host);
1084 host->card->state &= ~MMC_STATE_HIGHSPEED; 1086 host->card->state &= ~MMC_STATE_HIGHSPEED;
1085 mmc_release_host(host); 1087 mmc_release_host(host);
1086 1088
1087 return 0; 1089 return err;
1088} 1090}
1089 1091
1090/* 1092/*
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 13d0e95380ab..41c5fd8848f4 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -218,6 +218,12 @@ static int sdio_enable_wide(struct mmc_card *card)
218 if (ret) 218 if (ret)
219 return ret; 219 return ret;
220 220
221 if ((ctrl & SDIO_BUS_WIDTH_MASK) == SDIO_BUS_WIDTH_RESERVED)
222 pr_warning("%s: SDIO_CCCR_IF is invalid: 0x%02x\n",
223 mmc_hostname(card->host), ctrl);
224
225 /* set as 4-bit bus width */
226 ctrl &= ~SDIO_BUS_WIDTH_MASK;
221 ctrl |= SDIO_BUS_WIDTH_4BIT; 227 ctrl |= SDIO_BUS_WIDTH_4BIT;
222 228
223 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL); 229 ret = mmc_io_rw_direct(card, 1, 0, SDIO_CCCR_IF, ctrl, NULL);
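The sdio_enable_wide() hunk above now warns when SDIO_CCCR_IF reports the reserved bus-width encoding and, more importantly, clears SDIO_BUS_WIDTH_MASK before OR-ing in the 4-bit value. A standalone sketch of why the clear matters; the 2-bit encodings used below (0x0 one-bit, 0x1 reserved, 0x2 four-bit) are assumptions for the demo, not taken from the patch:

/* Hedged illustration: the width field must be cleared before setting it. */
#include <stdio.h>
#include <stdint.h>

#define BUS_WIDTH_MASK     0x03
#define BUS_WIDTH_RESERVED 0x01
#define BUS_WIDTH_4BIT     0x02

int main(void)
{
	uint8_t ctrl = BUS_WIDTH_RESERVED;      /* card reports the reserved encoding */

	uint8_t wrong = ctrl | BUS_WIDTH_4BIT;  /* stale bit survives: 0x01 | 0x02 = 0x03 */
	uint8_t right = (uint8_t)((ctrl & ~BUS_WIDTH_MASK) | BUS_WIDTH_4BIT);

	printf("without clearing: 0x%02x\n", wrong);  /* 0x03, not the 4-bit value */
	printf("with clearing:    0x%02x\n", right);  /* 0x02, 4-bit as intended */
	return 0;
}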
diff --git a/drivers/mmc/host/atmel-mci-regs.h b/drivers/mmc/host/atmel-mci-regs.h
index 787aba1682bb..ab56f7db5315 100644
--- a/drivers/mmc/host/atmel-mci-regs.h
+++ b/drivers/mmc/host/atmel-mci-regs.h
@@ -140,4 +140,18 @@
140#define atmci_writel(port,reg,value) \ 140#define atmci_writel(port,reg,value) \
141 __raw_writel((value), (port)->regs + reg) 141 __raw_writel((value), (port)->regs + reg)
142 142
143/*
144 * Fix sconfig's burst size according to atmel MCI. We need to convert them as:
145 * 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3.
146 *
147 * This can be done by finding most significant bit set.
148 */
149static inline unsigned int atmci_convert_chksize(unsigned int maxburst)
150{
151 if (maxburst > 1)
152 return fls(maxburst) - 2;
153 else
154 return 0;
155}
156
143#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */ 157#endif /* __DRIVERS_MMC_ATMEL_MCI_H__ */
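The new atmci_convert_chksize() helper above maps a DMA maxburst of 1/4/8/16 to the controller's chunk-size encodings 0/1/2/3 by locating the most significant set bit. A quick standalone check of that mapping, using __builtin_clz as a stand-in for the kernel's fls():

/* Standalone check of the maxburst -> CHKSIZE mapping added above. */
#include <stdio.h>

/* Userspace stand-in for the kernel's fls(): 1-based index of the highest set bit. */
static int fls_emul(unsigned int x)
{
	return x ? 32 - __builtin_clz(x) : 0;
}

static unsigned int convert_chksize(unsigned int maxburst)
{
	return maxburst > 1 ? (unsigned int)(fls_emul(maxburst) - 2) : 0;
}

int main(void)
{
	unsigned int bursts[] = { 1, 4, 8, 16 };

	for (int i = 0; i < 4; i++)
		printf("maxburst %2u -> chksize %u\n", bursts[i], convert_chksize(bursts[i]));
	/* Prints 1 -> 0, 4 -> 1, 8 -> 2, 16 -> 3, matching the comment in the header. */
	return 0;
}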
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index 420aca642b14..f2c115e06438 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -910,6 +910,7 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
910 enum dma_data_direction direction; 910 enum dma_data_direction direction;
911 enum dma_transfer_direction slave_dirn; 911 enum dma_transfer_direction slave_dirn;
912 unsigned int sglen; 912 unsigned int sglen;
913 u32 maxburst;
913 u32 iflags; 914 u32 iflags;
914 915
915 data->error = -EINPROGRESS; 916 data->error = -EINPROGRESS;
@@ -943,17 +944,18 @@ atmci_prepare_data_dma(struct atmel_mci *host, struct mmc_data *data)
943 if (!chan) 944 if (!chan)
944 return -ENODEV; 945 return -ENODEV;
945 946
946 if (host->caps.has_dma)
947 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(3) | ATMCI_DMAEN);
948
949 if (data->flags & MMC_DATA_READ) { 947 if (data->flags & MMC_DATA_READ) {
950 direction = DMA_FROM_DEVICE; 948 direction = DMA_FROM_DEVICE;
951 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM; 949 host->dma_conf.direction = slave_dirn = DMA_DEV_TO_MEM;
950 maxburst = atmci_convert_chksize(host->dma_conf.src_maxburst);
952 } else { 951 } else {
953 direction = DMA_TO_DEVICE; 952 direction = DMA_TO_DEVICE;
954 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV; 953 host->dma_conf.direction = slave_dirn = DMA_MEM_TO_DEV;
954 maxburst = atmci_convert_chksize(host->dma_conf.dst_maxburst);
955 } 955 }
956 956
957 atmci_writel(host, ATMCI_DMA, ATMCI_DMA_CHKSIZE(maxburst) | ATMCI_DMAEN);
958
957 sglen = dma_map_sg(chan->device->dev, data->sg, 959 sglen = dma_map_sg(chan->device->dev, data->sg,
958 data->sg_len, direction); 960 data->sg_len, direction);
959 961
@@ -2314,6 +2316,8 @@ static int __init atmci_probe(struct platform_device *pdev)
2314 2316
2315 platform_set_drvdata(pdev, host); 2317 platform_set_drvdata(pdev, host);
2316 2318
2319 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2320
2317 /* We need at least one slot to succeed */ 2321 /* We need at least one slot to succeed */
2318 nr_slots = 0; 2322 nr_slots = 0;
2319 ret = -ENODEV; 2323 ret = -ENODEV;
@@ -2352,8 +2356,6 @@ static int __init atmci_probe(struct platform_device *pdev)
2352 } 2356 }
2353 } 2357 }
2354 2358
2355 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2356
2357 dev_info(&pdev->dev, 2359 dev_info(&pdev->dev,
2358 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2360 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2359 host->mapbase, irq, nr_slots); 2361 host->mapbase, irq, nr_slots);
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index 9bbf45f8c538..1ca5e72ceb65 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -418,6 +418,8 @@ static int dw_mci_idmac_init(struct dw_mci *host)
418 p->des3 = host->sg_dma; 418 p->des3 = host->sg_dma;
419 p->des0 = IDMAC_DES0_ER; 419 p->des0 = IDMAC_DES0_ER;
420 420
421 mci_writel(host, BMOD, SDMMC_IDMAC_SWRESET);
422
421 /* Mask out interrupts - get Tx & Rx complete only */ 423 /* Mask out interrupts - get Tx & Rx complete only */
422 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI | 424 mci_writel(host, IDINTEN, SDMMC_IDMAC_INT_NI | SDMMC_IDMAC_INT_RI |
423 SDMMC_IDMAC_INT_TI); 425 SDMMC_IDMAC_INT_TI);
@@ -615,14 +617,15 @@ static void dw_mci_setup_bus(struct dw_mci_slot *slot)
615 u32 div; 617 u32 div;
616 618
617 if (slot->clock != host->current_speed) { 619 if (slot->clock != host->current_speed) {
618 if (host->bus_hz % slot->clock) 620 div = host->bus_hz / slot->clock;
621 if (host->bus_hz % slot->clock && host->bus_hz > slot->clock)
619 /* 622 /*
620 * move the + 1 after the divide to prevent 623 * move the + 1 after the divide to prevent
621 * over-clocking the card. 624 * over-clocking the card.
622 */ 625 */
623 div = ((host->bus_hz / slot->clock) >> 1) + 1; 626 div += 1;
624 else 627
625 div = (host->bus_hz / slot->clock) >> 1; 628 div = (host->bus_hz != slot->clock) ? DIV_ROUND_UP(div, 2) : 0;
626 629
627 dev_info(&slot->mmc->class_dev, 630 dev_info(&slot->mmc->class_dev,
628 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ" 631 "Bus speed (slot %d) = %dHz (slot req %dHz, actual %dHZ"
@@ -939,8 +942,8 @@ static void dw_mci_command_complete(struct dw_mci *host, struct mmc_command *cmd
939 mdelay(20); 942 mdelay(20);
940 943
941 if (cmd->data) { 944 if (cmd->data) {
942 host->data = NULL;
943 dw_mci_stop_dma(host); 945 dw_mci_stop_dma(host);
946 host->data = NULL;
944 } 947 }
945 } 948 }
946} 949}
@@ -1623,7 +1626,6 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1623 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) { 1626 if (pending & (SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI)) {
1624 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI); 1627 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_TI | SDMMC_IDMAC_INT_RI);
1625 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI); 1628 mci_writel(host, IDSTS, SDMMC_IDMAC_INT_NI);
1626 set_bit(EVENT_DATA_COMPLETE, &host->pending_events);
1627 host->dma_ops->complete(host); 1629 host->dma_ops->complete(host);
1628 } 1630 }
1629#endif 1631#endif
@@ -1725,7 +1727,8 @@ static void dw_mci_work_routine_card(struct work_struct *work)
1725 1727
1726#ifdef CONFIG_MMC_DW_IDMAC 1728#ifdef CONFIG_MMC_DW_IDMAC
1727 ctrl = mci_readl(host, BMOD); 1729 ctrl = mci_readl(host, BMOD);
1728 ctrl |= 0x01; /* Software reset of DMA */ 1730 /* Software reset of DMA */
1731 ctrl |= SDMMC_IDMAC_SWRESET;
1729 mci_writel(host, BMOD, ctrl); 1732 mci_writel(host, BMOD, ctrl);
1730#endif 1733#endif
1731 1734
@@ -1950,10 +1953,6 @@ int dw_mci_probe(struct dw_mci *host)
1950 spin_lock_init(&host->lock); 1953 spin_lock_init(&host->lock);
1951 INIT_LIST_HEAD(&host->queue); 1954 INIT_LIST_HEAD(&host->queue);
1952 1955
1953
1954 host->dma_ops = host->pdata->dma_ops;
1955 dw_mci_init_dma(host);
1956
1957 /* 1956 /*
1958 * Get the host data width - this assumes that HCON has been set with 1957 * Get the host data width - this assumes that HCON has been set with
1959 * the correct values. 1958 * the correct values.
@@ -1981,10 +1980,11 @@ int dw_mci_probe(struct dw_mci *host)
1981 } 1980 }
1982 1981
1983 /* Reset all blocks */ 1982 /* Reset all blocks */
1984 if (!mci_wait_reset(&host->dev, host)) { 1983 if (!mci_wait_reset(&host->dev, host))
1985 ret = -ENODEV; 1984 return -ENODEV;
1986 goto err_dmaunmap; 1985
1987 } 1986 host->dma_ops = host->pdata->dma_ops;
1987 dw_mci_init_dma(host);
1988 1988
1989 /* Clear the interrupts for the host controller */ 1989 /* Clear the interrupts for the host controller */
1990 mci_writel(host, RINTSTS, 0xFFFFFFFF); 1990 mci_writel(host, RINTSTS, 0xFFFFFFFF);
@@ -2170,14 +2170,14 @@ int dw_mci_resume(struct dw_mci *host)
2170 if (host->vmmc) 2170 if (host->vmmc)
2171 regulator_enable(host->vmmc); 2171 regulator_enable(host->vmmc);
2172 2172
2173 if (host->dma_ops->init)
2174 host->dma_ops->init(host);
2175
2176 if (!mci_wait_reset(&host->dev, host)) { 2173 if (!mci_wait_reset(&host->dev, host)) {
2177 ret = -ENODEV; 2174 ret = -ENODEV;
2178 return ret; 2175 return ret;
2179 } 2176 }
2180 2177
2178 if (host->dma_ops->init)
2179 host->dma_ops->init(host);
2180
2181 /* Restore the old value at FIFOTH register */ 2181 /* Restore the old value at FIFOTH register */
2182 mci_writel(host, FIFOTH, host->fifoth_val); 2182 mci_writel(host, FIFOTH, host->fifoth_val);
2183 2183
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index f0fcce40cd8d..50ff19a62368 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -1216,12 +1216,7 @@ static void mmci_dt_populate_generic_pdata(struct device_node *np,
1216 int bus_width = 0; 1216 int bus_width = 0;
1217 1217
1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0); 1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
1219 if (!pdata->gpio_wp)
1220 pdata->gpio_wp = -1;
1221
1222 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0); 1219 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
1223 if (!pdata->gpio_cd)
1224 pdata->gpio_cd = -1;
1225 1220
1226 if (of_get_property(np, "cd-inverted", NULL)) 1221 if (of_get_property(np, "cd-inverted", NULL))
1227 pdata->cd_invert = true; 1222 pdata->cd_invert = true;
@@ -1276,6 +1271,12 @@ static int __devinit mmci_probe(struct amba_device *dev,
1276 return -EINVAL; 1271 return -EINVAL;
1277 } 1272 }
1278 1273
1274 if (!plat) {
1275 plat = devm_kzalloc(&dev->dev, sizeof(*plat), GFP_KERNEL);
1276 if (!plat)
1277 return -ENOMEM;
1278 }
1279
1279 if (np) 1280 if (np)
1280 mmci_dt_populate_generic_pdata(np, plat); 1281 mmci_dt_populate_generic_pdata(np, plat);
1281 1282
@@ -1424,6 +1425,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
1424 writel(0, host->base + MMCIMASK1); 1425 writel(0, host->base + MMCIMASK1);
1425 writel(0xfff, host->base + MMCICLEAR); 1426 writel(0xfff, host->base + MMCICLEAR);
1426 1427
1428 if (plat->gpio_cd == -EPROBE_DEFER) {
1429 ret = -EPROBE_DEFER;
1430 goto err_gpio_cd;
1431 }
1427 if (gpio_is_valid(plat->gpio_cd)) { 1432 if (gpio_is_valid(plat->gpio_cd)) {
1428 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 1433 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
1429 if (ret == 0) 1434 if (ret == 0)
@@ -1447,6 +1452,10 @@ static int __devinit mmci_probe(struct amba_device *dev,
1447 if (ret >= 0) 1452 if (ret >= 0)
1448 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd); 1453 host->gpio_cd_irq = gpio_to_irq(plat->gpio_cd);
1449 } 1454 }
1455 if (plat->gpio_wp == -EPROBE_DEFER) {
1456 ret = -EPROBE_DEFER;
1457 goto err_gpio_wp;
1458 }
1450 if (gpio_is_valid(plat->gpio_wp)) { 1459 if (gpio_is_valid(plat->gpio_wp)) {
1451 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)"); 1460 ret = gpio_request(plat->gpio_wp, DRIVER_NAME " (wp)");
1452 if (ret == 0) 1461 if (ret == 0)
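The mmci hunks above stop rewriting of_get_named_gpio() failures to -1 and instead propagate -EPROBE_DEFER out of probe, so the driver is retried once the GPIO provider has registered. A stripped-down, hedged sketch of that probe-time pattern; the "cd-gpios" property matches the binding referenced above, but the helper itself is invented:

/* Sketch of deferred-probe handling for an optional card-detect GPIO. */
#include <linux/errno.h>
#include <linux/of_gpio.h>

static int example_get_cd_gpio(struct device_node *np, int *gpio_out)
{
	int gpio = of_get_named_gpio(np, "cd-gpios", 0);

	if (gpio == -EPROBE_DEFER)
		return -EPROBE_DEFER;   /* GPIO controller not ready yet: retry probe later */

	/* Any other negative value just means "no such GPIO"; treat it as optional. */
	*gpio_out = gpio;
	return 0;
}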
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index 34a90266ab11..277161d279b8 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -894,8 +894,8 @@ static struct platform_driver mxs_mmc_driver = {
894 .owner = THIS_MODULE, 894 .owner = THIS_MODULE,
895#ifdef CONFIG_PM 895#ifdef CONFIG_PM
896 .pm = &mxs_mmc_pm_ops, 896 .pm = &mxs_mmc_pm_ops,
897 .of_match_table = mxs_mmc_dt_ids,
898#endif 897#endif
898 .of_match_table = mxs_mmc_dt_ids,
899 }, 899 },
900}; 900};
901 901
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 552196c764d4..3e8dcf8d2e05 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -1300,7 +1300,7 @@ static const struct mmc_host_ops mmc_omap_ops = {
1300 .set_ios = mmc_omap_set_ios, 1300 .set_ios = mmc_omap_set_ios,
1301}; 1301};
1302 1302
1303static int __init mmc_omap_new_slot(struct mmc_omap_host *host, int id) 1303static int __devinit mmc_omap_new_slot(struct mmc_omap_host *host, int id)
1304{ 1304{
1305 struct mmc_omap_slot *slot = NULL; 1305 struct mmc_omap_slot *slot = NULL;
1306 struct mmc_host *mmc; 1306 struct mmc_host *mmc;
@@ -1485,24 +1485,26 @@ static int __devinit mmc_omap_probe(struct platform_device *pdev)
1485 } 1485 }
1486 1486
1487 host->nr_slots = pdata->nr_slots; 1487 host->nr_slots = pdata->nr_slots;
1488 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1489
1490 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1491 if (!host->mmc_omap_wq)
1492 goto err_plat_cleanup;
1493
1488 for (i = 0; i < pdata->nr_slots; i++) { 1494 for (i = 0; i < pdata->nr_slots; i++) {
1489 ret = mmc_omap_new_slot(host, i); 1495 ret = mmc_omap_new_slot(host, i);
1490 if (ret < 0) { 1496 if (ret < 0) {
1491 while (--i >= 0) 1497 while (--i >= 0)
1492 mmc_omap_remove_slot(host->slots[i]); 1498 mmc_omap_remove_slot(host->slots[i]);
1493 1499
1494 goto err_plat_cleanup; 1500 goto err_destroy_wq;
1495 } 1501 }
1496 } 1502 }
1497 1503
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499
1500 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1501 if (!host->mmc_omap_wq)
1502 goto err_plat_cleanup;
1503
1504 return 0; 1504 return 0;
1505 1505
1506err_destroy_wq:
1507 destroy_workqueue(host->mmc_omap_wq);
1506err_plat_cleanup: 1508err_plat_cleanup:
1507 if (pdata->cleanup) 1509 if (pdata->cleanup)
1508 pdata->cleanup(&pdev->dev); 1510 pdata->cleanup(&pdev->dev);
diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
index 55a164fcaa15..a50c205ea208 100644
--- a/drivers/mmc/host/sdhci-s3c.c
+++ b/drivers/mmc/host/sdhci-s3c.c
@@ -404,7 +404,7 @@ static void sdhci_s3c_setup_card_detect_gpio(struct sdhci_s3c *sc)
404 if (sc->ext_cd_irq && 404 if (sc->ext_cd_irq &&
405 request_threaded_irq(sc->ext_cd_irq, NULL, 405 request_threaded_irq(sc->ext_cd_irq, NULL,
406 sdhci_s3c_gpio_card_detect_thread, 406 sdhci_s3c_gpio_card_detect_thread,
407 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 407 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
408 dev_name(dev), sc) == 0) { 408 dev_name(dev), sc) == 0) {
409 int status = gpio_get_value(sc->ext_cd_gpio); 409 int status = gpio_get_value(sc->ext_cd_gpio);
410 if (pdata->ext_cd_gpio_invert) 410 if (pdata->ext_cd_gpio_invert)
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 1fe32dfa7cd4..423da8194cd8 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -4,7 +4,7 @@
4 * Support of SDHCI platform devices for spear soc family 4 * Support of SDHCI platform devices for spear soc family
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * Inspired by sdhci-pltfm.c 9 * Inspired by sdhci-pltfm.c
10 * 10 *
@@ -289,5 +289,5 @@ static struct platform_driver sdhci_driver = {
289module_platform_driver(sdhci_driver); 289module_platform_driver(sdhci_driver);
290 290
291MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver"); 291MODULE_DESCRIPTION("SPEAr Secure Digital Host Controller Interface driver");
292MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 292MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
293MODULE_LICENSE("GPL v2"); 293MODULE_LICENSE("GPL v2");
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index e626732aff77..f4b8b4db3a9a 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
680 } 680 }
681 681
682 if (count >= 0xF) { 682 if (count >= 0xF) {
683 pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n", 683 DBG("%s: Too large timeout 0x%x requested for CMD%d!\n",
684 mmc_hostname(host->mmc), count, cmd->opcode); 684 mmc_hostname(host->mmc), count, cmd->opcode);
685 count = 0xE; 685 count = 0xE;
686 } 686 }
687 687
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index 5760c1a4b3f6..27143e042af5 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -128,7 +128,7 @@ config MTD_AFS_PARTS
128 128
129config MTD_OF_PARTS 129config MTD_OF_PARTS
130 tristate "OpenFirmware partitioning information support" 130 tristate "OpenFirmware partitioning information support"
131 default Y 131 default y
132 depends on OF 132 depends on OF
133 help 133 help
134 This provides a partition parsing function which derives 134 This provides a partition parsing function which derives
diff --git a/drivers/mtd/bcm63xxpart.c b/drivers/mtd/bcm63xxpart.c
index 608321ee056e..63d2a64331f7 100644
--- a/drivers/mtd/bcm63xxpart.c
+++ b/drivers/mtd/bcm63xxpart.c
@@ -4,7 +4,7 @@
4 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org> 4 * Copyright © 2006-2008 Florian Fainelli <florian@openwrt.org>
5 * Mike Albon <malbon@openwrt.org> 5 * Mike Albon <malbon@openwrt.org>
6 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net> 6 * Copyright © 2009-2010 Daniel Dickinson <openwrt@cshore.neomailbox.net>
7 * Copyright © 2011 Jonas Gorski <jonas.gorski@gmail.com> 7 * Copyright © 2011-2012 Jonas Gorski <jonas.gorski@gmail.com>
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -82,6 +82,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
82 int namelen = 0; 82 int namelen = 0;
83 int i; 83 int i;
84 u32 computed_crc; 84 u32 computed_crc;
85 bool rootfs_first = false;
85 86
86 if (bcm63xx_detect_cfe(master)) 87 if (bcm63xx_detect_cfe(master))
87 return -EINVAL; 88 return -EINVAL;
@@ -109,6 +110,7 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
109 char *boardid = &(buf->board_id[0]); 110 char *boardid = &(buf->board_id[0]);
110 char *tagversion = &(buf->tag_version[0]); 111 char *tagversion = &(buf->tag_version[0]);
111 112
113 sscanf(buf->flash_image_start, "%u", &rootfsaddr);
112 sscanf(buf->kernel_address, "%u", &kerneladdr); 114 sscanf(buf->kernel_address, "%u", &kerneladdr);
113 sscanf(buf->kernel_length, "%u", &kernellen); 115 sscanf(buf->kernel_length, "%u", &kernellen);
114 sscanf(buf->total_length, "%u", &totallen); 116 sscanf(buf->total_length, "%u", &totallen);
@@ -117,10 +119,19 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
117 tagversion, boardid); 119 tagversion, boardid);
118 120
119 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE; 121 kerneladdr = kerneladdr - BCM63XX_EXTENDED_SIZE;
120 rootfsaddr = kerneladdr + kernellen; 122 rootfsaddr = rootfsaddr - BCM63XX_EXTENDED_SIZE;
121 spareaddr = roundup(totallen, master->erasesize) + cfelen; 123 spareaddr = roundup(totallen, master->erasesize) + cfelen;
122 sparelen = master->size - spareaddr - nvramlen; 124 sparelen = master->size - spareaddr - nvramlen;
123 rootfslen = spareaddr - rootfsaddr; 125
126 if (rootfsaddr < kerneladdr) {
127 /* default Broadcom layout */
128 rootfslen = kerneladdr - rootfsaddr;
129 rootfs_first = true;
130 } else {
131 /* OpenWrt layout */
132 rootfsaddr = kerneladdr + kernellen;
133 rootfslen = spareaddr - rootfsaddr;
134 }
124 } else { 135 } else {
125 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n", 136 pr_warn("CFE boot tag CRC invalid (expected %08x, actual %08x)\n",
126 buf->header_crc, computed_crc); 137 buf->header_crc, computed_crc);
@@ -156,18 +167,26 @@ static int bcm63xx_parse_cfe_partitions(struct mtd_info *master,
156 curpart++; 167 curpart++;
157 168
158 if (kernellen > 0) { 169 if (kernellen > 0) {
159 parts[curpart].name = "kernel"; 170 int kernelpart = curpart;
160 parts[curpart].offset = kerneladdr; 171
161 parts[curpart].size = kernellen; 172 if (rootfslen > 0 && rootfs_first)
173 kernelpart++;
174 parts[kernelpart].name = "kernel";
175 parts[kernelpart].offset = kerneladdr;
176 parts[kernelpart].size = kernellen;
162 curpart++; 177 curpart++;
163 } 178 }
164 179
165 if (rootfslen > 0) { 180 if (rootfslen > 0) {
166 parts[curpart].name = "rootfs"; 181 int rootfspart = curpart;
167 parts[curpart].offset = rootfsaddr; 182
168 parts[curpart].size = rootfslen; 183 if (kernellen > 0 && rootfs_first)
169 if (sparelen > 0) 184 rootfspart--;
170 parts[curpart].size += sparelen; 185 parts[rootfspart].name = "rootfs";
186 parts[rootfspart].offset = rootfsaddr;
187 parts[rootfspart].size = rootfslen;
188 if (sparelen > 0 && !rootfs_first)
189 parts[rootfspart].size += sparelen;
171 curpart++; 190 curpart++;
172 } 191 }
173 192
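The bcm63xxpart hunks above read the root filesystem start address straight from the CFE tag and use it to tell the two known flash layouts apart: stock Broadcom images place the rootfs before the kernel, while OpenWrt images place the kernel first and let the rootfs use the spare space behind it. A small standalone sketch of that decision; the sample offsets are invented and the BCM63XX_EXTENDED_SIZE rebasing is left out:

/* Hedged illustration of the layout detection added above. */
#include <stdio.h>

int main(void)
{
	/* Invented example addresses, already rebased to flash offsets. */
	unsigned int kerneladdr = 0x00010000, kernellen = 0x00100000;
	unsigned int rootfsaddr = 0x00110000;   /* try a value below kerneladdr as well */
	unsigned int spareaddr  = 0x00400000;
	unsigned int rootfslen;
	int rootfs_first = 0;

	if (rootfsaddr < kerneladdr) {
		/* Default Broadcom layout: cfe | rootfs | kernel | nvram */
		rootfslen = kerneladdr - rootfsaddr;
		rootfs_first = 1;
	} else {
		/* OpenWrt layout: cfe | kernel | rootfs | nvram */
		rootfsaddr = kerneladdr + kernellen;
		rootfslen = spareaddr - rootfsaddr;
	}

	printf("%s layout: rootfs @ 0x%08x, len 0x%08x\n",
	       rootfs_first ? "Broadcom" : "OpenWrt", rootfsaddr, rootfslen);
	return 0;
}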
diff --git a/drivers/mtd/chips/cfi_cmdset_0002.c b/drivers/mtd/chips/cfi_cmdset_0002.c
index d02592e6a0f0..22d0493a026f 100644
--- a/drivers/mtd/chips/cfi_cmdset_0002.c
+++ b/drivers/mtd/chips/cfi_cmdset_0002.c
@@ -317,7 +317,7 @@ static void fixup_s29gl064n_sectors(struct mtd_info *mtd)
317 317
318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) { 318 if ((cfi->cfiq->EraseRegionInfo[0] & 0xffff) == 0x003f) {
319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040; 319 cfi->cfiq->EraseRegionInfo[0] |= 0x0040;
320 pr_warning("%s: Bad S29GL064N CFI data, adjust from 64 to 128 sectors\n", mtd->name); 320 pr_warning("%s: Bad S29GL064N CFI data; adjust from 64 to 128 sectors\n", mtd->name);
321 } 321 }
322} 322}
323 323
@@ -328,10 +328,23 @@ static void fixup_s29gl032n_sectors(struct mtd_info *mtd)
328 328
329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) { 329 if ((cfi->cfiq->EraseRegionInfo[1] & 0xffff) == 0x007e) {
330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040; 330 cfi->cfiq->EraseRegionInfo[1] &= ~0x0040;
331 pr_warning("%s: Bad S29GL032N CFI data, adjust from 127 to 63 sectors\n", mtd->name); 331 pr_warning("%s: Bad S29GL032N CFI data; adjust from 127 to 63 sectors\n", mtd->name);
332 } 332 }
333} 333}
334 334
335static void fixup_s29ns512p_sectors(struct mtd_info *mtd)
336{
337 struct map_info *map = mtd->priv;
338 struct cfi_private *cfi = map->fldrv_priv;
339
340 /*
341 * S29NS512P flash uses more than 8bits to report number of sectors,
342 * which is not permitted by CFI.
343 */
344 cfi->cfiq->EraseRegionInfo[0] = 0x020001ff;
345 pr_warning("%s: Bad S29NS512P CFI data; adjust to 512 sectors\n", mtd->name);
346}
347
335/* Used to fix CFI-Tables of chips without Extended Query Tables */ 348/* Used to fix CFI-Tables of chips without Extended Query Tables */
336static struct cfi_fixup cfi_nopri_fixup_table[] = { 349static struct cfi_fixup cfi_nopri_fixup_table[] = {
337 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */ 350 { CFI_MFR_SST, 0x234a, fixup_sst39vf }, /* SST39VF1602 */
@@ -362,6 +375,7 @@ static struct cfi_fixup cfi_fixup_table[] = {
362 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors }, 375 { CFI_MFR_AMD, 0x1301, fixup_s29gl064n_sectors },
363 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors }, 376 { CFI_MFR_AMD, 0x1a00, fixup_s29gl032n_sectors },
364 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors }, 377 { CFI_MFR_AMD, 0x1a01, fixup_s29gl032n_sectors },
378 { CFI_MFR_AMD, 0x3f00, fixup_s29ns512p_sectors },
365 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */ 379 { CFI_MFR_SST, 0x536a, fixup_sst38vf640x_sectorsize }, /* SST38VF6402 */
366 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */ 380 { CFI_MFR_SST, 0x536b, fixup_sst38vf640x_sectorsize }, /* SST38VF6401 */
367 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */ 381 { CFI_MFR_SST, 0x536c, fixup_sst38vf640x_sectorsize }, /* SST38VF6404 */
diff --git a/drivers/mtd/cmdlinepart.c b/drivers/mtd/cmdlinepart.c
index ddf9ec6d9168..4558e0f4d07f 100644
--- a/drivers/mtd/cmdlinepart.c
+++ b/drivers/mtd/cmdlinepart.c
@@ -70,7 +70,7 @@ struct cmdline_mtd_partition {
70/* mtdpart_setup() parses into here */ 70/* mtdpart_setup() parses into here */
71static struct cmdline_mtd_partition *partitions; 71static struct cmdline_mtd_partition *partitions;
72 72
73/* the command line passed to mtdpart_setupd() */ 73/* the command line passed to mtdpart_setup() */
74static char *cmdline; 74static char *cmdline;
75static int cmdline_parsed = 0; 75static int cmdline_parsed = 0;
76 76
diff --git a/drivers/mtd/devices/block2mtd.c b/drivers/mtd/devices/block2mtd.c
index a4a80b742e65..681e2ee0f2d6 100644
--- a/drivers/mtd/devices/block2mtd.c
+++ b/drivers/mtd/devices/block2mtd.c
@@ -52,8 +52,6 @@ static int _block2mtd_erase(struct block2mtd_dev *dev, loff_t to, size_t len)
52 52
53 while (pages) { 53 while (pages) {
54 page = page_read(mapping, index); 54 page = page_read(mapping, index);
55 if (!page)
56 return -ENOMEM;
57 if (IS_ERR(page)) 55 if (IS_ERR(page))
58 return PTR_ERR(page); 56 return PTR_ERR(page);
59 57
@@ -112,8 +110,6 @@ static int block2mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
112 len = len - cpylen; 110 len = len - cpylen;
113 111
114 page = page_read(dev->blkdev->bd_inode->i_mapping, index); 112 page = page_read(dev->blkdev->bd_inode->i_mapping, index);
115 if (!page)
116 return -ENOMEM;
117 if (IS_ERR(page)) 113 if (IS_ERR(page))
118 return PTR_ERR(page); 114 return PTR_ERR(page);
119 115
@@ -148,8 +144,6 @@ static int _block2mtd_write(struct block2mtd_dev *dev, const u_char *buf,
148 len = len - cpylen; 144 len = len - cpylen;
149 145
150 page = page_read(mapping, index); 146 page = page_read(mapping, index);
151 if (!page)
152 return -ENOMEM;
153 if (IS_ERR(page)) 147 if (IS_ERR(page))
154 return PTR_ERR(page); 148 return PTR_ERR(page);
155 149
@@ -271,7 +265,6 @@ static struct block2mtd_dev *add_device(char *devname, int erase_size)
271 dev->mtd.flags = MTD_CAP_RAM; 265 dev->mtd.flags = MTD_CAP_RAM;
272 dev->mtd._erase = block2mtd_erase; 266 dev->mtd._erase = block2mtd_erase;
273 dev->mtd._write = block2mtd_write; 267 dev->mtd._write = block2mtd_write;
274 dev->mtd._writev = mtd_writev;
275 dev->mtd._sync = block2mtd_sync; 268 dev->mtd._sync = block2mtd_sync;
276 dev->mtd._read = block2mtd_read; 269 dev->mtd._read = block2mtd_read;
277 dev->mtd.priv = dev; 270 dev->mtd.priv = dev;
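The NULL checks deleted above were dead code: block2mtd's page_read() is a thin wrapper around read_mapping_page(), which reports failure via an ERR_PTR()-encoded pointer and never returns NULL. A minimal sketch of that convention (the caller is illustrative, not the driver's exact code):

#include <linux/err.h>
#include <linux/pagemap.h>

/* read_mapping_page() returns a valid page or ERR_PTR(-errno), never NULL */
static struct page *page_read(struct address_space *mapping, int index)
{
	return read_mapping_page(mapping, index, NULL);
}

/* Illustrative caller: only IS_ERR()/PTR_ERR() handling is required */
static int read_one_page(struct address_space *mapping, int index)
{
	struct page *page = page_read(mapping, index);

	if (IS_ERR(page))
		return PTR_ERR(page);
	/* ... copy data out of the page ... */
	return 0;
}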
diff --git a/drivers/mtd/devices/docg3.c b/drivers/mtd/devices/docg3.c
index 50aa90aa7a7f..f70854d728fe 100644
--- a/drivers/mtd/devices/docg3.c
+++ b/drivers/mtd/devices/docg3.c
@@ -227,7 +227,7 @@ static void doc_read_data_area(struct docg3 *docg3, void *buf, int len,
227 u8 data8, *dst8; 227 u8 data8, *dst8;
228 228
229 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len); 229 doc_dbg("doc_read_data_area(buf=%p, len=%d)\n", buf, len);
230 cdr = len & 0x3; 230 cdr = len & 0x1;
231 len4 = len - cdr; 231 len4 = len - cdr;
232 232
233 if (first) 233 if (first)
@@ -732,12 +732,24 @@ err:
732 * @len: the number of bytes to be read (must be a multiple of 4) 732 * @len: the number of bytes to be read (must be a multiple of 4)
733 * @buf: the buffer to be filled in (or NULL to forget the bytes) 733 * @buf: the buffer to be filled in (or NULL to forget the bytes)
734 * @first: 1 if first time read, DOC_READADDRESS should be set 734 * @first: 1 if first time read, DOC_READADDRESS should be set
735 * @last_odd: 1 if last read ended up on an odd byte
736 *
737 * Reads bytes from a prepared page. There is a subtlety here: if the last
738 * read ended on an odd offset within the 1024-byte double page, i.e. between
739 * the two planes, the first byte must be read separately. If a 16-bit word
740 * read were used, it would return the plane-2 byte in both the low and the
741 * high half, corrupting the read.
735 * 742 *
736 */ 743 */
737static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf, 744static int doc_read_page_getbytes(struct docg3 *docg3, int len, u_char *buf,
738 int first) 745 int first, int last_odd)
739{ 746{
740 doc_read_data_area(docg3, buf, len, first); 747 if (last_odd && len > 0) {
748 doc_read_data_area(docg3, buf, 1, first);
749 doc_read_data_area(docg3, buf ? buf + 1 : buf, len - 1, 0);
750 } else {
751 doc_read_data_area(docg3, buf, len, first);
752 }
741 doc_delay(docg3, 2); 753 doc_delay(docg3, 2);
742 return len; 754 return len;
743} 755}
@@ -850,6 +862,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
850 u8 *buf = ops->datbuf; 862 u8 *buf = ops->datbuf;
851 size_t len, ooblen, nbdata, nboob; 863 size_t len, ooblen, nbdata, nboob;
852 u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1; 864 u8 hwecc[DOC_ECC_BCH_SIZE], eccconf1;
865 int max_bitflips = 0;
853 866
854 if (buf) 867 if (buf)
855 len = ops->len; 868 len = ops->len;
@@ -876,7 +889,7 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
876 ret = 0; 889 ret = 0;
877 skip = from % DOC_LAYOUT_PAGE_SIZE; 890 skip = from % DOC_LAYOUT_PAGE_SIZE;
878 mutex_lock(&docg3->cascade->lock); 891 mutex_lock(&docg3->cascade->lock);
879 while (!ret && (len > 0 || ooblen > 0)) { 892 while (ret >= 0 && (len > 0 || ooblen > 0)) {
880 calc_block_sector(from - skip, &block0, &block1, &page, &ofs, 893 calc_block_sector(from - skip, &block0, &block1, &page, &ofs,
881 docg3->reliable); 894 docg3->reliable);
882 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip); 895 nbdata = min_t(size_t, len, DOC_LAYOUT_PAGE_SIZE - skip);
@@ -887,20 +900,20 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
887 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES); 900 ret = doc_read_page_ecc_init(docg3, DOC_ECC_BCH_TOTAL_BYTES);
888 if (ret < 0) 901 if (ret < 0)
889 goto err_in_read; 902 goto err_in_read;
890 ret = doc_read_page_getbytes(docg3, skip, NULL, 1); 903 ret = doc_read_page_getbytes(docg3, skip, NULL, 1, 0);
891 if (ret < skip) 904 if (ret < skip)
892 goto err_in_read; 905 goto err_in_read;
893 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0); 906 ret = doc_read_page_getbytes(docg3, nbdata, buf, 0, skip % 2);
894 if (ret < nbdata) 907 if (ret < nbdata)
895 goto err_in_read; 908 goto err_in_read;
896 doc_read_page_getbytes(docg3, 909 doc_read_page_getbytes(docg3,
897 DOC_LAYOUT_PAGE_SIZE - nbdata - skip, 910 DOC_LAYOUT_PAGE_SIZE - nbdata - skip,
898 NULL, 0); 911 NULL, 0, (skip + nbdata) % 2);
899 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0); 912 ret = doc_read_page_getbytes(docg3, nboob, oobbuf, 0, 0);
900 if (ret < nboob) 913 if (ret < nboob)
901 goto err_in_read; 914 goto err_in_read;
902 doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob, 915 doc_read_page_getbytes(docg3, DOC_LAYOUT_OOB_SIZE - nboob,
903 NULL, 0); 916 NULL, 0, nboob % 2);
904 917
905 doc_get_bch_hw_ecc(docg3, hwecc); 918 doc_get_bch_hw_ecc(docg3, hwecc);
906 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1); 919 eccconf1 = doc_register_readb(docg3, DOC_ECCCONF1);
@@ -936,7 +949,8 @@ static int doc_read_oob(struct mtd_info *mtd, loff_t from,
936 } 949 }
937 if (ret > 0) { 950 if (ret > 0) {
938 mtd->ecc_stats.corrected += ret; 951 mtd->ecc_stats.corrected += ret;
939 ret = -EUCLEAN; 952 max_bitflips = max(max_bitflips, ret);
953 ret = max_bitflips;
940 } 954 }
941 } 955 }
942 956
@@ -1004,7 +1018,7 @@ static int doc_reload_bbt(struct docg3 *docg3)
1004 DOC_LAYOUT_PAGE_SIZE); 1018 DOC_LAYOUT_PAGE_SIZE);
1005 if (!ret) 1019 if (!ret)
1006 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE, 1020 doc_read_page_getbytes(docg3, DOC_LAYOUT_PAGE_SIZE,
1007 buf, 1); 1021 buf, 1, 0);
1008 buf += DOC_LAYOUT_PAGE_SIZE; 1022 buf += DOC_LAYOUT_PAGE_SIZE;
1009 } 1023 }
1010 doc_read_page_finish(docg3); 1024 doc_read_page_finish(docg3);
@@ -1064,10 +1078,10 @@ static int doc_get_erase_count(struct docg3 *docg3, loff_t from)
1064 ret = doc_reset_seq(docg3); 1078 ret = doc_reset_seq(docg3);
1065 if (!ret) 1079 if (!ret)
1066 ret = doc_read_page_prepare(docg3, block0, block1, page, 1080 ret = doc_read_page_prepare(docg3, block0, block1, page,
1067 ofs + DOC_LAYOUT_WEAR_OFFSET); 1081 ofs + DOC_LAYOUT_WEAR_OFFSET, 0);
1068 if (!ret) 1082 if (!ret)
1069 ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE, 1083 ret = doc_read_page_getbytes(docg3, DOC_LAYOUT_WEAR_SIZE,
1070 buf, 1); 1084 buf, 1, 0);
1071 doc_read_page_finish(docg3); 1085 doc_read_page_finish(docg3);
1072 1086
1073 if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK)) 1087 if (ret || (buf[0] != DOC_ERASE_MARK) || (buf[2] != DOC_ERASE_MARK))
diff --git a/drivers/mtd/devices/m25p80.c b/drivers/mtd/devices/m25p80.c
index 1924d247c1cb..5d0d68c3fe27 100644
--- a/drivers/mtd/devices/m25p80.c
+++ b/drivers/mtd/devices/m25p80.c
@@ -639,12 +639,16 @@ static const struct spi_device_id m25p_ids[] = {
639 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, 639 { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) },
640 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, 640 { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) },
641 641
642 /* Everspin */
643 { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2) },
644
642 /* Intel/Numonyx -- xxxs33b */ 645 /* Intel/Numonyx -- xxxs33b */
643 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, 646 { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) },
644 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, 647 { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) },
645 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) }, 648 { "640s33b", INFO(0x898913, 0, 64 * 1024, 128, 0) },
646 649
647 /* Macronix */ 650 /* Macronix */
651 { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) },
648 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, 652 { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) },
649 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, 653 { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) },
650 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, 654 { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) },
@@ -728,6 +732,7 @@ static const struct spi_device_id m25p_ids[] = {
728 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, 732 { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) },
729 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, 733 { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) },
730 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, 734 { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) },
735 { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) },
731 736
732 /* Catalyst / On Semiconductor -- non-JEDEC */ 737 /* Catalyst / On Semiconductor -- non-JEDEC */
733 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) }, 738 { "cat25c11", CAT25_INFO( 16, 8, 16, 1) },
diff --git a/drivers/mtd/devices/spear_smi.c b/drivers/mtd/devices/spear_smi.c
index 797d43cd3550..67960362681e 100644
--- a/drivers/mtd/devices/spear_smi.c
+++ b/drivers/mtd/devices/spear_smi.c
@@ -990,9 +990,9 @@ static int __devinit spear_smi_probe(struct platform_device *pdev)
990 goto err_clk; 990 goto err_clk;
991 } 991 }
992 992
993 ret = clk_enable(dev->clk); 993 ret = clk_prepare_enable(dev->clk);
994 if (ret) 994 if (ret)
995 goto err_clk_enable; 995 goto err_clk_prepare_enable;
996 996
997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev); 997 ret = request_irq(irq, spear_smi_int_handler, 0, pdev->name, dev);
998 if (ret) { 998 if (ret) {
@@ -1020,8 +1020,8 @@ err_bank_setup:
1020 free_irq(irq, dev); 1020 free_irq(irq, dev);
1021 platform_set_drvdata(pdev, NULL); 1021 platform_set_drvdata(pdev, NULL);
1022err_irq: 1022err_irq:
1023 clk_disable(dev->clk); 1023 clk_disable_unprepare(dev->clk);
1024err_clk_enable: 1024err_clk_prepare_enable:
1025 clk_put(dev->clk); 1025 clk_put(dev->clk);
1026err_clk: 1026err_clk:
1027 iounmap(dev->io_base); 1027 iounmap(dev->io_base);
@@ -1074,7 +1074,7 @@ static int __devexit spear_smi_remove(struct platform_device *pdev)
1074 irq = platform_get_irq(pdev, 0); 1074 irq = platform_get_irq(pdev, 0);
1075 free_irq(irq, dev); 1075 free_irq(irq, dev);
1076 1076
1077 clk_disable(dev->clk); 1077 clk_disable_unprepare(dev->clk);
1078 clk_put(dev->clk); 1078 clk_put(dev->clk);
1079 iounmap(dev->io_base); 1079 iounmap(dev->io_base);
1080 kfree(dev); 1080 kfree(dev);
@@ -1091,7 +1091,7 @@ int spear_smi_suspend(struct platform_device *pdev, pm_message_t state)
1091 struct spear_smi *dev = platform_get_drvdata(pdev); 1091 struct spear_smi *dev = platform_get_drvdata(pdev);
1092 1092
1093 if (dev && dev->clk) 1093 if (dev && dev->clk)
1094 clk_disable(dev->clk); 1094 clk_disable_unprepare(dev->clk);
1095 1095
1096 return 0; 1096 return 0;
1097} 1097}
@@ -1102,7 +1102,7 @@ int spear_smi_resume(struct platform_device *pdev)
1102 int ret = -EPERM; 1102 int ret = -EPERM;
1103 1103
1104 if (dev && dev->clk) 1104 if (dev && dev->clk)
1105 ret = clk_enable(dev->clk); 1105 ret = clk_prepare_enable(dev->clk);
1106 1106
1107 if (!ret) 1107 if (!ret)
1108 spear_smi_hw_init(dev); 1108 spear_smi_hw_init(dev);
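The conversion above is the standard common-clock-framework pairing: clk_prepare_enable() combines clk_prepare() (which may sleep) with clk_enable() (atomic), and clk_disable_unprepare() is its inverse on every teardown and suspend path. A minimal probe/remove sketch of the pairing (device names and error handling are illustrative):

#include <linux/clk.h>
#include <linux/err.h>
#include <linux/platform_device.h>

static struct clk *example_clk;

static int example_probe(struct platform_device *pdev)
{
	int ret;

	example_clk = clk_get(&pdev->dev, NULL);
	if (IS_ERR(example_clk))
		return PTR_ERR(example_clk);

	ret = clk_prepare_enable(example_clk);	/* prepare + enable in one call */
	if (ret) {
		clk_put(example_clk);
		return ret;
	}
	return 0;
}

static int example_remove(struct platform_device *pdev)
{
	clk_disable_unprepare(example_clk);	/* disable, then unprepare */
	clk_put(example_clk);
	return 0;
}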
diff --git a/drivers/mtd/lpddr/qinfo_probe.c b/drivers/mtd/lpddr/qinfo_probe.c
index dbfe17baf046..45abed67f1ef 100644
--- a/drivers/mtd/lpddr/qinfo_probe.c
+++ b/drivers/mtd/lpddr/qinfo_probe.c
@@ -57,7 +57,7 @@ static struct qinfo_query_info qinfo_array[] = {
57 57
58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str) 58static long lpddr_get_qinforec_pos(struct map_info *map, char *id_str)
59{ 59{
60 int qinfo_lines = sizeof(qinfo_array)/sizeof(struct qinfo_query_info); 60 int qinfo_lines = ARRAY_SIZE(qinfo_array);
61 int i; 61 int i;
62 int bankwidth = map_bankwidth(map) * 8; 62 int bankwidth = map_bankwidth(map) * 8;
63 int major, minor; 63 int major, minor;
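ARRAY_SIZE() is the kernel's idiomatic replacement for the open-coded sizeof division above. Roughly (simplified; the real macro in <linux/kernel.h> additionally rejects non-array arguments at build time):

/* Simplified form of the kernel's ARRAY_SIZE() macro */
#define ARRAY_SIZE(arr)	(sizeof(arr) / sizeof((arr)[0]))

static const int table[] = { 1, 2, 3, 5, 8 };
/* ARRAY_SIZE(table) == 5, independent of the element type's size */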
diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig
index 8af67cfd671a..5ba2458e799a 100644
--- a/drivers/mtd/maps/Kconfig
+++ b/drivers/mtd/maps/Kconfig
@@ -224,7 +224,7 @@ config MTD_CK804XROM
224 224
225config MTD_SCB2_FLASH 225config MTD_SCB2_FLASH
226 tristate "BIOS flash chip on Intel SCB2 boards" 226 tristate "BIOS flash chip on Intel SCB2 boards"
227 depends on X86 && MTD_JEDECPROBE 227 depends on X86 && MTD_JEDECPROBE && PCI
228 help 228 help
229 Support for treating the BIOS flash chip on Intel SCB2 boards 229 Support for treating the BIOS flash chip on Intel SCB2 boards
230 as an MTD device - with this you can reprogram your BIOS. 230 as an MTD device - with this you can reprogram your BIOS.
diff --git a/drivers/mtd/maps/intel_vr_nor.c b/drivers/mtd/maps/intel_vr_nor.c
index 92e1f41634c7..93f03175c82d 100644
--- a/drivers/mtd/maps/intel_vr_nor.c
+++ b/drivers/mtd/maps/intel_vr_nor.c
@@ -260,18 +260,7 @@ static struct pci_driver vr_nor_pci_driver = {
260 .id_table = vr_nor_pci_ids, 260 .id_table = vr_nor_pci_ids,
261}; 261};
262 262
263static int __init vr_nor_mtd_init(void) 263module_pci_driver(vr_nor_pci_driver);
264{
265 return pci_register_driver(&vr_nor_pci_driver);
266}
267
268static void __exit vr_nor_mtd_exit(void)
269{
270 pci_unregister_driver(&vr_nor_pci_driver);
271}
272
273module_init(vr_nor_mtd_init);
274module_exit(vr_nor_mtd_exit);
275 264
276MODULE_AUTHOR("Andy Lowe"); 265MODULE_AUTHOR("Andy Lowe");
277MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range"); 266MODULE_DESCRIPTION("MTD map driver for NOR flash on Intel Vermilion Range");
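This and the following map/NAND drivers drop their hand-rolled init/exit pairs in favour of module_pci_driver(), which expands to exactly the boilerplate being deleted. A simplified sketch of the expansion (the real macros live in <linux/pci.h> and <linux/device.h>, where module_driver() also accepts extra arguments):

/* Simplified: generate module_init/module_exit that register a pci_driver */
#define module_pci_driver(__pci_driver) \
	module_driver(__pci_driver, pci_register_driver, pci_unregister_driver)

#define module_driver(__driver, __register, __unregister) \
static int __init __driver##_init(void) \
{ \
	return __register(&(__driver)); \
} \
module_init(__driver##_init); \
static void __exit __driver##_exit(void) \
{ \
	__unregister(&(__driver)); \
} \
module_exit(__driver##_exit);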
diff --git a/drivers/mtd/maps/pci.c b/drivers/mtd/maps/pci.c
index 1d005a3e9b41..f14ce0af763f 100644
--- a/drivers/mtd/maps/pci.c
+++ b/drivers/mtd/maps/pci.c
@@ -352,18 +352,7 @@ static struct pci_driver mtd_pci_driver = {
352 .id_table = mtd_pci_ids, 352 .id_table = mtd_pci_ids,
353}; 353};
354 354
355static int __init mtd_pci_maps_init(void) 355module_pci_driver(mtd_pci_driver);
356{
357 return pci_register_driver(&mtd_pci_driver);
358}
359
360static void __exit mtd_pci_maps_exit(void)
361{
362 pci_unregister_driver(&mtd_pci_driver);
363}
364
365module_init(mtd_pci_maps_init);
366module_exit(mtd_pci_maps_exit);
367 356
368MODULE_LICENSE("GPL"); 357MODULE_LICENSE("GPL");
369MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); 358MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>");
diff --git a/drivers/mtd/maps/scb2_flash.c b/drivers/mtd/maps/scb2_flash.c
index 934a72c80078..9dcbc684abdb 100644
--- a/drivers/mtd/maps/scb2_flash.c
+++ b/drivers/mtd/maps/scb2_flash.c
@@ -234,20 +234,7 @@ static struct pci_driver scb2_flash_driver = {
234 .remove = __devexit_p(scb2_flash_remove), 234 .remove = __devexit_p(scb2_flash_remove),
235}; 235};
236 236
237static int __init 237module_pci_driver(scb2_flash_driver);
238scb2_flash_init(void)
239{
240 return pci_register_driver(&scb2_flash_driver);
241}
242
243static void __exit
244scb2_flash_exit(void)
245{
246 pci_unregister_driver(&scb2_flash_driver);
247}
248
249module_init(scb2_flash_init);
250module_exit(scb2_flash_exit);
251 238
252MODULE_LICENSE("GPL"); 239MODULE_LICENSE("GPL");
253MODULE_AUTHOR("Tim Hockin <thockin@sun.com>"); 240MODULE_AUTHOR("Tim Hockin <thockin@sun.com>");
diff --git a/drivers/mtd/maps/wr_sbc82xx_flash.c b/drivers/mtd/maps/wr_sbc82xx_flash.c
index 71b0ba797912..e7534c82f93a 100644
--- a/drivers/mtd/maps/wr_sbc82xx_flash.c
+++ b/drivers/mtd/maps/wr_sbc82xx_flash.c
@@ -59,7 +59,7 @@ static struct mtd_partition bigflash_parts[] = {
59 } 59 }
60}; 60};
61 61
62static const char *part_probes[] __initdata = {"cmdlinepart", "RedBoot", NULL}; 62static const char *part_probes[] __initconst = {"cmdlinepart", "RedBoot", NULL};
63 63
64#define init_sbc82xx_one_flash(map, br, or) \ 64#define init_sbc82xx_one_flash(map, br, or) \
65do { \ 65do { \
diff --git a/drivers/mtd/mtdcore.c b/drivers/mtd/mtdcore.c
index c837507dfb1c..575730744fdb 100644
--- a/drivers/mtd/mtdcore.c
+++ b/drivers/mtd/mtdcore.c
@@ -250,6 +250,43 @@ static ssize_t mtd_name_show(struct device *dev,
250} 250}
251static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL); 251static DEVICE_ATTR(name, S_IRUGO, mtd_name_show, NULL);
252 252
253static ssize_t mtd_ecc_strength_show(struct device *dev,
254 struct device_attribute *attr, char *buf)
255{
256 struct mtd_info *mtd = dev_get_drvdata(dev);
257
258 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->ecc_strength);
259}
260static DEVICE_ATTR(ecc_strength, S_IRUGO, mtd_ecc_strength_show, NULL);
261
262static ssize_t mtd_bitflip_threshold_show(struct device *dev,
263 struct device_attribute *attr,
264 char *buf)
265{
266 struct mtd_info *mtd = dev_get_drvdata(dev);
267
268 return snprintf(buf, PAGE_SIZE, "%u\n", mtd->bitflip_threshold);
269}
270
271static ssize_t mtd_bitflip_threshold_store(struct device *dev,
272 struct device_attribute *attr,
273 const char *buf, size_t count)
274{
275 struct mtd_info *mtd = dev_get_drvdata(dev);
276 unsigned int bitflip_threshold;
277 int retval;
278
279 retval = kstrtouint(buf, 0, &bitflip_threshold);
280 if (retval)
281 return retval;
282
283 mtd->bitflip_threshold = bitflip_threshold;
284 return count;
285}
286static DEVICE_ATTR(bitflip_threshold, S_IRUGO | S_IWUSR,
287 mtd_bitflip_threshold_show,
288 mtd_bitflip_threshold_store);
289
253static struct attribute *mtd_attrs[] = { 290static struct attribute *mtd_attrs[] = {
254 &dev_attr_type.attr, 291 &dev_attr_type.attr,
255 &dev_attr_flags.attr, 292 &dev_attr_flags.attr,
@@ -260,6 +297,8 @@ static struct attribute *mtd_attrs[] = {
260 &dev_attr_oobsize.attr, 297 &dev_attr_oobsize.attr,
261 &dev_attr_numeraseregions.attr, 298 &dev_attr_numeraseregions.attr,
262 &dev_attr_name.attr, 299 &dev_attr_name.attr,
300 &dev_attr_ecc_strength.attr,
301 &dev_attr_bitflip_threshold.attr,
263 NULL, 302 NULL,
264}; 303};
265 304
@@ -322,6 +361,10 @@ int add_mtd_device(struct mtd_info *mtd)
322 mtd->index = i; 361 mtd->index = i;
323 mtd->usecount = 0; 362 mtd->usecount = 0;
324 363
364 /* default value if not set by driver */
365 if (mtd->bitflip_threshold == 0)
366 mtd->bitflip_threshold = mtd->ecc_strength;
367
325 if (is_power_of_2(mtd->erasesize)) 368 if (is_power_of_2(mtd->erasesize))
326 mtd->erasesize_shift = ffs(mtd->erasesize) - 1; 369 mtd->erasesize_shift = ffs(mtd->erasesize) - 1;
327 else 370 else
@@ -757,12 +800,24 @@ EXPORT_SYMBOL_GPL(mtd_get_unmapped_area);
757int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, 800int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
758 u_char *buf) 801 u_char *buf)
759{ 802{
803 int ret_code;
760 *retlen = 0; 804 *retlen = 0;
761 if (from < 0 || from > mtd->size || len > mtd->size - from) 805 if (from < 0 || from > mtd->size || len > mtd->size - from)
762 return -EINVAL; 806 return -EINVAL;
763 if (!len) 807 if (!len)
764 return 0; 808 return 0;
765 return mtd->_read(mtd, from, len, retlen, buf); 809
810 /*
811 * In the absence of an error, drivers return a non-negative integer
812 * representing the maximum number of bitflips that were corrected on
813 * any one ecc region (if applicable; zero otherwise).
814 */
815 ret_code = mtd->_read(mtd, from, len, retlen, buf);
816 if (unlikely(ret_code < 0))
817 return ret_code;
818 if (mtd->ecc_strength == 0)
819 return 0; /* device lacks ecc */
820 return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
766} 821}
767EXPORT_SYMBOL_GPL(mtd_read); 822EXPORT_SYMBOL_GPL(mtd_read);
768 823
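With this change mtd_read() no longer returns -EUCLEAN for every corrected bitflip: the driver reports the worst per-ECC-region bitflip count, and the core compares it against bitflip_threshold (defaulting to ecc_strength, tunable through the new sysfs attribute). A sketch of the intended caller pattern (the wrapper is illustrative, not an existing kernel helper):

#include <linux/mtd/mtd.h>

/* Illustrative caller: -EUCLEAN now means "data is good but the worst ECC
 * region reached bitflip_threshold", i.e. the block should be scrubbed. */
static int read_and_flag_scrub(struct mtd_info *mtd, loff_t from, size_t len,
			       u_char *buf, bool *needs_scrub)
{
	size_t retlen;
	int err = mtd_read(mtd, from, len, &retlen, buf);

	*needs_scrub = mtd_is_bitflip(err);	/* err == -EUCLEAN */
	if (err && !mtd_is_bitflip(err))
		return err;			/* -EBADMSG, -EIO, ... */
	return 0;
}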
diff --git a/drivers/mtd/mtdoops.c b/drivers/mtd/mtdoops.c
index ae36d7e1e913..551e316e4454 100644
--- a/drivers/mtd/mtdoops.c
+++ b/drivers/mtd/mtdoops.c
@@ -304,32 +304,17 @@ static void find_next_position(struct mtdoops_context *cxt)
304} 304}
305 305
306static void mtdoops_do_dump(struct kmsg_dumper *dumper, 306static void mtdoops_do_dump(struct kmsg_dumper *dumper,
307 enum kmsg_dump_reason reason, const char *s1, unsigned long l1, 307 enum kmsg_dump_reason reason)
308 const char *s2, unsigned long l2)
309{ 308{
310 struct mtdoops_context *cxt = container_of(dumper, 309 struct mtdoops_context *cxt = container_of(dumper,
311 struct mtdoops_context, dump); 310 struct mtdoops_context, dump);
312 unsigned long s1_start, s2_start;
313 unsigned long l1_cpy, l2_cpy;
314 char *dst;
315
316 if (reason != KMSG_DUMP_OOPS &&
317 reason != KMSG_DUMP_PANIC)
318 return;
319 311
320 /* Only dump oopses if dump_oops is set */ 312 /* Only dump oopses if dump_oops is set */
321 if (reason == KMSG_DUMP_OOPS && !dump_oops) 313 if (reason == KMSG_DUMP_OOPS && !dump_oops)
322 return; 314 return;
323 315
324 dst = cxt->oops_buf + MTDOOPS_HEADER_SIZE; /* Skip the header */ 316 kmsg_dump_get_buffer(dumper, true, cxt->oops_buf + MTDOOPS_HEADER_SIZE,
325 l2_cpy = min(l2, record_size - MTDOOPS_HEADER_SIZE); 317 record_size - MTDOOPS_HEADER_SIZE, NULL);
326 l1_cpy = min(l1, record_size - MTDOOPS_HEADER_SIZE - l2_cpy);
327
328 s2_start = l2 - l2_cpy;
329 s1_start = l1 - l1_cpy;
330
331 memcpy(dst, s1 + s1_start, l1_cpy);
332 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
333 318
334 /* Panics must be written immediately */ 319 /* Panics must be written immediately */
335 if (reason != KMSG_DUMP_OOPS) 320 if (reason != KMSG_DUMP_OOPS)
@@ -375,6 +360,7 @@ static void mtdoops_notify_add(struct mtd_info *mtd)
375 return; 360 return;
376 } 361 }
377 362
363 cxt->dump.max_reason = KMSG_DUMP_OOPS;
378 cxt->dump.dump = mtdoops_do_dump; 364 cxt->dump.dump = mtdoops_do_dump;
379 err = kmsg_dump_register(&cxt->dump); 365 err = kmsg_dump_register(&cxt->dump);
380 if (err) { 366 if (err) {
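Under the reworked kmsg_dump API used here, a dumper no longer receives raw (s1, l1, s2, l2) log fragments: it filters by reason through max_reason and pulls the tail of the log itself with kmsg_dump_get_buffer(). A minimal dumper sketch along those lines (buffer size and names are illustrative):

#include <linux/kmsg_dump.h>
#include <linux/module.h>

static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	/* copy the newest messages that fit into the buffer */
	kmsg_dump_get_buffer(dumper, true, example_buf,
			     sizeof(example_buf), &len);
	/* ... write 'len' bytes of example_buf to persistent storage ... */
}

static struct kmsg_dumper example_dumper = {
	.dump		= example_do_dump,
	.max_reason	= KMSG_DUMP_OOPS,	/* panics are included as well */
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_init);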
diff --git a/drivers/mtd/mtdpart.c b/drivers/mtd/mtdpart.c
index 9651c06de0a9..d518e4db8a0b 100644
--- a/drivers/mtd/mtdpart.c
+++ b/drivers/mtd/mtdpart.c
@@ -67,12 +67,12 @@ static int part_read(struct mtd_info *mtd, loff_t from, size_t len,
67 stats = part->master->ecc_stats; 67 stats = part->master->ecc_stats;
68 res = part->master->_read(part->master, from + part->offset, len, 68 res = part->master->_read(part->master, from + part->offset, len,
69 retlen, buf); 69 retlen, buf);
70 if (unlikely(res)) { 70 if (unlikely(mtd_is_eccerr(res)))
71 if (mtd_is_bitflip(res)) 71 mtd->ecc_stats.failed +=
72 mtd->ecc_stats.corrected += part->master->ecc_stats.corrected - stats.corrected; 72 part->master->ecc_stats.failed - stats.failed;
73 if (mtd_is_eccerr(res)) 73 else
74 mtd->ecc_stats.failed += part->master->ecc_stats.failed - stats.failed; 74 mtd->ecc_stats.corrected +=
75 } 75 part->master->ecc_stats.corrected - stats.corrected;
76 return res; 76 return res;
77} 77}
78 78
@@ -517,6 +517,8 @@ static struct mtd_part *allocate_partition(struct mtd_info *master,
517 517
518 slave->mtd.ecclayout = master->ecclayout; 518 slave->mtd.ecclayout = master->ecclayout;
519 slave->mtd.ecc_strength = master->ecc_strength; 519 slave->mtd.ecc_strength = master->ecc_strength;
520 slave->mtd.bitflip_threshold = master->bitflip_threshold;
521
520 if (master->_block_isbad) { 522 if (master->_block_isbad) {
521 uint64_t offs = 0; 523 uint64_t offs = 0;
522 524
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index 7d17cecad69d..31bb7e5b504a 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -115,6 +115,46 @@ config MTD_NAND_OMAP2
115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4 115 Support for NAND flash on Texas Instruments OMAP2, OMAP3 and OMAP4
116 platforms. 116 platforms.
117 117
118config MTD_NAND_OMAP_BCH
119 depends on MTD_NAND && MTD_NAND_OMAP2 && ARCH_OMAP3
120 bool "Enable support for hardware BCH error correction"
121 default n
122 select BCH
123 select BCH_CONST_PARAMS
124 help
125 Support for hardware BCH error correction.
126
127choice
128 prompt "BCH error correction capability"
129 depends on MTD_NAND_OMAP_BCH
130
131config MTD_NAND_OMAP_BCH8
132 bool "8 bits / 512 bytes (recommended)"
133 help
134 Support correcting up to 8 bitflips per 512-byte block.
135 This will use 13 bytes of spare area per 512 bytes of page data.
136 This is the recommended mode, as 4-bit mode does not work
137 on some OMAP3 revisions, due to a hardware bug.
138
139config MTD_NAND_OMAP_BCH4
140 bool "4 bits / 512 bytes"
141 help
142 Support correcting up to 4 bitflips per 512-byte block.
143 This will use 7 bytes of spare area per 512 bytes of page data.
144 Note that this mode does not work on some OMAP3 revisions, due to a
145 hardware bug. Please check your OMAP datasheet before selecting this
146 mode.
147
148endchoice
149
150if MTD_NAND_OMAP_BCH
151config BCH_CONST_M
152 default 13
153config BCH_CONST_T
154 default 4 if MTD_NAND_OMAP_BCH4
155 default 8 if MTD_NAND_OMAP_BCH8
156endif
157
118config MTD_NAND_IDS 158config MTD_NAND_IDS
119 tristate 159 tristate
120 160
@@ -440,7 +480,7 @@ config MTD_NAND_NANDSIM
440 480
441config MTD_NAND_GPMI_NAND 481config MTD_NAND_GPMI_NAND
442 bool "GPMI NAND Flash Controller driver" 482 bool "GPMI NAND Flash Controller driver"
443 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28) 483 depends on MTD_NAND && (SOC_IMX23 || SOC_IMX28 || SOC_IMX6Q)
444 help 484 help
445 Enables NAND Flash support for IMX23 or IMX28. 485 Enables NAND Flash support for IMX23 or IMX28.
446 The GPMI controller is very powerful, with the help of BCH 486 The GPMI controller is very powerful, with the help of BCH
diff --git a/drivers/mtd/nand/alauda.c b/drivers/mtd/nand/alauda.c
index 4f20e1d8bef1..60a0dfdb0808 100644
--- a/drivers/mtd/nand/alauda.c
+++ b/drivers/mtd/nand/alauda.c
@@ -414,7 +414,7 @@ static int alauda_bounce_read(struct mtd_info *mtd, loff_t from, size_t len,
414 } 414 }
415 err = 0; 415 err = 0;
416 if (corrected) 416 if (corrected)
417 err = -EUCLEAN; 417 err = 1; /* return max_bitflips per ecc step */
418 if (uncorrected) 418 if (uncorrected)
419 err = -EBADMSG; 419 err = -EBADMSG;
420out: 420out:
@@ -446,7 +446,7 @@ static int alauda_read(struct mtd_info *mtd, loff_t from, size_t len,
446 } 446 }
447 err = 0; 447 err = 0;
448 if (corrected) 448 if (corrected)
449 err = -EUCLEAN; 449 err = 1; /* return max_bitflips per ecc step */
450 if (uncorrected) 450 if (uncorrected)
451 err = -EBADMSG; 451 err = -EBADMSG;
452 return err; 452 return err;
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index 2165576a1c67..97ac6712bb19 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -324,9 +324,10 @@ static int atmel_nand_calculate(struct mtd_info *mtd,
324 * mtd: mtd info structure 324 * mtd: mtd info structure
325 * chip: nand chip info structure 325 * chip: nand chip info structure
326 * buf: buffer to store read data 326 * buf: buffer to store read data
327 * oob_required: caller expects OOB data read to chip->oob_poi
327 */ 328 */
328static int atmel_nand_read_page(struct mtd_info *mtd, 329static int atmel_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
329 struct nand_chip *chip, uint8_t *buf, int page) 330 uint8_t *buf, int oob_required, int page)
330{ 331{
331 int eccsize = chip->ecc.size; 332 int eccsize = chip->ecc.size;
332 int eccbytes = chip->ecc.bytes; 333 int eccbytes = chip->ecc.bytes;
@@ -335,6 +336,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
335 uint8_t *oob = chip->oob_poi; 336 uint8_t *oob = chip->oob_poi;
336 uint8_t *ecc_pos; 337 uint8_t *ecc_pos;
337 int stat; 338 int stat;
339 unsigned int max_bitflips = 0;
338 340
339 /* 341 /*
340 * Errata: ALE is incorrectly wired up to the ECC controller 342 * Errata: ALE is incorrectly wired up to the ECC controller
@@ -371,10 +373,12 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
371 /* check if there's an error */ 373 /* check if there's an error */
372 stat = chip->ecc.correct(mtd, p, oob, NULL); 374 stat = chip->ecc.correct(mtd, p, oob, NULL);
373 375
374 if (stat < 0) 376 if (stat < 0) {
375 mtd->ecc_stats.failed++; 377 mtd->ecc_stats.failed++;
376 else 378 } else {
377 mtd->ecc_stats.corrected += stat; 379 mtd->ecc_stats.corrected += stat;
380 max_bitflips = max_t(unsigned int, max_bitflips, stat);
381 }
378 382
379 /* get back to oob start (end of page) */ 383 /* get back to oob start (end of page) */
380 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1); 384 chip->cmdfunc(mtd, NAND_CMD_RNDOUT, mtd->writesize, -1);
@@ -382,7 +386,7 @@ static int atmel_nand_read_page(struct mtd_info *mtd,
382 /* read the oob */ 386 /* read the oob */
383 chip->read_buf(mtd, oob, mtd->oobsize); 387 chip->read_buf(mtd, oob, mtd->oobsize);
384 388
385 return 0; 389 return max_bitflips;
386} 390}
387 391
388/* 392/*
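The atmel conversion above, and the NAND drivers that follow, all apply the same pattern: ecc.read_page() gains an oob_required argument and returns the largest number of bitflips corrected in any single ECC step instead of 0, leaving the -EUCLEAN decision to the MTD core. A stripped-down skeleton of that convention (the two helpers are placeholders, not a real controller API):

#include <linux/kernel.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>

/* placeholder helpers standing in for a controller's transfer/correct code */
static int read_and_correct_step(struct mtd_info *mtd, struct nand_chip *chip,
				 uint8_t *buf, int step, int page);
static void read_oob_if_needed(struct mtd_info *mtd, struct nand_chip *chip,
			       int page);

static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	int step, stat;
	unsigned int max_bitflips = 0;

	for (step = 0; step < chip->ecc.steps; step++, buf += chip->ecc.size) {
		stat = read_and_correct_step(mtd, chip, buf, step, page);
		if (stat < 0) {
			mtd->ecc_stats.failed++;	/* uncorrectable step */
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}
	if (oob_required)
		read_oob_if_needed(mtd, chip, page);	/* fill chip->oob_poi */

	return max_bitflips;	/* worst ECC step; the core applies the threshold */
}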
diff --git a/drivers/mtd/nand/au1550nd.c b/drivers/mtd/nand/au1550nd.c
index 73abbc3e093e..9f609d2dcf62 100644
--- a/drivers/mtd/nand/au1550nd.c
+++ b/drivers/mtd/nand/au1550nd.c
@@ -508,8 +508,6 @@ static int __devinit au1550nd_probe(struct platform_device *pdev)
508 this->chip_delay = 30; 508 this->chip_delay = 30;
509 this->ecc.mode = NAND_ECC_SOFT; 509 this->ecc.mode = NAND_ECC_SOFT;
510 510
511 this->options = NAND_NO_AUTOINCR;
512
513 if (pd->devwidth) 511 if (pd->devwidth)
514 this->options |= NAND_BUSWIDTH_16; 512 this->options |= NAND_BUSWIDTH_16;
515 513
diff --git a/drivers/mtd/nand/bcm_umi_bch.c b/drivers/mtd/nand/bcm_umi_bch.c
index a930666d0687..5914bb32e001 100644
--- a/drivers/mtd/nand/bcm_umi_bch.c
+++ b/drivers/mtd/nand/bcm_umi_bch.c
@@ -22,9 +22,9 @@
22 22
23/* ---- Private Function Prototypes -------------------------------------- */ 23/* ---- Private Function Prototypes -------------------------------------- */
24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, 24static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
25 struct nand_chip *chip, uint8_t *buf, int page); 25 struct nand_chip *chip, uint8_t *buf, int oob_required, int page);
26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, 26static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
27 struct nand_chip *chip, const uint8_t *buf); 27 struct nand_chip *chip, const uint8_t *buf, int oob_required);
28 28
29/* ---- Private Variables ------------------------------------------------ */ 29/* ---- Private Variables ------------------------------------------------ */
30 30
@@ -103,11 +103,12 @@ static struct nand_ecclayout nand_hw_eccoob_4096 = {
103* @mtd: mtd info structure 103* @mtd: mtd info structure
104* @chip: nand chip info structure 104* @chip: nand chip info structure
105* @buf: buffer to store read data 105* @buf: buffer to store read data
106* @oob_required: caller expects OOB data read to chip->oob_poi
106* 107*
107***************************************************************************/ 108***************************************************************************/
108static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd, 109static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
109 struct nand_chip *chip, uint8_t * buf, 110 struct nand_chip *chip, uint8_t * buf,
110 int page) 111 int oob_required, int page)
111{ 112{
112 int sectorIdx = 0; 113 int sectorIdx = 0;
113 int eccsize = chip->ecc.size; 114 int eccsize = chip->ecc.size;
@@ -116,6 +117,7 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
116 uint8_t eccCalc[NAND_ECC_NUM_BYTES]; 117 uint8_t eccCalc[NAND_ECC_NUM_BYTES];
117 int sectorOobSize = mtd->oobsize / eccsteps; 118 int sectorOobSize = mtd->oobsize / eccsteps;
118 int stat; 119 int stat;
120 unsigned int max_bitflips = 0;
119 121
120 for (sectorIdx = 0; sectorIdx < eccsteps; 122 for (sectorIdx = 0; sectorIdx < eccsteps;
121 sectorIdx++, datap += eccsize) { 123 sectorIdx++, datap += eccsize) {
@@ -177,9 +179,10 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
177 } 179 }
178#endif 180#endif
179 mtd->ecc_stats.corrected += stat; 181 mtd->ecc_stats.corrected += stat;
182 max_bitflips = max_t(unsigned int, max_bitflips, stat);
180 } 183 }
181 } 184 }
182 return 0; 185 return max_bitflips;
183} 186}
184 187
185/**************************************************************************** 188/****************************************************************************
@@ -188,10 +191,11 @@ static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
188* @mtd: mtd info structure 191* @mtd: mtd info structure
189* @chip: nand chip info structure 192* @chip: nand chip info structure
190* @buf: data buffer 193* @buf: data buffer
194* @oob_required: must write chip->oob_poi to OOB
191* 195*
192***************************************************************************/ 196***************************************************************************/
193static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd, 197static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
194 struct nand_chip *chip, const uint8_t *buf) 198 struct nand_chip *chip, const uint8_t *buf, int oob_required)
195{ 199{
196 int sectorIdx = 0; 200 int sectorIdx = 0;
197 int eccsize = chip->ecc.size; 201 int eccsize = chip->ecc.size;
diff --git a/drivers/mtd/nand/bcm_umi_nand.c b/drivers/mtd/nand/bcm_umi_nand.c
index 6908cdde3065..c855e7cd337b 100644
--- a/drivers/mtd/nand/bcm_umi_nand.c
+++ b/drivers/mtd/nand/bcm_umi_nand.c
@@ -341,7 +341,7 @@ static int bcm_umi_nand_verify_buf(struct mtd_info *mtd, const u_char * buf,
341 * for MLC parts which may have permanently stuck bits. 341 * for MLC parts which may have permanently stuck bits.
342 */ 342 */
343 struct nand_chip *chip = mtd->priv; 343 struct nand_chip *chip = mtd->priv;
344 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0); 344 int ret = chip->ecc.read_page(mtd, chip, readbackbuf, 0, 0);
345 if (ret < 0) 345 if (ret < 0)
346 return -EFAULT; 346 return -EFAULT;
347 else { 347 else {
@@ -476,12 +476,7 @@ static int __devinit bcm_umi_nand_probe(struct platform_device *pdev)
476 this->badblock_pattern = &largepage_bbt; 476 this->badblock_pattern = &largepage_bbt;
477 } 477 }
478 478
479 /* 479 this->ecc.strength = 8;
480 * FIXME: ecc strength value of 6 bits per 512 bytes of data is a
481 * conservative guess, given 13 ecc bytes and using bch alg.
482 * (Assume Galois field order m=15 to allow a margin of error.)
483 */
484 this->ecc.strength = 6;
485 480
486#endif 481#endif
487 482
diff --git a/drivers/mtd/nand/bf5xx_nand.c b/drivers/mtd/nand/bf5xx_nand.c
index d7b86b925de5..3f1c18599cbd 100644
--- a/drivers/mtd/nand/bf5xx_nand.c
+++ b/drivers/mtd/nand/bf5xx_nand.c
@@ -558,7 +558,7 @@ static void bf5xx_nand_dma_write_buf(struct mtd_info *mtd,
558} 558}
559 559
560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 560static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
561 uint8_t *buf, int page) 561 uint8_t *buf, int oob_required, int page)
562{ 562{
563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize); 563 bf5xx_nand_read_buf(mtd, buf, mtd->writesize);
564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize); 564 bf5xx_nand_read_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -567,7 +567,7 @@ static int bf5xx_nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip
567} 567}
568 568
569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 569static void bf5xx_nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
570 const uint8_t *buf) 570 const uint8_t *buf, int oob_required)
571{ 571{
572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize); 572 bf5xx_nand_write_buf(mtd, buf, mtd->writesize);
573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize); 573 bf5xx_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
diff --git a/drivers/mtd/nand/cafe_nand.c b/drivers/mtd/nand/cafe_nand.c
index 2a96e1a12062..41371ba1a811 100644
--- a/drivers/mtd/nand/cafe_nand.c
+++ b/drivers/mtd/nand/cafe_nand.c
@@ -364,25 +364,27 @@ static int cafe_nand_write_oob(struct mtd_info *mtd,
364 364
365/* Don't use -- use nand_read_oob_std for now */ 365/* Don't use -- use nand_read_oob_std for now */
366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 366static int cafe_nand_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
367 int page, int sndcmd) 367 int page)
368{ 368{
369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 369 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 370 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
371 return 1; 371 return 0;
372} 372}
373/** 373/**
374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read 374 * cafe_nand_read_page_syndrome - [REPLACEABLE] hardware ecc syndrome based page read
375 * @mtd: mtd info structure 375 * @mtd: mtd info structure
376 * @chip: nand chip info structure 376 * @chip: nand chip info structure
377 * @buf: buffer to store read data 377 * @buf: buffer to store read data
378 * @oob_required: caller expects OOB data read to chip->oob_poi
378 * 379 *
379 * The hw generator calculates the error syndrome automatically. Therefore 380 * The hw generator calculates the error syndrome automatically. Therefore
380 * we need a special oob layout and handling. 381 * we need a special oob layout and handling.
381 */ 382 */
382static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip, 383static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
383 uint8_t *buf, int page) 384 uint8_t *buf, int oob_required, int page)
384{ 385{
385 struct cafe_priv *cafe = mtd->priv; 386 struct cafe_priv *cafe = mtd->priv;
387 unsigned int max_bitflips = 0;
386 388
387 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n", 389 cafe_dev_dbg(&cafe->pdev->dev, "ECC result %08x SYN1,2 %08x\n",
388 cafe_readl(cafe, NAND_ECC_RESULT), 390 cafe_readl(cafe, NAND_ECC_RESULT),
@@ -449,10 +451,11 @@ static int cafe_nand_read_page(struct mtd_info *mtd, struct nand_chip *chip,
449 } else { 451 } else {
450 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n); 452 dev_dbg(&cafe->pdev->dev, "Corrected %d symbol errors\n", n);
451 mtd->ecc_stats.corrected += n; 453 mtd->ecc_stats.corrected += n;
454 max_bitflips = max_t(unsigned int, max_bitflips, n);
452 } 455 }
453 } 456 }
454 457
455 return 0; 458 return max_bitflips;
456} 459}
457 460
458static struct nand_ecclayout cafe_oobinfo_2048 = { 461static struct nand_ecclayout cafe_oobinfo_2048 = {
@@ -518,7 +521,8 @@ static struct nand_bbt_descr cafe_bbt_mirror_descr_512 = {
518 521
519 522
520static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd, 523static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
521 struct nand_chip *chip, const uint8_t *buf) 524 struct nand_chip *chip,
525 const uint8_t *buf, int oob_required)
522{ 526{
523 struct cafe_priv *cafe = mtd->priv; 527 struct cafe_priv *cafe = mtd->priv;
524 528
@@ -530,16 +534,17 @@ static void cafe_nand_write_page_lowlevel(struct mtd_info *mtd,
530} 534}
531 535
532static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 536static int cafe_nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
533 const uint8_t *buf, int page, int cached, int raw) 537 const uint8_t *buf, int oob_required, int page,
538 int cached, int raw)
534{ 539{
535 int status; 540 int status;
536 541
537 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 542 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
538 543
539 if (unlikely(raw)) 544 if (unlikely(raw))
540 chip->ecc.write_page_raw(mtd, chip, buf); 545 chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
541 else 546 else
542 chip->ecc.write_page(mtd, chip, buf); 547 chip->ecc.write_page(mtd, chip, buf, oob_required);
543 548
544 /* 549 /*
545 * Cached programming disabled for now; not sure if it's worth the 550 * Cached programming disabled for now; not sure if it's worth the
@@ -685,7 +690,7 @@ static int __devinit cafe_nand_probe(struct pci_dev *pdev,
685 690
686 /* Enable the following for a flash based bad block table */ 691 /* Enable the following for a flash based bad block table */
687 cafe->nand.bbt_options = NAND_BBT_USE_FLASH; 692 cafe->nand.bbt_options = NAND_BBT_USE_FLASH;
688 cafe->nand.options = NAND_NO_AUTOINCR | NAND_OWN_BUFFERS; 693 cafe->nand.options = NAND_OWN_BUFFERS;
689 694
690 if (skipbbt) { 695 if (skipbbt) {
691 cafe->nand.options |= NAND_SKIP_BBTSCAN; 696 cafe->nand.options |= NAND_SKIP_BBTSCAN;
@@ -888,17 +893,7 @@ static struct pci_driver cafe_nand_pci_driver = {
888 .resume = cafe_nand_resume, 893 .resume = cafe_nand_resume,
889}; 894};
890 895
891static int __init cafe_nand_init(void) 896module_pci_driver(cafe_nand_pci_driver);
892{
893 return pci_register_driver(&cafe_nand_pci_driver);
894}
895
896static void __exit cafe_nand_exit(void)
897{
898 pci_unregister_driver(&cafe_nand_pci_driver);
899}
900module_init(cafe_nand_init);
901module_exit(cafe_nand_exit);
902 897
903MODULE_LICENSE("GPL"); 898MODULE_LICENSE("GPL");
904MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); 899MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>");
diff --git a/drivers/mtd/nand/cs553x_nand.c b/drivers/mtd/nand/cs553x_nand.c
index 821c34c62500..adb6c3ef37fb 100644
--- a/drivers/mtd/nand/cs553x_nand.c
+++ b/drivers/mtd/nand/cs553x_nand.c
@@ -240,7 +240,6 @@ static int __init cs553x_init_one(int cs, int mmio, unsigned long adr)
240 240
241 /* Enable the following for a flash based bad block table */ 241 /* Enable the following for a flash based bad block table */
242 this->bbt_options = NAND_BBT_USE_FLASH; 242 this->bbt_options = NAND_BBT_USE_FLASH;
243 this->options = NAND_NO_AUTOINCR;
244 243
245 /* Scan to find existence of the device */ 244 /* Scan to find existence of the device */
246 if (nand_scan(new_mtd, 1)) { 245 if (nand_scan(new_mtd, 1)) {
diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
index a9e57d686297..0650aafa0dd2 100644
--- a/drivers/mtd/nand/denali.c
+++ b/drivers/mtd/nand/denali.c
@@ -924,9 +924,10 @@ bool is_erased(uint8_t *buf, int len)
924#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO) 924#define ECC_LAST_ERR(x) ((x) & ERR_CORRECTION_INFO__LAST_ERR_INFO)
925 925
926static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf, 926static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
927 uint32_t irq_status) 927 uint32_t irq_status, unsigned int *max_bitflips)
928{ 928{
929 bool check_erased_page = false; 929 bool check_erased_page = false;
930 unsigned int bitflips = 0;
930 931
931 if (irq_status & INTR_STATUS__ECC_ERR) { 932 if (irq_status & INTR_STATUS__ECC_ERR) {
932 /* read the ECC errors. we'll ignore them for now */ 933 /* read the ECC errors. we'll ignore them for now */
@@ -965,6 +966,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
965 /* correct the ECC error */ 966 /* correct the ECC error */
966 buf[offset] ^= err_correction_value; 967 buf[offset] ^= err_correction_value;
967 denali->mtd.ecc_stats.corrected++; 968 denali->mtd.ecc_stats.corrected++;
969 bitflips++;
968 } 970 }
969 } else { 971 } else {
970 /* if the error is not correctable, need to 972 /* if the error is not correctable, need to
@@ -984,6 +986,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
984 clear_interrupts(denali); 986 clear_interrupts(denali);
985 denali_set_intr_modes(denali, true); 987 denali_set_intr_modes(denali, true);
986 } 988 }
989 *max_bitflips = bitflips;
987 return check_erased_page; 990 return check_erased_page;
988} 991}
989 992
@@ -1084,7 +1087,7 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
1084 * by write_page above. 1087 * by write_page above.
1085 * */ 1088 * */
1086static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip, 1089static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1087 const uint8_t *buf) 1090 const uint8_t *buf, int oob_required)
1088{ 1091{
1089 /* for regular page writes, we let HW handle all the ECC 1092 /* for regular page writes, we let HW handle all the ECC
1090 * data written to the device. */ 1093 * data written to the device. */
@@ -1096,7 +1099,7 @@ static void denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
1096 * write_page() function above. 1099 * write_page() function above.
1097 */ 1100 */
1098static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1101static void denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1099 const uint8_t *buf) 1102 const uint8_t *buf, int oob_required)
1100{ 1103{
1101 /* for raw page writes, we want to disable ECC and simply write 1104 /* for raw page writes, we want to disable ECC and simply write
1102 whatever data is in the buffer. */ 1105 whatever data is in the buffer. */
@@ -1110,17 +1113,17 @@ static int denali_write_oob(struct mtd_info *mtd, struct nand_chip *chip,
1110} 1113}
1111 1114
1112static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1115static int denali_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1113 int page, int sndcmd) 1116 int page)
1114{ 1117{
1115 read_oob_data(mtd, chip->oob_poi, page); 1118 read_oob_data(mtd, chip->oob_poi, page);
1116 1119
1117 return 0; /* notify NAND core to send command to 1120 return 0;
1118 NAND device. */
1119} 1121}
1120 1122
1121static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip, 1123static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1122 uint8_t *buf, int page) 1124 uint8_t *buf, int oob_required, int page)
1123{ 1125{
1126 unsigned int max_bitflips;
1124 struct denali_nand_info *denali = mtd_to_denali(mtd); 1127 struct denali_nand_info *denali = mtd_to_denali(mtd);
1125 1128
1126 dma_addr_t addr = denali->buf.dma_buf; 1129 dma_addr_t addr = denali->buf.dma_buf;
@@ -1153,7 +1156,7 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1153 1156
1154 memcpy(buf, denali->buf.buf, mtd->writesize); 1157 memcpy(buf, denali->buf.buf, mtd->writesize);
1155 1158
1156 check_erased_page = handle_ecc(denali, buf, irq_status); 1159 check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
1157 denali_enable_dma(denali, false); 1160 denali_enable_dma(denali, false);
1158 1161
1159 if (check_erased_page) { 1162 if (check_erased_page) {
@@ -1167,11 +1170,11 @@ static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
1167 denali->mtd.ecc_stats.failed++; 1170 denali->mtd.ecc_stats.failed++;
1168 } 1171 }
1169 } 1172 }
1170 return 0; 1173 return max_bitflips;
1171} 1174}
1172 1175
1173static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1176static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1174 uint8_t *buf, int page) 1177 uint8_t *buf, int oob_required, int page)
1175{ 1178{
1176 struct denali_nand_info *denali = mtd_to_denali(mtd); 1179 struct denali_nand_info *denali = mtd_to_denali(mtd);
1177 1180
@@ -1702,17 +1705,4 @@ static struct pci_driver denali_pci_driver = {
1702 .remove = denali_pci_remove, 1705 .remove = denali_pci_remove,
1703}; 1706};
1704 1707
1705static int __devinit denali_init(void) 1708module_pci_driver(denali_pci_driver);
1706{
1707 printk(KERN_INFO "Spectra MTD driver\n");
1708 return pci_register_driver(&denali_pci_driver);
1709}
1710
1711/* Free memory */
1712static void __devexit denali_exit(void)
1713{
1714 pci_unregister_driver(&denali_pci_driver);
1715}
1716
1717module_init(denali_init);
1718module_exit(denali_exit);
diff --git a/drivers/mtd/nand/docg4.c b/drivers/mtd/nand/docg4.c
index b08202664543..a225e49a5623 100644
--- a/drivers/mtd/nand/docg4.c
+++ b/drivers/mtd/nand/docg4.c
@@ -720,6 +720,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
720 struct docg4_priv *doc = nand->priv; 720 struct docg4_priv *doc = nand->priv;
721 void __iomem *docptr = doc->virtadr; 721 void __iomem *docptr = doc->virtadr;
722 uint16_t status, edc_err, *buf16; 722 uint16_t status, edc_err, *buf16;
723 int bits_corrected = 0;
723 724
724 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page); 725 dev_dbg(doc->dev, "%s: page %08x\n", __func__, page);
725 726
@@ -772,7 +773,7 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
772 773
773 /* If bitflips are reported, attempt to correct with ecc */ 774 /* If bitflips are reported, attempt to correct with ecc */
774 if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) { 775 if (edc_err & DOC_ECCCONF1_BCH_SYNDROM_ERR) {
775 int bits_corrected = correct_data(mtd, buf, page); 776 bits_corrected = correct_data(mtd, buf, page);
776 if (bits_corrected == -EBADMSG) 777 if (bits_corrected == -EBADMSG)
777 mtd->ecc_stats.failed++; 778 mtd->ecc_stats.failed++;
778 else 779 else
@@ -781,24 +782,24 @@ static int read_page(struct mtd_info *mtd, struct nand_chip *nand,
781 } 782 }
782 783
783 writew(0, docptr + DOC_DATAEND); 784 writew(0, docptr + DOC_DATAEND);
784 return 0; 785 return bits_corrected;
785} 786}
786 787
787 788
788static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 789static int docg4_read_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
789 uint8_t *buf, int page) 790 uint8_t *buf, int oob_required, int page)
790{ 791{
791 return read_page(mtd, nand, buf, page, false); 792 return read_page(mtd, nand, buf, page, false);
792} 793}
793 794
794static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand, 795static int docg4_read_page(struct mtd_info *mtd, struct nand_chip *nand,
795 uint8_t *buf, int page) 796 uint8_t *buf, int oob_required, int page)
796{ 797{
797 return read_page(mtd, nand, buf, page, true); 798 return read_page(mtd, nand, buf, page, true);
798} 799}
799 800
800static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand, 801static int docg4_read_oob(struct mtd_info *mtd, struct nand_chip *nand,
801 int page, int sndcmd) 802 int page)
802{ 803{
803 struct docg4_priv *doc = nand->priv; 804 struct docg4_priv *doc = nand->priv;
804 void __iomem *docptr = doc->virtadr; 805 void __iomem *docptr = doc->virtadr;
@@ -952,13 +953,13 @@ static void write_page(struct mtd_info *mtd, struct nand_chip *nand,
952} 953}
953 954
954static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand, 955static void docg4_write_page_raw(struct mtd_info *mtd, struct nand_chip *nand,
955 const uint8_t *buf) 956 const uint8_t *buf, int oob_required)
956{ 957{
957 return write_page(mtd, nand, buf, false); 958 return write_page(mtd, nand, buf, false);
958} 959}
959 960
960static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand, 961static void docg4_write_page(struct mtd_info *mtd, struct nand_chip *nand,
961 const uint8_t *buf) 962 const uint8_t *buf, int oob_required)
962{ 963{
963 return write_page(mtd, nand, buf, true); 964 return write_page(mtd, nand, buf, true);
964} 965}
@@ -1002,7 +1003,7 @@ static int __init read_factory_bbt(struct mtd_info *mtd)
1002 return -ENOMEM; 1003 return -ENOMEM;
1003 1004
1004 read_page_prologue(mtd, g4_addr); 1005 read_page_prologue(mtd, g4_addr);
1005 status = docg4_read_page(mtd, nand, buf, DOCG4_FACTORY_BBT_PAGE); 1006 status = docg4_read_page(mtd, nand, buf, 0, DOCG4_FACTORY_BBT_PAGE);
1006 if (status) 1007 if (status)
1007 goto exit; 1008 goto exit;
1008 1009
@@ -1079,7 +1080,7 @@ static int docg4_block_markbad(struct mtd_info *mtd, loff_t ofs)
1079 1080
1080 /* write first page of block */ 1081 /* write first page of block */
1081 write_page_prologue(mtd, g4_addr); 1082 write_page_prologue(mtd, g4_addr);
1082 docg4_write_page(mtd, nand, buf); 1083 docg4_write_page(mtd, nand, buf, 1);
1083 ret = pageprog(mtd); 1084 ret = pageprog(mtd);
1084 if (!ret) 1085 if (!ret)
1085 mtd->ecc_stats.badblocks++; 1086 mtd->ecc_stats.badblocks++;
@@ -1192,8 +1193,7 @@ static void __init init_mtd_structs(struct mtd_info *mtd)
1192 nand->ecc.prepad = 8; 1193 nand->ecc.prepad = 8;
1193 nand->ecc.bytes = 8; 1194 nand->ecc.bytes = 8;
1194 nand->ecc.strength = DOCG4_T; 1195 nand->ecc.strength = DOCG4_T;
1195 nand->options = 1196 nand->options = NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE;
1196 NAND_BUSWIDTH_16 | NAND_NO_SUBPAGE_WRITE | NAND_NO_AUTOINCR;
1197 nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA; 1197 nand->IO_ADDR_R = nand->IO_ADDR_W = doc->virtadr + DOC_IOSPACE_DATA;
1198 nand->controller = &nand->hwcontrol; 1198 nand->controller = &nand->hwcontrol;
1199 spin_lock_init(&nand->controller->lock); 1199 spin_lock_init(&nand->controller->lock);
diff --git a/drivers/mtd/nand/fsl_elbc_nand.c b/drivers/mtd/nand/fsl_elbc_nand.c
index 80b5264f0a32..784293806110 100644
--- a/drivers/mtd/nand/fsl_elbc_nand.c
+++ b/drivers/mtd/nand/fsl_elbc_nand.c
@@ -75,6 +75,7 @@ struct fsl_elbc_fcm_ctrl {
75 unsigned int use_mdr; /* Non zero if the MDR is to be set */ 75 unsigned int use_mdr; /* Non zero if the MDR is to be set */
76 unsigned int oob; /* Non zero if operating on OOB data */ 76 unsigned int oob; /* Non zero if operating on OOB data */
77 unsigned int counter; /* counter for the initializations */ 77 unsigned int counter; /* counter for the initializations */
78 unsigned int max_bitflips; /* Saved during READ0 cmd */
78}; 79};
79 80
80/* These map to the positions used by the FCM hardware ECC generator */ 81/* These map to the positions used by the FCM hardware ECC generator */
@@ -253,6 +254,8 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
253 if (chip->ecc.mode != NAND_ECC_HW) 254 if (chip->ecc.mode != NAND_ECC_HW)
254 return 0; 255 return 0;
255 256
257 elbc_fcm_ctrl->max_bitflips = 0;
258
256 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) { 259 if (elbc_fcm_ctrl->read_bytes == mtd->writesize + mtd->oobsize) {
257 uint32_t lteccr = in_be32(&lbc->lteccr); 260 uint32_t lteccr = in_be32(&lbc->lteccr);
258 /* 261 /*
@@ -262,11 +265,16 @@ static int fsl_elbc_run_command(struct mtd_info *mtd)
262 * bits 28-31 are uncorrectable errors, marked elsewhere. 265 * bits 28-31 are uncorrectable errors, marked elsewhere.
263 * for small page nand only 1 bit is used. 266 * for small page nand only 1 bit is used.
264 * if the ELBC doesn't have the lteccr register it reads 0 267 * if the ELBC doesn't have the lteccr register it reads 0
268 * FIXME: 4 bits can be corrected on NANDs with 2k pages, so
269 * count the number of sub-pages with bitflips and update
270 * ecc_stats.corrected accordingly.
265 */ 271 */
266 if (lteccr & 0x000F000F) 272 if (lteccr & 0x000F000F)
267 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */ 273 out_be32(&lbc->lteccr, 0x000F000F); /* clear lteccr */
268 if (lteccr & 0x000F0000) 274 if (lteccr & 0x000F0000) {
269 mtd->ecc_stats.corrected++; 275 mtd->ecc_stats.corrected++;
276 elbc_fcm_ctrl->max_bitflips = 1;
277 }
270 } 278 }
271 279
272 return 0; 280 return 0;
@@ -738,26 +746,28 @@ static int fsl_elbc_chip_init_tail(struct mtd_info *mtd)
738 return 0; 746 return 0;
739} 747}
740 748
741static int fsl_elbc_read_page(struct mtd_info *mtd, 749static int fsl_elbc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
742 struct nand_chip *chip, 750 uint8_t *buf, int oob_required, int page)
743 uint8_t *buf,
744 int page)
745{ 751{
752 struct fsl_elbc_mtd *priv = chip->priv;
753 struct fsl_lbc_ctrl *ctrl = priv->ctrl;
754 struct fsl_elbc_fcm_ctrl *elbc_fcm_ctrl = ctrl->nand;
755
746 fsl_elbc_read_buf(mtd, buf, mtd->writesize); 756 fsl_elbc_read_buf(mtd, buf, mtd->writesize);
747 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 757 if (oob_required)
758 fsl_elbc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
748 759
749 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL) 760 if (fsl_elbc_wait(mtd, chip) & NAND_STATUS_FAIL)
750 mtd->ecc_stats.failed++; 761 mtd->ecc_stats.failed++;
751 762
752 return 0; 763 return elbc_fcm_ctrl->max_bitflips;
753} 764}
754 765
755/* ECC will be calculated automatically, and errors will be detected in 766/* ECC will be calculated automatically, and errors will be detected in
756 * waitfunc. 767 * waitfunc.
757 */ 768 */
758static void fsl_elbc_write_page(struct mtd_info *mtd, 769static void fsl_elbc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
759 struct nand_chip *chip, 770 const uint8_t *buf, int oob_required)
760 const uint8_t *buf)
761{ 771{
762 fsl_elbc_write_buf(mtd, buf, mtd->writesize); 772 fsl_elbc_write_buf(mtd, buf, mtd->writesize);
763 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 773 fsl_elbc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -795,7 +805,7 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
795 chip->bbt_md = &bbt_mirror_descr; 805 chip->bbt_md = &bbt_mirror_descr;
796 806
797 /* set up nand options */ 807 /* set up nand options */
798 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 808 chip->options = NAND_NO_READRDY;
799 chip->bbt_options = NAND_BBT_USE_FLASH; 809 chip->bbt_options = NAND_BBT_USE_FLASH;
800 810
801 chip->controller = &elbc_fcm_ctrl->controller; 811 chip->controller = &elbc_fcm_ctrl->controller;
@@ -814,11 +824,6 @@ static int fsl_elbc_chip_init(struct fsl_elbc_mtd *priv)
814 chip->ecc.size = 512; 824 chip->ecc.size = 512;
815 chip->ecc.bytes = 3; 825 chip->ecc.bytes = 3;
816 chip->ecc.strength = 1; 826 chip->ecc.strength = 1;
817 /*
818 * FIXME: can hardware ecc correct 4 bitflips if page size is
819 * 2k? Then does hardware report number of corrections for this
820 * case? If so, ecc_stats reporting needs to be fixed as well.
821 */
822 } else { 827 } else {
823 /* otherwise fall back to default software ECC */ 828 /* otherwise fall back to default software ECC */
824 chip->ecc.mode = NAND_ECC_SOFT; 829 chip->ecc.mode = NAND_ECC_SOFT;
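The fsl_elbc hunks above adopt the ecc.read_page convention where the return value is the largest number of bitflips corrected in any single ECC step; the MTD core compares that maximum against mtd->bitflip_threshold and flags the read -EUCLEAN once it is reached. A minimal sketch of the convention, with example_read_page() and my_correct_step() as made-up placeholders rather than real kernel helpers:

static int example_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			     uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips = 0;
	int step;

	for (step = 0; step < chip->ecc.steps; step++) {
		/* bitflips corrected in this step, or < 0 if uncorrectable */
		int stat = my_correct_step(mtd, buf + step * chip->ecc.size);

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	if (oob_required)
		chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);

	return max_bitflips;	/* 0 means a clean read */
}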
diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c
index c30ac7b83d28..9602c1b7e27e 100644
--- a/drivers/mtd/nand/fsl_ifc_nand.c
+++ b/drivers/mtd/nand/fsl_ifc_nand.c
@@ -63,6 +63,7 @@ struct fsl_ifc_nand_ctrl {
63 unsigned int oob; /* Non zero if operating on OOB data */ 63 unsigned int oob; /* Non zero if operating on OOB data */
64 unsigned int eccread; /* Non zero for a full-page ECC read */ 64 unsigned int eccread; /* Non zero for a full-page ECC read */
65 unsigned int counter; /* counter for the initializations */ 65 unsigned int counter; /* counter for the initializations */
66 unsigned int max_bitflips; /* Saved during READ0 cmd */
66}; 67};
67 68
68static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl; 69static struct fsl_ifc_nand_ctrl *ifc_nand_ctrl;
@@ -262,6 +263,8 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
262 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER) 263 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_WPER)
263 dev_err(priv->dev, "NAND Flash Write Protect Error\n"); 264 dev_err(priv->dev, "NAND Flash Write Protect Error\n");
264 265
266 nctrl->max_bitflips = 0;
267
265 if (nctrl->eccread) { 268 if (nctrl->eccread) {
266 int errors; 269 int errors;
267 int bufnum = nctrl->page & priv->bufnum_mask; 270 int bufnum = nctrl->page & priv->bufnum_mask;
@@ -290,6 +293,9 @@ static void fsl_ifc_run_command(struct mtd_info *mtd)
290 } 293 }
291 294
292 mtd->ecc_stats.corrected += errors; 295 mtd->ecc_stats.corrected += errors;
296 nctrl->max_bitflips = max_t(unsigned int,
297 nctrl->max_bitflips,
298 errors);
293 } 299 }
294 300
295 nctrl->eccread = 0; 301 nctrl->eccread = 0;
@@ -375,21 +381,31 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command,
375 381
376 return; 382 return;
377 383
378 /* READID must read all 8 possible bytes */
379 case NAND_CMD_READID: 384 case NAND_CMD_READID:
385 case NAND_CMD_PARAM: {
386 int timing = IFC_FIR_OP_RB;
387 if (command == NAND_CMD_PARAM)
388 timing = IFC_FIR_OP_RBCD;
389
380 out_be32(&ifc->ifc_nand.nand_fir0, 390 out_be32(&ifc->ifc_nand.nand_fir0,
381 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) | 391 (IFC_FIR_OP_CMD0 << IFC_NAND_FIR0_OP0_SHIFT) |
382 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | 392 (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) |
383 (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT)); 393 (timing << IFC_NAND_FIR0_OP2_SHIFT));
384 out_be32(&ifc->ifc_nand.nand_fcr0, 394 out_be32(&ifc->ifc_nand.nand_fcr0,
385 NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT); 395 command << IFC_NAND_FCR0_CMD0_SHIFT);
386 /* 8 bytes for manuf, device and exts */ 396 out_be32(&ifc->ifc_nand.row3, column);
387 out_be32(&ifc->ifc_nand.nand_fbcr, 8); 397
388 ifc_nand_ctrl->read_bytes = 8; 398 /*
 399 * Although currently it's 8 bytes for READID, we always read
 400 * the maximum 256 bytes (for PARAM).
401 */
402 out_be32(&ifc->ifc_nand.nand_fbcr, 256);
403 ifc_nand_ctrl->read_bytes = 256;
389 404
390 set_addr(mtd, 0, 0, 0); 405 set_addr(mtd, 0, 0, 0);
391 fsl_ifc_run_command(mtd); 406 fsl_ifc_run_command(mtd);
392 return; 407 return;
408 }
393 409
394 /* ERASE1 stores the block and page address */ 410 /* ERASE1 stores the block and page address */
395 case NAND_CMD_ERASE1: 411 case NAND_CMD_ERASE1:
@@ -682,15 +698,16 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip)
682 return nand_fsr | NAND_STATUS_WP; 698 return nand_fsr | NAND_STATUS_WP;
683} 699}
684 700
685static int fsl_ifc_read_page(struct mtd_info *mtd, 701static int fsl_ifc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
686 struct nand_chip *chip, 702 uint8_t *buf, int oob_required, int page)
687 uint8_t *buf, int page)
688{ 703{
689 struct fsl_ifc_mtd *priv = chip->priv; 704 struct fsl_ifc_mtd *priv = chip->priv;
690 struct fsl_ifc_ctrl *ctrl = priv->ctrl; 705 struct fsl_ifc_ctrl *ctrl = priv->ctrl;
706 struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl;
691 707
692 fsl_ifc_read_buf(mtd, buf, mtd->writesize); 708 fsl_ifc_read_buf(mtd, buf, mtd->writesize);
693 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize); 709 if (oob_required)
710 fsl_ifc_read_buf(mtd, chip->oob_poi, mtd->oobsize);
694 711
695 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER) 712 if (ctrl->nand_stat & IFC_NAND_EVTER_STAT_ECCER)
696 dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n"); 713 dev_err(priv->dev, "NAND Flash ECC Uncorrectable Error\n");
@@ -698,15 +715,14 @@ static int fsl_ifc_read_page(struct mtd_info *mtd,
698 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) 715 if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC)
699 mtd->ecc_stats.failed++; 716 mtd->ecc_stats.failed++;
700 717
701 return 0; 718 return nctrl->max_bitflips;
702} 719}
703 720
704/* ECC will be calculated automatically, and errors will be detected in 721/* ECC will be calculated automatically, and errors will be detected in
705 * waitfunc. 722 * waitfunc.
706 */ 723 */
707static void fsl_ifc_write_page(struct mtd_info *mtd, 724static void fsl_ifc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
708 struct nand_chip *chip, 725 const uint8_t *buf, int oob_required)
709 const uint8_t *buf)
710{ 726{
711 fsl_ifc_write_buf(mtd, buf, mtd->writesize); 727 fsl_ifc_write_buf(mtd, buf, mtd->writesize);
712 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize); 728 fsl_ifc_write_buf(mtd, chip->oob_poi, mtd->oobsize);
@@ -789,7 +805,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
789 out_be32(&ifc->ifc_nand.ncfgr, 0x0); 805 out_be32(&ifc->ifc_nand.ncfgr, 0x0);
790 806
791 /* set up nand options */ 807 /* set up nand options */
792 chip->options = NAND_NO_READRDY | NAND_NO_AUTOINCR; 808 chip->options = NAND_NO_READRDY;
793 chip->bbt_options = NAND_BBT_USE_FLASH; 809 chip->bbt_options = NAND_BBT_USE_FLASH;
794 810
795 811
@@ -811,6 +827,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv)
811 /* Hardware generates ECC per 512 Bytes */ 827 /* Hardware generates ECC per 512 Bytes */
812 chip->ecc.size = 512; 828 chip->ecc.size = 512;
813 chip->ecc.bytes = 8; 829 chip->ecc.bytes = 8;
830 chip->ecc.strength = 4;
814 831
815 switch (csor & CSOR_NAND_PGS_MASK) { 832 switch (csor & CSOR_NAND_PGS_MASK) {
816 case CSOR_NAND_PGS_512: 833 case CSOR_NAND_PGS_512:
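The fsl_ifc hunk above also sets chip->ecc.strength, which the NAND core now expects every hardware-ECC driver to declare: it is the number of bitflips one ECC step can repair, and it feeds the same bitflip_threshold accounting as the read_page return value. The geometry being published here, spelled out only for illustration:

	chip->ecc.mode     = NAND_ECC_HW;
	chip->ecc.size     = 512;	/* data bytes covered by one ECC step */
	chip->ecc.bytes    = 8;		/* ECC bytes stored per step */
	chip->ecc.strength = 4;		/* correctable bitflips per step */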
diff --git a/drivers/mtd/nand/fsmc_nand.c b/drivers/mtd/nand/fsmc_nand.c
index 1b8330e1155a..38d26240d8b1 100644
--- a/drivers/mtd/nand/fsmc_nand.c
+++ b/drivers/mtd/nand/fsmc_nand.c
@@ -692,6 +692,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
692 * @mtd: mtd info structure 692 * @mtd: mtd info structure
693 * @chip: nand chip info structure 693 * @chip: nand chip info structure
694 * @buf: buffer to store read data 694 * @buf: buffer to store read data
695 * @oob_required: caller expects OOB data read to chip->oob_poi
695 * @page: page number to read 696 * @page: page number to read
696 * 697 *
697 * This routine is needed for fsmc version 8 as reading from NAND chip has to be 698 * This routine is needed for fsmc version 8 as reading from NAND chip has to be
@@ -701,7 +702,7 @@ static void fsmc_write_buf_dma(struct mtd_info *mtd, const uint8_t *buf,
701 * max of 8 bits) 702 * max of 8 bits)
702 */ 703 */
703static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 704static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
704 uint8_t *buf, int page) 705 uint8_t *buf, int oob_required, int page)
705{ 706{
706 struct fsmc_nand_data *host = container_of(mtd, 707 struct fsmc_nand_data *host = container_of(mtd,
707 struct fsmc_nand_data, mtd); 708 struct fsmc_nand_data, mtd);
@@ -720,6 +721,7 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
720 */ 721 */
721 uint16_t ecc_oob[7]; 722 uint16_t ecc_oob[7];
722 uint8_t *oob = (uint8_t *)&ecc_oob[0]; 723 uint8_t *oob = (uint8_t *)&ecc_oob[0];
724 unsigned int max_bitflips = 0;
723 725
724 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) { 726 for (i = 0, s = 0; s < eccsteps; s++, i += eccbytes, p += eccsize) {
725 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page); 727 chip->cmdfunc(mtd, NAND_CMD_READ0, s * eccsize, page);
@@ -748,13 +750,15 @@ static int fsmc_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
748 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 750 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
749 751
750 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 752 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
751 if (stat < 0) 753 if (stat < 0) {
752 mtd->ecc_stats.failed++; 754 mtd->ecc_stats.failed++;
753 else 755 } else {
754 mtd->ecc_stats.corrected += stat; 756 mtd->ecc_stats.corrected += stat;
757 max_bitflips = max_t(unsigned int, max_bitflips, stat);
758 }
755 } 759 }
756 760
757 return 0; 761 return max_bitflips;
758} 762}
759 763
760/* 764/*
@@ -994,9 +998,9 @@ static int __init fsmc_nand_probe(struct platform_device *pdev)
994 return PTR_ERR(host->clk); 998 return PTR_ERR(host->clk);
995 } 999 }
996 1000
997 ret = clk_enable(host->clk); 1001 ret = clk_prepare_enable(host->clk);
998 if (ret) 1002 if (ret)
999 goto err_clk_enable; 1003 goto err_clk_prepare_enable;
1000 1004
1001 /* 1005 /*
1002 * This device ID is actually a common AMBA ID as used on the 1006 * This device ID is actually a common AMBA ID as used on the
@@ -1176,8 +1180,8 @@ err_req_write_chnl:
1176 if (host->mode == USE_DMA_ACCESS) 1180 if (host->mode == USE_DMA_ACCESS)
1177 dma_release_channel(host->read_dma_chan); 1181 dma_release_channel(host->read_dma_chan);
1178err_req_read_chnl: 1182err_req_read_chnl:
1179 clk_disable(host->clk); 1183 clk_disable_unprepare(host->clk);
1180err_clk_enable: 1184err_clk_prepare_enable:
1181 clk_put(host->clk); 1185 clk_put(host->clk);
1182 return ret; 1186 return ret;
1183} 1187}
@@ -1198,7 +1202,7 @@ static int fsmc_nand_remove(struct platform_device *pdev)
1198 dma_release_channel(host->write_dma_chan); 1202 dma_release_channel(host->write_dma_chan);
1199 dma_release_channel(host->read_dma_chan); 1203 dma_release_channel(host->read_dma_chan);
1200 } 1204 }
1201 clk_disable(host->clk); 1205 clk_disable_unprepare(host->clk);
1202 clk_put(host->clk); 1206 clk_put(host->clk);
1203 } 1207 }
1204 1208
@@ -1210,7 +1214,7 @@ static int fsmc_nand_suspend(struct device *dev)
1210{ 1214{
1211 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1215 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1212 if (host) 1216 if (host)
1213 clk_disable(host->clk); 1217 clk_disable_unprepare(host->clk);
1214 return 0; 1218 return 0;
1215} 1219}
1216 1220
@@ -1218,7 +1222,7 @@ static int fsmc_nand_resume(struct device *dev)
1218{ 1222{
1219 struct fsmc_nand_data *host = dev_get_drvdata(dev); 1223 struct fsmc_nand_data *host = dev_get_drvdata(dev);
1220 if (host) { 1224 if (host) {
1221 clk_enable(host->clk); 1225 clk_prepare_enable(host->clk);
1222 fsmc_nand_setup(host->regs_va, host->bank, 1226 fsmc_nand_setup(host->regs_va, host->bank,
1223 host->nand.options & NAND_BUSWIDTH_16, 1227 host->nand.options & NAND_BUSWIDTH_16,
1224 host->dev_timings); 1228 host->dev_timings);
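The fsmc clock hunks follow the common-clock requirement that a clock be prepared before it is enabled: clk_prepare_enable() bundles clk_prepare() with clk_enable(), and clk_disable_unprepare() undoes both in reverse order. A stripped-down probe/teardown sequence under that rule (error labels and surrounding code omitted):

	ret = clk_prepare_enable(host->clk);
	if (ret)
		return ret;

	/* ... program the controller ... */

	clk_disable_unprepare(host->clk);
	clk_put(host->clk);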
diff --git a/drivers/mtd/nand/gpmi-nand/bch-regs.h b/drivers/mtd/nand/gpmi-nand/bch-regs.h
index 4effb8c579db..a0924515c396 100644
--- a/drivers/mtd/nand/gpmi-nand/bch-regs.h
+++ b/drivers/mtd/nand/gpmi-nand/bch-regs.h
@@ -51,15 +51,26 @@
51 51
52#define BP_BCH_FLASH0LAYOUT0_ECC0 12 52#define BP_BCH_FLASH0LAYOUT0_ECC0 12
53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0) 53#define BM_BCH_FLASH0LAYOUT0_ECC0 (0xf << BP_BCH_FLASH0LAYOUT0_ECC0)
54#define BF_BCH_FLASH0LAYOUT0_ECC0(v) \ 54#define MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0 11
55 (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) & BM_BCH_FLASH0LAYOUT0_ECC0) 55#define MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0 (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0)
56#define BF_BCH_FLASH0LAYOUT0_ECC0(v, x) \
57 (GPMI_IS_MX6Q(x) \
58 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT0_ECC0) \
59 & MX6Q_BM_BCH_FLASH0LAYOUT0_ECC0) \
60 : (((v) << BP_BCH_FLASH0LAYOUT0_ECC0) \
61 & BM_BCH_FLASH0LAYOUT0_ECC0) \
62 )
56 63
57#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0 64#define BP_BCH_FLASH0LAYOUT0_DATA0_SIZE 0
58#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \ 65#define BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
59 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE) 66 (0xfff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
60#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v) \ 67#define MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE \
61 (((v) << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)\ 68 (0x3ff << BP_BCH_FLASH0LAYOUT0_DATA0_SIZE)
62 & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) 69#define BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(v, x) \
70 (GPMI_IS_MX6Q(x) \
71 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
72 : ((v) & BM_BCH_FLASH0LAYOUT0_DATA0_SIZE) \
73 )
63 74
64#define HW_BCH_FLASH0LAYOUT1 0x00000090 75#define HW_BCH_FLASH0LAYOUT1 0x00000090
65 76
@@ -72,13 +83,24 @@
72 83
73#define BP_BCH_FLASH0LAYOUT1_ECCN 12 84#define BP_BCH_FLASH0LAYOUT1_ECCN 12
74#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN) 85#define BM_BCH_FLASH0LAYOUT1_ECCN (0xf << BP_BCH_FLASH0LAYOUT1_ECCN)
75#define BF_BCH_FLASH0LAYOUT1_ECCN(v) \ 86#define MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN 11
76 (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) & BM_BCH_FLASH0LAYOUT1_ECCN) 87#define MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN (0x1f << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN)
88#define BF_BCH_FLASH0LAYOUT1_ECCN(v, x) \
89 (GPMI_IS_MX6Q(x) \
90 ? (((v) << MX6Q_BP_BCH_FLASH0LAYOUT1_ECCN) \
91 & MX6Q_BM_BCH_FLASH0LAYOUT1_ECCN) \
92 : (((v) << BP_BCH_FLASH0LAYOUT1_ECCN) \
93 & BM_BCH_FLASH0LAYOUT1_ECCN) \
94 )
77 95
78#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0 96#define BP_BCH_FLASH0LAYOUT1_DATAN_SIZE 0
79#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \ 97#define BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
80 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) 98 (0xfff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
81#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v) \ 99#define MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE \
82 (((v) << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE) \ 100 (0x3ff << BP_BCH_FLASH0LAYOUT1_DATAN_SIZE)
83 & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) 101#define BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(v, x) \
102 (GPMI_IS_MX6Q(x) \
103 ? (((v) >> 2) & MX6Q_BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
104 : ((v) & BM_BCH_FLASH0LAYOUT1_DATAN_SIZE) \
105 )
84#endif 106#endif
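The widened MX6Q_* masks reflect that i.MX6Q keeps the ECC level in a five-bit field instead of four, and the '(v) >> 2' in the DATA*_SIZE variants suggests that SoC programs the block size in 32-bit words rather than bytes (an inference from the shift, not something stated in this header). Call sites only gain the per-device handle, as in the gpmi-lib.c hunk further down:

	writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
	       | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
	       | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
	       | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
	       r->bch_regs + HW_BCH_FLASH0LAYOUT0);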
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
index e8ea7107932e..a1f43329ad43 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-lib.c
@@ -21,7 +21,6 @@
21#include <linux/mtd/gpmi-nand.h> 21#include <linux/mtd/gpmi-nand.h>
22#include <linux/delay.h> 22#include <linux/delay.h>
23#include <linux/clk.h> 23#include <linux/clk.h>
24#include <mach/mxs.h>
25 24
26#include "gpmi-nand.h" 25#include "gpmi-nand.h"
27#include "gpmi-regs.h" 26#include "gpmi-regs.h"
@@ -37,6 +36,8 @@ struct timing_threshod timing_default_threshold = {
37 .max_dll_delay_in_ns = 16, 36 .max_dll_delay_in_ns = 16,
38}; 37};
39 38
39#define MXS_SET_ADDR 0x4
40#define MXS_CLR_ADDR 0x8
40/* 41/*
41 * Clear the bit and poll it cleared. This is usually called with 42 * Clear the bit and poll it cleared. This is usually called with
42 * a reset address and mask being either SFTRST(bit 31) or CLKGATE 43 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
@@ -47,7 +48,7 @@ static int clear_poll_bit(void __iomem *addr, u32 mask)
47 int timeout = 0x400; 48 int timeout = 0x400;
48 49
49 /* clear the bit */ 50 /* clear the bit */
50 __mxs_clrl(mask, addr); 51 writel(mask, addr + MXS_CLR_ADDR);
51 52
52 /* 53 /*
53 * SFTRST needs 3 GPMI clocks to settle, the reference manual 54 * SFTRST needs 3 GPMI clocks to settle, the reference manual
@@ -92,11 +93,11 @@ static int gpmi_reset_block(void __iomem *reset_addr, bool just_enable)
92 goto error; 93 goto error;
93 94
94 /* clear CLKGATE */ 95 /* clear CLKGATE */
95 __mxs_clrl(MODULE_CLKGATE, reset_addr); 96 writel(MODULE_CLKGATE, reset_addr + MXS_CLR_ADDR);
96 97
97 if (!just_enable) { 98 if (!just_enable) {
98 /* set SFTRST to reset the block */ 99 /* set SFTRST to reset the block */
99 __mxs_setl(MODULE_SFTRST, reset_addr); 100 writel(MODULE_SFTRST, reset_addr + MXS_SET_ADDR);
100 udelay(1); 101 udelay(1);
101 102
102 /* poll CLKGATE becoming set */ 103 /* poll CLKGATE becoming set */
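With <mach/mxs.h> gone the driver can no longer call __mxs_setl()/__mxs_clrl(), so it open-codes the MXS register convention: every control register has write-only set and clear aliases at offsets +0x4 and +0x8, and writing a mask there sets or clears exactly those bits without a read-modify-write. Local helpers equivalent to what the hunks do inline could look like this (helper names are illustrative; the driver simply calls writel() directly):

static inline void mxs_reg_set(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_SET_ADDR);	/* set the bits in mask */
}

static inline void mxs_reg_clr(void __iomem *reg, u32 mask)
{
	writel(mask, reg + MXS_CLR_ADDR);	/* clear the bits in mask */
}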
@@ -223,13 +224,13 @@ int bch_set_geometry(struct gpmi_nand_data *this)
223 /* Configure layout 0. */ 224 /* Configure layout 0. */
224 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count) 225 writel(BF_BCH_FLASH0LAYOUT0_NBLOCKS(block_count)
225 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size) 226 | BF_BCH_FLASH0LAYOUT0_META_SIZE(metadata_size)
226 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength) 227 | BF_BCH_FLASH0LAYOUT0_ECC0(ecc_strength, this)
227 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size), 228 | BF_BCH_FLASH0LAYOUT0_DATA0_SIZE(block_size, this),
228 r->bch_regs + HW_BCH_FLASH0LAYOUT0); 229 r->bch_regs + HW_BCH_FLASH0LAYOUT0);
229 230
230 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size) 231 writel(BF_BCH_FLASH0LAYOUT1_PAGE_SIZE(page_size)
231 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength) 232 | BF_BCH_FLASH0LAYOUT1_ECCN(ecc_strength, this)
232 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size), 233 | BF_BCH_FLASH0LAYOUT1_DATAN_SIZE(block_size, this),
233 r->bch_regs + HW_BCH_FLASH0LAYOUT1); 234 r->bch_regs + HW_BCH_FLASH0LAYOUT1);
234 235
235 /* Set *all* chip selects to use layout 0. */ 236 /* Set *all* chip selects to use layout 0. */
@@ -255,11 +256,12 @@ static unsigned int ns_to_cycles(unsigned int time,
255 return max(k, min); 256 return max(k, min);
256} 257}
257 258
259#define DEF_MIN_PROP_DELAY 5
260#define DEF_MAX_PROP_DELAY 9
258/* Apply timing to current hardware conditions. */ 261/* Apply timing to current hardware conditions. */
259static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this, 262static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
260 struct gpmi_nfc_hardware_timing *hw) 263 struct gpmi_nfc_hardware_timing *hw)
261{ 264{
262 struct gpmi_nand_platform_data *pdata = this->pdata;
263 struct timing_threshod *nfc = &timing_default_threshold; 265 struct timing_threshod *nfc = &timing_default_threshold;
264 struct nand_chip *nand = &this->nand; 266 struct nand_chip *nand = &this->nand;
265 struct nand_timing target = this->timing; 267 struct nand_timing target = this->timing;
@@ -276,8 +278,8 @@ static int gpmi_nfc_compute_hardware_timing(struct gpmi_nand_data *this,
276 int ideal_sample_delay_in_ns; 278 int ideal_sample_delay_in_ns;
277 unsigned int sample_delay_factor; 279 unsigned int sample_delay_factor;
278 int tEYE; 280 int tEYE;
279 unsigned int min_prop_delay_in_ns = pdata->min_prop_delay_in_ns; 281 unsigned int min_prop_delay_in_ns = DEF_MIN_PROP_DELAY;
280 unsigned int max_prop_delay_in_ns = pdata->max_prop_delay_in_ns; 282 unsigned int max_prop_delay_in_ns = DEF_MAX_PROP_DELAY;
281 283
282 /* 284 /*
283 * If there are multiple chips, we need to relax the timings to allow 285 * If there are multiple chips, we need to relax the timings to allow
@@ -803,7 +805,8 @@ int gpmi_is_ready(struct gpmi_nand_data *this, unsigned chip)
803 if (GPMI_IS_MX23(this)) { 805 if (GPMI_IS_MX23(this)) {
804 mask = MX23_BM_GPMI_DEBUG_READY0 << chip; 806 mask = MX23_BM_GPMI_DEBUG_READY0 << chip;
805 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG); 807 reg = readl(r->gpmi_regs + HW_GPMI_DEBUG);
806 } else if (GPMI_IS_MX28(this)) { 808 } else if (GPMI_IS_MX28(this) || GPMI_IS_MX6Q(this)) {
 809 /* MX6Q shares the same R/B register as MX28. */
807 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip); 810 mask = MX28_BF_GPMI_STAT_READY_BUSY(1 << chip);
808 reg = readl(r->gpmi_regs + HW_GPMI_STAT); 811 reg = readl(r->gpmi_regs + HW_GPMI_STAT);
809 } else 812 } else
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
index b68e04310bd8..a05b7b444d4f 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.c
@@ -25,6 +25,8 @@
25#include <linux/mtd/gpmi-nand.h> 25#include <linux/mtd/gpmi-nand.h>
26#include <linux/mtd/partitions.h> 26#include <linux/mtd/partitions.h>
27#include <linux/pinctrl/consumer.h> 27#include <linux/pinctrl/consumer.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
28#include "gpmi-nand.h" 30#include "gpmi-nand.h"
29 31
30/* add our owner bbt descriptor */ 32/* add our owner bbt descriptor */
@@ -387,7 +389,7 @@ static void release_bch_irq(struct gpmi_nand_data *this)
387static bool gpmi_dma_filter(struct dma_chan *chan, void *param) 389static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
388{ 390{
389 struct gpmi_nand_data *this = param; 391 struct gpmi_nand_data *this = param;
390 struct resource *r = this->private; 392 int dma_channel = (int)this->private;
391 393
392 if (!mxs_dma_is_apbh(chan)) 394 if (!mxs_dma_is_apbh(chan))
393 return false; 395 return false;
@@ -399,7 +401,7 @@ static bool gpmi_dma_filter(struct dma_chan *chan, void *param)
399 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7 401 * for mx28 : MX28_DMA_GPMI0 ~ MX28_DMA_GPMI7
400 * (These eight channels share the same IRQ!) 402 * (These eight channels share the same IRQ!)
401 */ 403 */
402 if (r->start <= chan->chan_id && chan->chan_id <= r->end) { 404 if (dma_channel == chan->chan_id) {
403 chan->private = &this->dma_data; 405 chan->private = &this->dma_data;
404 return true; 406 return true;
405 } 407 }
@@ -419,57 +421,45 @@ static void release_dma_channels(struct gpmi_nand_data *this)
419static int __devinit acquire_dma_channels(struct gpmi_nand_data *this) 421static int __devinit acquire_dma_channels(struct gpmi_nand_data *this)
420{ 422{
421 struct platform_device *pdev = this->pdev; 423 struct platform_device *pdev = this->pdev;
422 struct gpmi_nand_platform_data *pdata = this->pdata; 424 struct resource *r_dma;
423 struct resources *res = &this->resources; 425 struct device_node *dn;
424 struct resource *r, *r_dma; 426 int dma_channel;
425 unsigned int i; 427 unsigned int ret;
428 struct dma_chan *dma_chan;
429 dma_cap_mask_t mask;
430
431 /* dma channel, we only use the first one. */
432 dn = pdev->dev.of_node;
433 ret = of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel);
434 if (ret) {
435 pr_err("unable to get DMA channel from dt.\n");
436 goto acquire_err;
437 }
438 this->private = (void *)dma_channel;
426 439
427 r = platform_get_resource_byname(pdev, IORESOURCE_DMA, 440 /* gpmi dma interrupt */
428 GPMI_NAND_DMA_CHANNELS_RES_NAME);
429 r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ, 441 r_dma = platform_get_resource_byname(pdev, IORESOURCE_IRQ,
430 GPMI_NAND_DMA_INTERRUPT_RES_NAME); 442 GPMI_NAND_DMA_INTERRUPT_RES_NAME);
431 if (!r || !r_dma) { 443 if (!r_dma) {
432 pr_err("Can't get resource for DMA\n"); 444 pr_err("Can't get resource for DMA\n");
433 return -ENXIO; 445 goto acquire_err;
434 } 446 }
447 this->dma_data.chan_irq = r_dma->start;
435 448
436 /* used in gpmi_dma_filter() */ 449 /* request dma channel */
437 this->private = r; 450 dma_cap_zero(mask);
438 451 dma_cap_set(DMA_SLAVE, mask);
439 for (i = r->start; i <= r->end; i++) {
440 struct dma_chan *dma_chan;
441 dma_cap_mask_t mask;
442 452
443 if (i - r->start >= pdata->max_chip_count) 453 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
444 break; 454 if (!dma_chan) {
445 455 pr_err("dma_request_channel failed.\n");
446 dma_cap_zero(mask); 456 goto acquire_err;
447 dma_cap_set(DMA_SLAVE, mask);
448
449 /* get the DMA interrupt */
450 if (r_dma->start == r_dma->end) {
451 /* only register the first. */
452 if (i == r->start)
453 this->dma_data.chan_irq = r_dma->start;
454 else
455 this->dma_data.chan_irq = NO_IRQ;
456 } else
457 this->dma_data.chan_irq = r_dma->start + (i - r->start);
458
459 dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
460 if (!dma_chan)
461 goto acquire_err;
462
463 /* fill the first empty item */
464 this->dma_chans[i - r->start] = dma_chan;
465 } 457 }
466 458
467 res->dma_low_channel = r->start; 459 this->dma_chans[0] = dma_chan;
468 res->dma_high_channel = i;
469 return 0; 460 return 0;
470 461
471acquire_err: 462acquire_err:
472 pr_err("Can't acquire DMA channel %u\n", i);
473 release_dma_channels(this); 463 release_dma_channels(this);
474 return -EINVAL; 464 return -EINVAL;
475} 465}
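acquire_dma_channels() now takes a single channel number from the device tree and lets the dmaengine filter accept only that channel. Assuming a node property such as fsl,gpmi-dma-channel = <0>;, the lookup-plus-request pattern reduces to the sketch below (error handling trimmed):

	struct device_node *dn = pdev->dev.of_node;
	struct dma_chan *dma_chan;
	dma_cap_mask_t mask;
	u32 dma_channel;

	if (of_property_read_u32(dn, "fsl,gpmi-dma-channel", &dma_channel))
		return -EINVAL;		/* property missing or malformed */

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);

	/* gpmi_dma_filter() matches chan->chan_id against the requested channel */
	dma_chan = dma_request_channel(mask, gpmi_dma_filter, this);
	if (!dma_chan)
		return -EINVAL;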
@@ -851,7 +841,7 @@ static void block_mark_swapping(struct gpmi_nand_data *this,
851} 841}
852 842
853static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, 843static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
854 uint8_t *buf, int page) 844 uint8_t *buf, int oob_required, int page)
855{ 845{
856 struct gpmi_nand_data *this = chip->priv; 846 struct gpmi_nand_data *this = chip->priv;
857 struct bch_geometry *nfc_geo = &this->bch_geometry; 847 struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -917,28 +907,31 @@ static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip,
917 mtd->ecc_stats.corrected += corrected; 907 mtd->ecc_stats.corrected += corrected;
918 } 908 }
919 909
920 /* 910 if (oob_required) {
921 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() for 911 /*
922 * details about our policy for delivering the OOB. 912 * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob()
923 * 913 * for details about our policy for delivering the OOB.
924 * We fill the caller's buffer with set bits, and then copy the block 914 *
925 * mark to th caller's buffer. Note that, if block mark swapping was 915 * We fill the caller's buffer with set bits, and then copy the
926 * necessary, it has already been done, so we can rely on the first 916 * block mark to th caller's buffer. Note that, if block mark
927 * byte of the auxiliary buffer to contain the block mark. 917 * swapping was necessary, it has already been done, so we can
928 */ 918 * rely on the first byte of the auxiliary buffer to contain
929 memset(chip->oob_poi, ~0, mtd->oobsize); 919 * the block mark.
930 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; 920 */
921 memset(chip->oob_poi, ~0, mtd->oobsize);
922 chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0];
931 923
932 read_page_swap_end(this, buf, mtd->writesize, 924 read_page_swap_end(this, buf, mtd->writesize,
933 this->payload_virt, this->payload_phys, 925 this->payload_virt, this->payload_phys,
934 nfc_geo->payload_size, 926 nfc_geo->payload_size,
935 payload_virt, payload_phys); 927 payload_virt, payload_phys);
928 }
936exit_nfc: 929exit_nfc:
937 return ret; 930 return ret;
938} 931}
939 932
940static void gpmi_ecc_write_page(struct mtd_info *mtd, 933static void gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip,
941 struct nand_chip *chip, const uint8_t *buf) 934 const uint8_t *buf, int oob_required)
942{ 935{
943 struct gpmi_nand_data *this = chip->priv; 936 struct gpmi_nand_data *this = chip->priv;
944 struct bch_geometry *nfc_geo = &this->bch_geometry; 937 struct bch_geometry *nfc_geo = &this->bch_geometry;
@@ -1077,7 +1070,7 @@ exit_auxiliary:
1077 * this driver. 1070 * this driver.
1078 */ 1071 */
1079static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 1072static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1080 int page, int sndcmd) 1073 int page)
1081{ 1074{
1082 struct gpmi_nand_data *this = chip->priv; 1075 struct gpmi_nand_data *this = chip->priv;
1083 1076
@@ -1100,11 +1093,7 @@ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
1100 chip->oob_poi[0] = chip->read_byte(mtd); 1093 chip->oob_poi[0] = chip->read_byte(mtd);
1101 } 1094 }
1102 1095
1103 /* 1096 return 0;
1104 * Return true, indicating that the next call to this function must send
1105 * a command.
1106 */
1107 return true;
1108} 1097}
1109 1098
1110static int 1099static int
@@ -1318,7 +1307,7 @@ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this)
1318 /* Write the first page of the current stride. */ 1307 /* Write the first page of the current stride. */
1319 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); 1308 dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page);
1320 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 1309 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
1321 chip->ecc.write_page_raw(mtd, chip, buffer); 1310 chip->ecc.write_page_raw(mtd, chip, buffer, 0);
1322 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1311 chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1);
1323 1312
1324 /* Wait for the write to finish. */ 1313 /* Wait for the write to finish. */
@@ -1444,6 +1433,10 @@ static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this)
1444 if (ret) 1433 if (ret)
1445 return ret; 1434 return ret;
1446 1435
1436 /* Adjust the ECC strength according to the chip. */
1437 this->nand.ecc.strength = this->bch_geometry.ecc_strength;
1438 this->mtd.ecc_strength = this->bch_geometry.ecc_strength;
1439
1447 /* NAND boot init, depends on the gpmi_set_geometry(). */ 1440 /* NAND boot init, depends on the gpmi_set_geometry(). */
1448 return nand_boot_init(this); 1441 return nand_boot_init(this);
1449} 1442}
@@ -1471,9 +1464,9 @@ void gpmi_nfc_exit(struct gpmi_nand_data *this)
1471 1464
1472static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this) 1465static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1473{ 1466{
1474 struct gpmi_nand_platform_data *pdata = this->pdata;
1475 struct mtd_info *mtd = &this->mtd; 1467 struct mtd_info *mtd = &this->mtd;
1476 struct nand_chip *chip = &this->nand; 1468 struct nand_chip *chip = &this->nand;
1469 struct mtd_part_parser_data ppdata = {};
1477 int ret; 1470 int ret;
1478 1471
1479 /* init current chip */ 1472 /* init current chip */
@@ -1502,6 +1495,7 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1502 chip->options |= NAND_NO_SUBPAGE_WRITE; 1495 chip->options |= NAND_NO_SUBPAGE_WRITE;
1503 chip->ecc.mode = NAND_ECC_HW; 1496 chip->ecc.mode = NAND_ECC_HW;
1504 chip->ecc.size = 1; 1497 chip->ecc.size = 1;
1498 chip->ecc.strength = 8;
1505 chip->ecc.layout = &gpmi_hw_ecclayout; 1499 chip->ecc.layout = &gpmi_hw_ecclayout;
1506 1500
1507 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ 1501 /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */
@@ -1511,14 +1505,14 @@ static int __devinit gpmi_nfc_init(struct gpmi_nand_data *this)
1511 if (ret) 1505 if (ret)
1512 goto err_out; 1506 goto err_out;
1513 1507
1514 ret = nand_scan(mtd, pdata->max_chip_count); 1508 ret = nand_scan(mtd, 1);
1515 if (ret) { 1509 if (ret) {
1516 pr_err("Chip scan failed\n"); 1510 pr_err("Chip scan failed\n");
1517 goto err_out; 1511 goto err_out;
1518 } 1512 }
1519 1513
1520 ret = mtd_device_parse_register(mtd, NULL, NULL, 1514 ppdata.of_node = this->pdev->dev.of_node;
1521 pdata->partitions, pdata->partition_count); 1515 ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0);
1522 if (ret) 1516 if (ret)
1523 goto err_out; 1517 goto err_out;
1524 return 0; 1518 return 0;
@@ -1528,12 +1522,41 @@ err_out:
1528 return ret; 1522 return ret;
1529} 1523}
1530 1524
1525static const struct platform_device_id gpmi_ids[] = {
1526 { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
1527 { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
1528 { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
1529 {},
1530};
1531
1532static const struct of_device_id gpmi_nand_id_table[] = {
1533 {
1534 .compatible = "fsl,imx23-gpmi-nand",
1535 .data = (void *)&gpmi_ids[IS_MX23]
1536 }, {
1537 .compatible = "fsl,imx28-gpmi-nand",
1538 .data = (void *)&gpmi_ids[IS_MX28]
1539 }, {
1540 .compatible = "fsl,imx6q-gpmi-nand",
1541 .data = (void *)&gpmi_ids[IS_MX6Q]
1542 }, {}
1543};
1544MODULE_DEVICE_TABLE(of, gpmi_nand_id_table);
1545
1531static int __devinit gpmi_nand_probe(struct platform_device *pdev) 1546static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1532{ 1547{
1533 struct gpmi_nand_platform_data *pdata = pdev->dev.platform_data;
1534 struct gpmi_nand_data *this; 1548 struct gpmi_nand_data *this;
1549 const struct of_device_id *of_id;
1535 int ret; 1550 int ret;
1536 1551
1552 of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
1553 if (of_id) {
1554 pdev->id_entry = of_id->data;
1555 } else {
1556 pr_err("Failed to find the right device id.\n");
1557 return -ENOMEM;
1558 }
1559
1537 this = kzalloc(sizeof(*this), GFP_KERNEL); 1560 this = kzalloc(sizeof(*this), GFP_KERNEL);
1538 if (!this) { 1561 if (!this) {
1539 pr_err("Failed to allocate per-device memory\n"); 1562 pr_err("Failed to allocate per-device memory\n");
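The probe hunk bridges the new of_device_id table to the existing platform_device_id entries: each .data pointer references a slot in gpmi_ids[], and storing it in pdev->id_entry keeps the GPMI_IS_MX23/MX28/MX6Q() driver_data checks working unchanged for device-tree probes. In outline (a sketch of just that step, not the whole probe):

	const struct of_device_id *of_id;

	of_id = of_match_device(gpmi_nand_id_table, &pdev->dev);
	if (!of_id)
		return -ENODEV;		/* the driver above returns -ENOMEM here */

	/* reuse the platform id_entry so driver_data checks still work */
	pdev->id_entry = of_id->data;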
@@ -1543,13 +1566,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1543 platform_set_drvdata(pdev, this); 1566 platform_set_drvdata(pdev, this);
1544 this->pdev = pdev; 1567 this->pdev = pdev;
1545 this->dev = &pdev->dev; 1568 this->dev = &pdev->dev;
1546 this->pdata = pdata;
1547
1548 if (pdata->platform_init) {
1549 ret = pdata->platform_init();
1550 if (ret)
1551 goto platform_init_error;
1552 }
1553 1569
1554 ret = acquire_resources(this); 1570 ret = acquire_resources(this);
1555 if (ret) 1571 if (ret)
@@ -1567,7 +1583,6 @@ static int __devinit gpmi_nand_probe(struct platform_device *pdev)
1567 1583
1568exit_nfc_init: 1584exit_nfc_init:
1569 release_resources(this); 1585 release_resources(this);
1570platform_init_error:
1571exit_acquire_resources: 1586exit_acquire_resources:
1572 platform_set_drvdata(pdev, NULL); 1587 platform_set_drvdata(pdev, NULL);
1573 kfree(this); 1588 kfree(this);
@@ -1585,19 +1600,10 @@ static int __exit gpmi_nand_remove(struct platform_device *pdev)
1585 return 0; 1600 return 0;
1586} 1601}
1587 1602
1588static const struct platform_device_id gpmi_ids[] = {
1589 {
1590 .name = "imx23-gpmi-nand",
1591 .driver_data = IS_MX23,
1592 }, {
1593 .name = "imx28-gpmi-nand",
1594 .driver_data = IS_MX28,
1595 }, {},
1596};
1597
1598static struct platform_driver gpmi_nand_driver = { 1603static struct platform_driver gpmi_nand_driver = {
1599 .driver = { 1604 .driver = {
1600 .name = "gpmi-nand", 1605 .name = "gpmi-nand",
1606 .of_match_table = gpmi_nand_id_table,
1601 }, 1607 },
1602 .probe = gpmi_nand_probe, 1608 .probe = gpmi_nand_probe,
1603 .remove = __exit_p(gpmi_nand_remove), 1609 .remove = __exit_p(gpmi_nand_remove),
diff --git a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
index ec6180d4ff8f..ce5daa160920 100644
--- a/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
+++ b/drivers/mtd/nand/gpmi-nand/gpmi-nand.h
@@ -266,8 +266,10 @@ extern int gpmi_read_page(struct gpmi_nand_data *,
266#define STATUS_UNCORRECTABLE 0xfe 266#define STATUS_UNCORRECTABLE 0xfe
267 267
268/* Use the platform_id to distinguish different Archs. */ 268/* Use the platform_id to distinguish different Archs. */
269#define IS_MX23 0x1 269#define IS_MX23 0x0
270#define IS_MX28 0x2 270#define IS_MX28 0x1
271#define IS_MX6Q 0x2
271#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23) 272#define GPMI_IS_MX23(x) ((x)->pdev->id_entry->driver_data == IS_MX23)
272#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28) 273#define GPMI_IS_MX28(x) ((x)->pdev->id_entry->driver_data == IS_MX28)
274#define GPMI_IS_MX6Q(x) ((x)->pdev->id_entry->driver_data == IS_MX6Q)
273#endif 275#endif
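IS_MX23/IS_MX28 move from 0x1/0x2 to 0x0/0x1 because the values now double as indexes into gpmi_ids[]: the of_device_id .data pointers are taken as &gpmi_ids[IS_MX23] and so on, so the constants have to start at zero and match the table order. The relationship, written out with designated initializers purely for illustration:

static const struct platform_device_id gpmi_ids[] = {
	[IS_MX23] = { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, },
	[IS_MX28] = { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, },
	[IS_MX6Q] = { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, },
	{ /* sentinel */ },
};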
diff --git a/drivers/mtd/nand/h1910.c b/drivers/mtd/nand/h1910.c
index 9bf5ce5fa22d..50166e93ba96 100644
--- a/drivers/mtd/nand/h1910.c
+++ b/drivers/mtd/nand/h1910.c
@@ -124,7 +124,6 @@ static int __init h1910_init(void)
124 /* 15 us command delay time */ 124 /* 15 us command delay time */
125 this->chip_delay = 50; 125 this->chip_delay = 50;
126 this->ecc.mode = NAND_ECC_SOFT; 126 this->ecc.mode = NAND_ECC_SOFT;
127 this->options = NAND_NO_AUTOINCR;
128 127
129 /* Scan to find existence of the device */ 128 /* Scan to find existence of the device */
130 if (nand_scan(h1910_nand_mtd, 1)) { 129 if (nand_scan(h1910_nand_mtd, 1)) {
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index e4147e8acb7c..a6fa884ae49b 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -332,11 +332,7 @@ static int __devinit jz_nand_probe(struct platform_device *pdev)
332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST; 332 chip->ecc.mode = NAND_ECC_HW_OOB_FIRST;
333 chip->ecc.size = 512; 333 chip->ecc.size = 512;
334 chip->ecc.bytes = 9; 334 chip->ecc.bytes = 9;
335 chip->ecc.strength = 2; 335 chip->ecc.strength = 4;
336 /*
337 * FIXME: ecc_strength value of 2 bits per 512 bytes of data is a
338 * conservative guess, given 9 ecc bytes and reed-solomon alg.
339 */
340 336
341 if (pdata) 337 if (pdata)
342 chip->ecc.layout = pdata->ecc_layout; 338 chip->ecc.layout = pdata->ecc_layout;
diff --git a/drivers/mtd/nand/mpc5121_nfc.c b/drivers/mtd/nand/mpc5121_nfc.c
index c240cf1af961..c259c24d7986 100644
--- a/drivers/mtd/nand/mpc5121_nfc.c
+++ b/drivers/mtd/nand/mpc5121_nfc.c
@@ -734,7 +734,6 @@ static int __devinit mpc5121_nfc_probe(struct platform_device *op)
734 chip->write_buf = mpc5121_nfc_write_buf; 734 chip->write_buf = mpc5121_nfc_write_buf;
735 chip->verify_buf = mpc5121_nfc_verify_buf; 735 chip->verify_buf = mpc5121_nfc_verify_buf;
736 chip->select_chip = mpc5121_nfc_select_chip; 736 chip->select_chip = mpc5121_nfc_select_chip;
737 chip->options = NAND_NO_AUTOINCR;
738 chip->bbt_options = NAND_BBT_USE_FLASH; 737 chip->bbt_options = NAND_BBT_USE_FLASH;
739 chip->ecc.mode = NAND_ECC_SOFT; 738 chip->ecc.mode = NAND_ECC_SOFT;
740 739
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index 9e374e9bd296..c58e6a93f445 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -32,6 +32,8 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/completion.h> 34#include <linux/completion.h>
35#include <linux/of_device.h>
36#include <linux/of_mtd.h>
35 37
36#include <asm/mach/flash.h> 38#include <asm/mach/flash.h>
37#include <mach/mxc_nand.h> 39#include <mach/mxc_nand.h>
@@ -140,13 +142,47 @@
140 142
141#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34) 143#define NFC_V3_DELAY_LINE (host->regs_ip + 0x34)
142 144
145struct mxc_nand_host;
146
147struct mxc_nand_devtype_data {
148 void (*preset)(struct mtd_info *);
149 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
150 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
151 void (*send_page)(struct mtd_info *, unsigned int);
152 void (*send_read_id)(struct mxc_nand_host *);
153 uint16_t (*get_dev_status)(struct mxc_nand_host *);
154 int (*check_int)(struct mxc_nand_host *);
155 void (*irq_control)(struct mxc_nand_host *, int);
156 u32 (*get_ecc_status)(struct mxc_nand_host *);
157 struct nand_ecclayout *ecclayout_512, *ecclayout_2k, *ecclayout_4k;
158 void (*select_chip)(struct mtd_info *mtd, int chip);
159 int (*correct_data)(struct mtd_info *mtd, u_char *dat,
160 u_char *read_ecc, u_char *calc_ecc);
161
162 /*
163 * On i.MX21 the CONFIG2:INT bit cannot be read if interrupts are masked
164 * (CONFIG1:INT_MSK is set). To handle this the driver uses
165 * enable_irq/disable_irq_nosync instead of CONFIG1:INT_MSK
166 */
167 int irqpending_quirk;
168 int needs_ip;
169
170 size_t regs_offset;
171 size_t spare0_offset;
172 size_t axi_offset;
173
174 int spare_len;
175 int eccbytes;
176 int eccsize;
177};
178
143struct mxc_nand_host { 179struct mxc_nand_host {
144 struct mtd_info mtd; 180 struct mtd_info mtd;
145 struct nand_chip nand; 181 struct nand_chip nand;
146 struct device *dev; 182 struct device *dev;
147 183
148 void *spare0; 184 void __iomem *spare0;
149 void *main_area0; 185 void __iomem *main_area0;
150 186
151 void __iomem *base; 187 void __iomem *base;
152 void __iomem *regs; 188 void __iomem *regs;
@@ -163,16 +199,9 @@ struct mxc_nand_host {
163 199
164 uint8_t *data_buf; 200 uint8_t *data_buf;
165 unsigned int buf_start; 201 unsigned int buf_start;
166 int spare_len; 202
167 203 const struct mxc_nand_devtype_data *devtype_data;
168 void (*preset)(struct mtd_info *); 204 struct mxc_nand_platform_data pdata;
169 void (*send_cmd)(struct mxc_nand_host *, uint16_t, int);
170 void (*send_addr)(struct mxc_nand_host *, uint16_t, int);
171 void (*send_page)(struct mtd_info *, unsigned int);
172 void (*send_read_id)(struct mxc_nand_host *);
173 uint16_t (*get_dev_status)(struct mxc_nand_host *);
174 int (*check_int)(struct mxc_nand_host *);
175 void (*irq_control)(struct mxc_nand_host *, int);
176}; 205};
177 206
178/* OOB placement block for use with hardware ecc generation */ 207/* OOB placement block for use with hardware ecc generation */
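The mxc_nand rework above replaces run-time cpu_is_*()/nfc_is_*() checks with a const per-variant description selected once at probe time; callbacks and layout constants live in struct mxc_nand_devtype_data and are reached through host->devtype_data. A cut-down illustration of the pattern (the field values are examples, not the driver's real tables):

static const struct mxc_nand_devtype_data imx21_devtype_example = {
	.preset		  = preset_v1,
	.send_cmd	  = send_cmd_v1_v2,
	.check_int	  = check_int_v1_v2,
	.irqpending_quirk = 1,	/* i.MX21 cannot read CONFIG2:INT while masked */
	.spare_len	  = 16,
	.eccbytes	  = 3,
	.eccsize	  = 1,
};

/* at probe time, after identifying the SoC: */
host->devtype_data = &imx21_devtype_example;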
@@ -242,21 +271,7 @@ static struct nand_ecclayout nandv2_hw_eccoob_4k = {
242 } 271 }
243}; 272};
244 273
245static const char *part_probes[] = { "RedBoot", "cmdlinepart", NULL }; 274static const char *part_probes[] = { "RedBoot", "cmdlinepart", "ofpart", NULL };
246
247static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
248{
249 struct mxc_nand_host *host = dev_id;
250
251 if (!host->check_int(host))
252 return IRQ_NONE;
253
254 host->irq_control(host, 0);
255
256 complete(&host->op_completion);
257
258 return IRQ_HANDLED;
259}
260 275
261static int check_int_v3(struct mxc_nand_host *host) 276static int check_int_v3(struct mxc_nand_host *host)
262{ 277{
@@ -280,26 +295,12 @@ static int check_int_v1_v2(struct mxc_nand_host *host)
280 if (!(tmp & NFC_V1_V2_CONFIG2_INT)) 295 if (!(tmp & NFC_V1_V2_CONFIG2_INT))
281 return 0; 296 return 0;
282 297
283 if (!cpu_is_mx21()) 298 if (!host->devtype_data->irqpending_quirk)
284 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2); 299 writew(tmp & ~NFC_V1_V2_CONFIG2_INT, NFC_V1_V2_CONFIG2);
285 300
286 return 1; 301 return 1;
287} 302}
288 303
289/*
290 * It has been observed that the i.MX21 cannot read the CONFIG2:INT bit
291 * if interrupts are masked (CONFIG1:INT_MSK is set). To handle this, the
292 * driver can enable/disable the irq line rather than simply masking the
293 * interrupts.
294 */
295static void irq_control_mx21(struct mxc_nand_host *host, int activate)
296{
297 if (activate)
298 enable_irq(host->irq);
299 else
300 disable_irq_nosync(host->irq);
301}
302
303static void irq_control_v1_v2(struct mxc_nand_host *host, int activate) 304static void irq_control_v1_v2(struct mxc_nand_host *host, int activate)
304{ 305{
305 uint16_t tmp; 306 uint16_t tmp;
@@ -328,6 +329,47 @@ static void irq_control_v3(struct mxc_nand_host *host, int activate)
328 writel(tmp, NFC_V3_CONFIG2); 329 writel(tmp, NFC_V3_CONFIG2);
329} 330}
330 331
332static void irq_control(struct mxc_nand_host *host, int activate)
333{
334 if (host->devtype_data->irqpending_quirk) {
335 if (activate)
336 enable_irq(host->irq);
337 else
338 disable_irq_nosync(host->irq);
339 } else {
340 host->devtype_data->irq_control(host, activate);
341 }
342}
343
344static u32 get_ecc_status_v1(struct mxc_nand_host *host)
345{
346 return readw(NFC_V1_V2_ECC_STATUS_RESULT);
347}
348
349static u32 get_ecc_status_v2(struct mxc_nand_host *host)
350{
351 return readl(NFC_V1_V2_ECC_STATUS_RESULT);
352}
353
354static u32 get_ecc_status_v3(struct mxc_nand_host *host)
355{
356 return readl(NFC_V3_ECC_STATUS_RESULT);
357}
358
359static irqreturn_t mxc_nfc_irq(int irq, void *dev_id)
360{
361 struct mxc_nand_host *host = dev_id;
362
363 if (!host->devtype_data->check_int(host))
364 return IRQ_NONE;
365
366 irq_control(host, 0);
367
368 complete(&host->op_completion);
369
370 return IRQ_HANDLED;
371}
372
331/* This function polls the NANDFC to wait for the basic operation to 373/* This function polls the NANDFC to wait for the basic operation to
332 * complete by checking the INT bit of config2 register. 374 * complete by checking the INT bit of config2 register.
333 */ 375 */
@@ -336,14 +378,14 @@ static void wait_op_done(struct mxc_nand_host *host, int useirq)
336 int max_retries = 8000; 378 int max_retries = 8000;
337 379
338 if (useirq) { 380 if (useirq) {
339 if (!host->check_int(host)) { 381 if (!host->devtype_data->check_int(host)) {
340 INIT_COMPLETION(host->op_completion); 382 INIT_COMPLETION(host->op_completion);
341 host->irq_control(host, 1); 383 irq_control(host, 1);
342 wait_for_completion(&host->op_completion); 384 wait_for_completion(&host->op_completion);
343 } 385 }
344 } else { 386 } else {
345 while (max_retries-- > 0) { 387 while (max_retries-- > 0) {
346 if (host->check_int(host)) 388 if (host->devtype_data->check_int(host))
347 break; 389 break;
348 390
349 udelay(1); 391 udelay(1);
@@ -374,7 +416,7 @@ static void send_cmd_v1_v2(struct mxc_nand_host *host, uint16_t cmd, int useirq)
374 writew(cmd, NFC_V1_V2_FLASH_CMD); 416 writew(cmd, NFC_V1_V2_FLASH_CMD);
375 writew(NFC_CMD, NFC_V1_V2_CONFIG2); 417 writew(NFC_CMD, NFC_V1_V2_CONFIG2);
376 418
377 if (cpu_is_mx21() && (cmd == NAND_CMD_RESET)) { 419 if (host->devtype_data->irqpending_quirk && (cmd == NAND_CMD_RESET)) {
378 int max_retries = 100; 420 int max_retries = 100;
379 /* Reset completion is indicated by NFC_CONFIG2 */ 421 /* Reset completion is indicated by NFC_CONFIG2 */
380 /* being set to 0 */ 422 /* being set to 0 */
@@ -433,13 +475,27 @@ static void send_page_v3(struct mtd_info *mtd, unsigned int ops)
433 wait_op_done(host, false); 475 wait_op_done(host, false);
434} 476}
435 477
436static void send_page_v1_v2(struct mtd_info *mtd, unsigned int ops) 478static void send_page_v2(struct mtd_info *mtd, unsigned int ops)
479{
480 struct nand_chip *nand_chip = mtd->priv;
481 struct mxc_nand_host *host = nand_chip->priv;
482
483 /* NANDFC buffer 0 is used for page read/write */
484 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
485
486 writew(ops, NFC_V1_V2_CONFIG2);
487
488 /* Wait for operation to complete */
489 wait_op_done(host, true);
490}
491
492static void send_page_v1(struct mtd_info *mtd, unsigned int ops)
437{ 493{
438 struct nand_chip *nand_chip = mtd->priv; 494 struct nand_chip *nand_chip = mtd->priv;
439 struct mxc_nand_host *host = nand_chip->priv; 495 struct mxc_nand_host *host = nand_chip->priv;
440 int bufs, i; 496 int bufs, i;
441 497
442 if (nfc_is_v1() && mtd->writesize > 512) 498 if (mtd->writesize > 512)
443 bufs = 4; 499 bufs = 4;
444 else 500 else
445 bufs = 1; 501 bufs = 1;
@@ -463,7 +519,7 @@ static void send_read_id_v3(struct mxc_nand_host *host)
463 519
464 wait_op_done(host, true); 520 wait_op_done(host, true);
465 521
466 memcpy(host->data_buf, host->main_area0, 16); 522 memcpy_fromio(host->data_buf, host->main_area0, 16);
467} 523}
468 524
469/* Request the NANDFC to perform a read of the NAND device ID. */ 525/* Request the NANDFC to perform a read of the NAND device ID. */
@@ -479,7 +535,7 @@ static void send_read_id_v1_v2(struct mxc_nand_host *host)
479 /* Wait for operation to complete */ 535 /* Wait for operation to complete */
480 wait_op_done(host, true); 536 wait_op_done(host, true);
481 537
482 memcpy(host->data_buf, host->main_area0, 16); 538 memcpy_fromio(host->data_buf, host->main_area0, 16);
483 539
484 if (this->options & NAND_BUSWIDTH_16) { 540 if (this->options & NAND_BUSWIDTH_16) {
485 /* compress the ID info */ 541 /* compress the ID info */
@@ -555,7 +611,7 @@ static int mxc_nand_correct_data_v1(struct mtd_info *mtd, u_char *dat,
555 * additional correction. 2-Bit errors cannot be corrected by 611 * additional correction. 2-Bit errors cannot be corrected by
556 * HW ECC, so we need to return failure 612 * HW ECC, so we need to return failure
557 */ 613 */
558 uint16_t ecc_status = readw(NFC_V1_V2_ECC_STATUS_RESULT); 614 uint16_t ecc_status = get_ecc_status_v1(host);
559 615
560 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) { 616 if (((ecc_status & 0x3) == 2) || ((ecc_status >> 2) == 2)) {
561 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n"); 617 pr_debug("MXC_NAND: HWECC uncorrectable 2-bit ECC error\n");
@@ -580,10 +636,7 @@ static int mxc_nand_correct_data_v2_v3(struct mtd_info *mtd, u_char *dat,
580 636
581 no_subpages = mtd->writesize >> 9; 637 no_subpages = mtd->writesize >> 9;
582 638
583 if (nfc_is_v21()) 639 ecc_stat = host->devtype_data->get_ecc_status(host);
584 ecc_stat = readl(NFC_V1_V2_ECC_STATUS_RESULT);
585 else
586 ecc_stat = readl(NFC_V3_ECC_STATUS_RESULT);
587 640
588 do { 641 do {
589 err = ecc_stat & ecc_bit_mask; 642 err = ecc_stat & ecc_bit_mask;
@@ -616,7 +669,7 @@ static u_char mxc_nand_read_byte(struct mtd_info *mtd)
616 669
617 /* Check for status request */ 670 /* Check for status request */
618 if (host->status_request) 671 if (host->status_request)
619 return host->get_dev_status(host) & 0xFF; 672 return host->devtype_data->get_dev_status(host) & 0xFF;
620 673
621 ret = *(uint8_t *)(host->data_buf + host->buf_start); 674 ret = *(uint8_t *)(host->data_buf + host->buf_start);
622 host->buf_start++; 675 host->buf_start++;
@@ -682,7 +735,7 @@ static int mxc_nand_verify_buf(struct mtd_info *mtd,
682 735
683/* This function is used by upper layer for select and 736/* This function is used by upper layer for select and
684 * deselect of the NAND chip */ 737 * deselect of the NAND chip */
685static void mxc_nand_select_chip(struct mtd_info *mtd, int chip) 738static void mxc_nand_select_chip_v1_v3(struct mtd_info *mtd, int chip)
686{ 739{
687 struct nand_chip *nand_chip = mtd->priv; 740 struct nand_chip *nand_chip = mtd->priv;
688 struct mxc_nand_host *host = nand_chip->priv; 741 struct mxc_nand_host *host = nand_chip->priv;
@@ -701,11 +754,30 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
701 clk_prepare_enable(host->clk); 754 clk_prepare_enable(host->clk);
702 host->clk_act = 1; 755 host->clk_act = 1;
703 } 756 }
757}
704 758
705 if (nfc_is_v21()) { 759static void mxc_nand_select_chip_v2(struct mtd_info *mtd, int chip)
706 host->active_cs = chip; 760{
707 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR); 761 struct nand_chip *nand_chip = mtd->priv;
762 struct mxc_nand_host *host = nand_chip->priv;
763
764 if (chip == -1) {
765 /* Disable the NFC clock */
766 if (host->clk_act) {
767 clk_disable(host->clk);
768 host->clk_act = 0;
769 }
770 return;
771 }
772
773 if (!host->clk_act) {
774 /* Enable the NFC clock */
775 clk_enable(host->clk);
776 host->clk_act = 1;
708 } 777 }
778
779 host->active_cs = chip;
780 writew(host->active_cs << 4, NFC_V1_V2_BUF_ADDR);
709} 781}
710 782
711/* 783/*
@@ -718,23 +790,23 @@ static void copy_spare(struct mtd_info *mtd, bool bfrom)
718 u16 i, j; 790 u16 i, j;
719 u16 n = mtd->writesize >> 9; 791 u16 n = mtd->writesize >> 9;
720 u8 *d = host->data_buf + mtd->writesize; 792 u8 *d = host->data_buf + mtd->writesize;
721 u8 *s = host->spare0; 793 u8 __iomem *s = host->spare0;
722 u16 t = host->spare_len; 794 u16 t = host->devtype_data->spare_len;
723 795
724 j = (mtd->oobsize / n >> 1) << 1; 796 j = (mtd->oobsize / n >> 1) << 1;
725 797
726 if (bfrom) { 798 if (bfrom) {
727 for (i = 0; i < n - 1; i++) 799 for (i = 0; i < n - 1; i++)
728 memcpy(d + i * j, s + i * t, j); 800 memcpy_fromio(d + i * j, s + i * t, j);
729 801
730 /* the last section */ 802 /* the last section */
731 memcpy(d + i * j, s + i * t, mtd->oobsize - i * j); 803 memcpy_fromio(d + i * j, s + i * t, mtd->oobsize - i * j);
732 } else { 804 } else {
733 for (i = 0; i < n - 1; i++) 805 for (i = 0; i < n - 1; i++)
734 memcpy(&s[i * t], &d[i * j], j); 806 memcpy_toio(&s[i * t], &d[i * j], j);
735 807
736 /* the last section */ 808 /* the last section */
737 memcpy(&s[i * t], &d[i * j], mtd->oobsize - i * j); 809 memcpy_toio(&s[i * t], &d[i * j], mtd->oobsize - i * j);
738 } 810 }
739} 811}
740 812
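copy_spare() and the read paths above switch to the __iomem accessors because the controller's internal buffers are device memory; a plain memcpy() on a void __iomem * pointer is not portable. The direction of each helper, in a tiny sketch (the 16-byte length is arbitrary):

	void __iomem *spare = host->spare0;		/* NFC internal RAM */
	u8 *dst = host->data_buf + mtd->writesize;	/* ordinary kernel memory */

	memcpy_fromio(dst, spare, 16);	/* device buffer -> RAM */
	memcpy_toio(spare, dst, 16);	/* RAM -> device buffer */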
@@ -751,34 +823,44 @@ static void mxc_do_addr_cycle(struct mtd_info *mtd, int column, int page_addr)
751 * perform a read/write buf operation, the saved column 823 * perform a read/write buf operation, the saved column
752 * address is used to index into the full page. 824 * address is used to index into the full page.
753 */ 825 */
754 host->send_addr(host, 0, page_addr == -1); 826 host->devtype_data->send_addr(host, 0, page_addr == -1);
755 if (mtd->writesize > 512) 827 if (mtd->writesize > 512)
756 /* another col addr cycle for 2k page */ 828 /* another col addr cycle for 2k page */
757 host->send_addr(host, 0, false); 829 host->devtype_data->send_addr(host, 0, false);
758 } 830 }
759 831
760 /* Write out page address, if necessary */ 832 /* Write out page address, if necessary */
761 if (page_addr != -1) { 833 if (page_addr != -1) {
762 /* paddr_0 - p_addr_7 */ 834 /* paddr_0 - p_addr_7 */
763 host->send_addr(host, (page_addr & 0xff), false); 835 host->devtype_data->send_addr(host, (page_addr & 0xff), false);
764 836
765 if (mtd->writesize > 512) { 837 if (mtd->writesize > 512) {
766 if (mtd->size >= 0x10000000) { 838 if (mtd->size >= 0x10000000) {
767 /* paddr_8 - paddr_15 */ 839 /* paddr_8 - paddr_15 */
768 host->send_addr(host, (page_addr >> 8) & 0xff, false); 840 host->devtype_data->send_addr(host,
769 host->send_addr(host, (page_addr >> 16) & 0xff, true); 841 (page_addr >> 8) & 0xff,
842 false);
843 host->devtype_data->send_addr(host,
844 (page_addr >> 16) & 0xff,
845 true);
770 } else 846 } else
771 /* paddr_8 - paddr_15 */ 847 /* paddr_8 - paddr_15 */
772 host->send_addr(host, (page_addr >> 8) & 0xff, true); 848 host->devtype_data->send_addr(host,
849 (page_addr >> 8) & 0xff, true);
773 } else { 850 } else {
774 /* One more address cycle for higher density devices */ 851 /* One more address cycle for higher density devices */
775 if (mtd->size >= 0x4000000) { 852 if (mtd->size >= 0x4000000) {
776 /* paddr_8 - paddr_15 */ 853 /* paddr_8 - paddr_15 */
777 host->send_addr(host, (page_addr >> 8) & 0xff, false); 854 host->devtype_data->send_addr(host,
778 host->send_addr(host, (page_addr >> 16) & 0xff, true); 855 (page_addr >> 8) & 0xff,
856 false);
857 host->devtype_data->send_addr(host,
858 (page_addr >> 16) & 0xff,
859 true);
779 } else 860 } else
780 /* paddr_8 - paddr_15 */ 861 /* paddr_8 - paddr_15 */
781 host->send_addr(host, (page_addr >> 8) & 0xff, true); 862 host->devtype_data->send_addr(host,
863 (page_addr >> 8) & 0xff, true);
782 } 864 }
783 } 865 }
784} 866}
@@ -800,7 +882,35 @@ static int get_eccsize(struct mtd_info *mtd)
800 return 8; 882 return 8;
801} 883}
802 884
803static void preset_v1_v2(struct mtd_info *mtd) 885static void preset_v1(struct mtd_info *mtd)
886{
887 struct nand_chip *nand_chip = mtd->priv;
888 struct mxc_nand_host *host = nand_chip->priv;
889 uint16_t config1 = 0;
890
891 if (nand_chip->ecc.mode == NAND_ECC_HW)
892 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
893
894 if (!host->devtype_data->irqpending_quirk)
895 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
896
897 host->eccsize = 1;
898
899 writew(config1, NFC_V1_V2_CONFIG1);
900 /* preset operation */
901
902 /* Unlock the internal RAM Buffer */
903 writew(0x2, NFC_V1_V2_CONFIG);
904
905 /* Blocks to be unlocked */
906 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
907 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
908
909 /* Unlock Block Command for given address range */
910 writew(0x4, NFC_V1_V2_WRPROT);
911}
912
913static void preset_v2(struct mtd_info *mtd)
804{ 914{
805 struct nand_chip *nand_chip = mtd->priv; 915 struct nand_chip *nand_chip = mtd->priv;
806 struct mxc_nand_host *host = nand_chip->priv; 916 struct mxc_nand_host *host = nand_chip->priv;
@@ -809,13 +919,12 @@ static void preset_v1_v2(struct mtd_info *mtd)
809 if (nand_chip->ecc.mode == NAND_ECC_HW) 919 if (nand_chip->ecc.mode == NAND_ECC_HW)
810 config1 |= NFC_V1_V2_CONFIG1_ECC_EN; 920 config1 |= NFC_V1_V2_CONFIG1_ECC_EN;
811 921
812 if (nfc_is_v21()) 922 config1 |= NFC_V2_CONFIG1_FP_INT;
813 config1 |= NFC_V2_CONFIG1_FP_INT;
814 923
815 if (!cpu_is_mx21()) 924 if (!host->devtype_data->irqpending_quirk)
816 config1 |= NFC_V1_V2_CONFIG1_INT_MSK; 925 config1 |= NFC_V1_V2_CONFIG1_INT_MSK;
817 926
818 if (nfc_is_v21() && mtd->writesize) { 927 if (mtd->writesize) {
819 uint16_t pages_per_block = mtd->erasesize / mtd->writesize; 928 uint16_t pages_per_block = mtd->erasesize / mtd->writesize;
820 929
821 host->eccsize = get_eccsize(mtd); 930 host->eccsize = get_eccsize(mtd);
@@ -834,20 +943,14 @@ static void preset_v1_v2(struct mtd_info *mtd)
834 writew(0x2, NFC_V1_V2_CONFIG); 943 writew(0x2, NFC_V1_V2_CONFIG);
835 944
836 /* Blocks to be unlocked */ 945 /* Blocks to be unlocked */
837 if (nfc_is_v21()) { 946 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0);
838 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR0); 947 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1);
839 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR1); 948 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2);
840 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR2); 949 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3);
841 writew(0x0, NFC_V21_UNLOCKSTART_BLKADDR3); 950 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0);
842 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR0); 951 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1);
843 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR1); 952 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2);
844 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR2); 953 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
845 writew(0xffff, NFC_V21_UNLOCKEND_BLKADDR3);
846 } else if (nfc_is_v1()) {
847 writew(0x0, NFC_V1_UNLOCKSTART_BLKADDR);
848 writew(0xffff, NFC_V1_UNLOCKEND_BLKADDR);
849 } else
850 BUG();
851 954
852 /* Unlock Block Command for given address range */ 955 /* Unlock Block Command for given address range */
853 writew(0x4, NFC_V1_V2_WRPROT); 956 writew(0x4, NFC_V1_V2_WRPROT);
@@ -937,15 +1040,15 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
937 /* Command pre-processing step */ 1040 /* Command pre-processing step */
938 switch (command) { 1041 switch (command) {
939 case NAND_CMD_RESET: 1042 case NAND_CMD_RESET:
940 host->preset(mtd); 1043 host->devtype_data->preset(mtd);
941 host->send_cmd(host, command, false); 1044 host->devtype_data->send_cmd(host, command, false);
942 break; 1045 break;
943 1046
944 case NAND_CMD_STATUS: 1047 case NAND_CMD_STATUS:
945 host->buf_start = 0; 1048 host->buf_start = 0;
946 host->status_request = true; 1049 host->status_request = true;
947 1050
948 host->send_cmd(host, command, true); 1051 host->devtype_data->send_cmd(host, command, true);
949 mxc_do_addr_cycle(mtd, column, page_addr); 1052 mxc_do_addr_cycle(mtd, column, page_addr);
950 break; 1053 break;
951 1054
@@ -958,15 +1061,16 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
958 1061
959 command = NAND_CMD_READ0; /* only READ0 is valid */ 1062 command = NAND_CMD_READ0; /* only READ0 is valid */
960 1063
961 host->send_cmd(host, command, false); 1064 host->devtype_data->send_cmd(host, command, false);
962 mxc_do_addr_cycle(mtd, column, page_addr); 1065 mxc_do_addr_cycle(mtd, column, page_addr);
963 1066
964 if (mtd->writesize > 512) 1067 if (mtd->writesize > 512)
965 host->send_cmd(host, NAND_CMD_READSTART, true); 1068 host->devtype_data->send_cmd(host,
1069 NAND_CMD_READSTART, true);
966 1070
967 host->send_page(mtd, NFC_OUTPUT); 1071 host->devtype_data->send_page(mtd, NFC_OUTPUT);
968 1072
969 memcpy(host->data_buf, host->main_area0, mtd->writesize); 1073 memcpy_fromio(host->data_buf, host->main_area0, mtd->writesize);
970 copy_spare(mtd, true); 1074 copy_spare(mtd, true);
971 break; 1075 break;
972 1076
@@ -977,28 +1081,28 @@ static void mxc_nand_command(struct mtd_info *mtd, unsigned command,
977 1081
978 host->buf_start = column; 1082 host->buf_start = column;
979 1083
980 host->send_cmd(host, command, false); 1084 host->devtype_data->send_cmd(host, command, false);
981 mxc_do_addr_cycle(mtd, column, page_addr); 1085 mxc_do_addr_cycle(mtd, column, page_addr);
982 break; 1086 break;
983 1087
984 case NAND_CMD_PAGEPROG: 1088 case NAND_CMD_PAGEPROG:
985 memcpy(host->main_area0, host->data_buf, mtd->writesize); 1089 memcpy_toio(host->main_area0, host->data_buf, mtd->writesize);
986 copy_spare(mtd, false); 1090 copy_spare(mtd, false);
987 host->send_page(mtd, NFC_INPUT); 1091 host->devtype_data->send_page(mtd, NFC_INPUT);
988 host->send_cmd(host, command, true); 1092 host->devtype_data->send_cmd(host, command, true);
989 mxc_do_addr_cycle(mtd, column, page_addr); 1093 mxc_do_addr_cycle(mtd, column, page_addr);
990 break; 1094 break;
991 1095
992 case NAND_CMD_READID: 1096 case NAND_CMD_READID:
993 host->send_cmd(host, command, true); 1097 host->devtype_data->send_cmd(host, command, true);
994 mxc_do_addr_cycle(mtd, column, page_addr); 1098 mxc_do_addr_cycle(mtd, column, page_addr);
995 host->send_read_id(host); 1099 host->devtype_data->send_read_id(host);
996 host->buf_start = column; 1100 host->buf_start = column;
997 break; 1101 break;
998 1102
999 case NAND_CMD_ERASE1: 1103 case NAND_CMD_ERASE1:
1000 case NAND_CMD_ERASE2: 1104 case NAND_CMD_ERASE2:
1001 host->send_cmd(host, command, false); 1105 host->devtype_data->send_cmd(host, command, false);
1002 mxc_do_addr_cycle(mtd, column, page_addr); 1106 mxc_do_addr_cycle(mtd, column, page_addr);
1003 1107
1004 break; 1108 break;
@@ -1032,15 +1136,191 @@ static struct nand_bbt_descr bbt_mirror_descr = {
1032 .pattern = mirror_pattern, 1136 .pattern = mirror_pattern,
1033}; 1137};
1034 1138
1139/* v1 + irqpending_quirk: i.MX21 */
1140static const struct mxc_nand_devtype_data imx21_nand_devtype_data = {
1141 .preset = preset_v1,
1142 .send_cmd = send_cmd_v1_v2,
1143 .send_addr = send_addr_v1_v2,
1144 .send_page = send_page_v1,
1145 .send_read_id = send_read_id_v1_v2,
1146 .get_dev_status = get_dev_status_v1_v2,
1147 .check_int = check_int_v1_v2,
1148 .irq_control = irq_control_v1_v2,
1149 .get_ecc_status = get_ecc_status_v1,
1150 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1151 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1152 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1153 .select_chip = mxc_nand_select_chip_v1_v3,
1154 .correct_data = mxc_nand_correct_data_v1,
1155 .irqpending_quirk = 1,
1156 .needs_ip = 0,
1157 .regs_offset = 0xe00,
1158 .spare0_offset = 0x800,
1159 .spare_len = 16,
1160 .eccbytes = 3,
1161 .eccsize = 1,
1162};
1163
1164/* v1 + !irqpending_quirk: i.MX27, i.MX31 */
1165static const struct mxc_nand_devtype_data imx27_nand_devtype_data = {
1166 .preset = preset_v1,
1167 .send_cmd = send_cmd_v1_v2,
1168 .send_addr = send_addr_v1_v2,
1169 .send_page = send_page_v1,
1170 .send_read_id = send_read_id_v1_v2,
1171 .get_dev_status = get_dev_status_v1_v2,
1172 .check_int = check_int_v1_v2,
1173 .irq_control = irq_control_v1_v2,
1174 .get_ecc_status = get_ecc_status_v1,
1175 .ecclayout_512 = &nandv1_hw_eccoob_smallpage,
1176 .ecclayout_2k = &nandv1_hw_eccoob_largepage,
1177 .ecclayout_4k = &nandv1_hw_eccoob_smallpage, /* XXX: needs fix */
1178 .select_chip = mxc_nand_select_chip_v1_v3,
1179 .correct_data = mxc_nand_correct_data_v1,
1180 .irqpending_quirk = 0,
1181 .needs_ip = 0,
1182 .regs_offset = 0xe00,
1183 .spare0_offset = 0x800,
1184 .axi_offset = 0,
1185 .spare_len = 16,
1186 .eccbytes = 3,
1187 .eccsize = 1,
1188};
1189
1190/* v21: i.MX25, i.MX35 */
1191static const struct mxc_nand_devtype_data imx25_nand_devtype_data = {
1192 .preset = preset_v2,
1193 .send_cmd = send_cmd_v1_v2,
1194 .send_addr = send_addr_v1_v2,
1195 .send_page = send_page_v2,
1196 .send_read_id = send_read_id_v1_v2,
1197 .get_dev_status = get_dev_status_v1_v2,
1198 .check_int = check_int_v1_v2,
1199 .irq_control = irq_control_v1_v2,
1200 .get_ecc_status = get_ecc_status_v2,
1201 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1202 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1203 .ecclayout_4k = &nandv2_hw_eccoob_4k,
1204 .select_chip = mxc_nand_select_chip_v2,
1205 .correct_data = mxc_nand_correct_data_v2_v3,
1206 .irqpending_quirk = 0,
1207 .needs_ip = 0,
1208 .regs_offset = 0x1e00,
1209 .spare0_offset = 0x1000,
1210 .axi_offset = 0,
1211 .spare_len = 64,
1212 .eccbytes = 9,
1213 .eccsize = 0,
1214};
1215
1216/* v3: i.MX51, i.MX53 */
1217static const struct mxc_nand_devtype_data imx51_nand_devtype_data = {
1218 .preset = preset_v3,
1219 .send_cmd = send_cmd_v3,
1220 .send_addr = send_addr_v3,
1221 .send_page = send_page_v3,
1222 .send_read_id = send_read_id_v3,
1223 .get_dev_status = get_dev_status_v3,
1224 .check_int = check_int_v3,
1225 .irq_control = irq_control_v3,
1226 .get_ecc_status = get_ecc_status_v3,
1227 .ecclayout_512 = &nandv2_hw_eccoob_smallpage,
1228 .ecclayout_2k = &nandv2_hw_eccoob_largepage,
1229 .ecclayout_4k = &nandv2_hw_eccoob_smallpage, /* XXX: needs fix */
1230 .select_chip = mxc_nand_select_chip_v1_v3,
1231 .correct_data = mxc_nand_correct_data_v2_v3,
1232 .irqpending_quirk = 0,
1233 .needs_ip = 1,
1234 .regs_offset = 0,
1235 .spare0_offset = 0x1000,
1236 .axi_offset = 0x1e00,
1237 .spare_len = 64,
1238 .eccbytes = 0,
1239 .eccsize = 0,
1240};
1241
1242#ifdef CONFIG_OF_MTD
1243static const struct of_device_id mxcnd_dt_ids[] = {
1244 {
1245 .compatible = "fsl,imx21-nand",
1246 .data = &imx21_nand_devtype_data,
1247 }, {
1248 .compatible = "fsl,imx27-nand",
1249 .data = &imx27_nand_devtype_data,
1250 }, {
1251 .compatible = "fsl,imx25-nand",
1252 .data = &imx25_nand_devtype_data,
1253 }, {
1254 .compatible = "fsl,imx51-nand",
1255 .data = &imx51_nand_devtype_data,
1256 },
1257 { /* sentinel */ }
1258};
1259
1260static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1261{
1262 struct device_node *np = host->dev->of_node;
1263 struct mxc_nand_platform_data *pdata = &host->pdata;
1264 const struct of_device_id *of_id =
1265 of_match_device(mxcnd_dt_ids, host->dev);
1266 int buswidth;
1267
1268 if (!np)
1269 return 1;
1270
1271 if (of_get_nand_ecc_mode(np) >= 0)
1272 pdata->hw_ecc = 1;
1273
1274 pdata->flash_bbt = of_get_nand_on_flash_bbt(np);
1275
1276 buswidth = of_get_nand_bus_width(np);
1277 if (buswidth < 0)
1278 return buswidth;
1279
1280 pdata->width = buswidth / 8;
1281
1282 host->devtype_data = of_id->data;
1283
1284 return 0;
1285}
1286#else
1287static int __init mxcnd_probe_dt(struct mxc_nand_host *host)
1288{
1289 return 1;
1290}
1291#endif
1292
1293static int __init mxcnd_probe_pdata(struct mxc_nand_host *host)
1294{
1295 struct mxc_nand_platform_data *pdata = host->dev->platform_data;
1296
1297 if (!pdata)
1298 return -ENODEV;
1299
1300 host->pdata = *pdata;
1301
1302 if (nfc_is_v1()) {
1303 if (cpu_is_mx21())
1304 host->devtype_data = &imx21_nand_devtype_data;
1305 else
1306 host->devtype_data = &imx27_nand_devtype_data;
1307 } else if (nfc_is_v21()) {
1308 host->devtype_data = &imx25_nand_devtype_data;
1309 } else if (nfc_is_v3_2()) {
1310 host->devtype_data = &imx51_nand_devtype_data;
1311 } else
1312 BUG();
1313
1314 return 0;
1315}
1316
1035static int __init mxcnd_probe(struct platform_device *pdev) 1317static int __init mxcnd_probe(struct platform_device *pdev)
1036{ 1318{
1037 struct nand_chip *this; 1319 struct nand_chip *this;
1038 struct mtd_info *mtd; 1320 struct mtd_info *mtd;
1039 struct mxc_nand_platform_data *pdata = pdev->dev.platform_data;
1040 struct mxc_nand_host *host; 1321 struct mxc_nand_host *host;
1041 struct resource *res; 1322 struct resource *res;
1042 int err = 0; 1323 int err = 0;
1043 struct nand_ecclayout *oob_smallpage, *oob_largepage;
1044 1324
1045 /* Allocate memory for MTD device structure and private data */ 1325 /* Allocate memory for MTD device structure and private data */
1046 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE + 1326 host = kzalloc(sizeof(struct mxc_nand_host) + NAND_MAX_PAGESIZE +
@@ -1065,7 +1345,6 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1065 this->priv = host; 1345 this->priv = host;
1066 this->dev_ready = mxc_nand_dev_ready; 1346 this->dev_ready = mxc_nand_dev_ready;
1067 this->cmdfunc = mxc_nand_command; 1347 this->cmdfunc = mxc_nand_command;
1068 this->select_chip = mxc_nand_select_chip;
1069 this->read_byte = mxc_nand_read_byte; 1348 this->read_byte = mxc_nand_read_byte;
1070 this->read_word = mxc_nand_read_word; 1349 this->read_word = mxc_nand_read_word;
1071 this->write_buf = mxc_nand_write_buf; 1350 this->write_buf = mxc_nand_write_buf;
@@ -1095,36 +1374,26 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1095 1374
1096 host->main_area0 = host->base; 1375 host->main_area0 = host->base;
1097 1376
1098 if (nfc_is_v1() || nfc_is_v21()) { 1377 err = mxcnd_probe_dt(host);
1099 host->preset = preset_v1_v2; 1378 if (err > 0)
1100 host->send_cmd = send_cmd_v1_v2; 1379 err = mxcnd_probe_pdata(host);
1101 host->send_addr = send_addr_v1_v2; 1380 if (err < 0)
1102 host->send_page = send_page_v1_v2; 1381 goto eirq;
1103 host->send_read_id = send_read_id_v1_v2;
1104 host->get_dev_status = get_dev_status_v1_v2;
1105 host->check_int = check_int_v1_v2;
1106 if (cpu_is_mx21())
1107 host->irq_control = irq_control_mx21;
1108 else
1109 host->irq_control = irq_control_v1_v2;
1110 }
1111 1382
1112 if (nfc_is_v21()) { 1383 if (host->devtype_data->regs_offset)
1113 host->regs = host->base + 0x1e00; 1384 host->regs = host->base + host->devtype_data->regs_offset;
1114 host->spare0 = host->base + 0x1000; 1385 host->spare0 = host->base + host->devtype_data->spare0_offset;
1115 host->spare_len = 64; 1386 if (host->devtype_data->axi_offset)
1116 oob_smallpage = &nandv2_hw_eccoob_smallpage; 1387 host->regs_axi = host->base + host->devtype_data->axi_offset;
1117 oob_largepage = &nandv2_hw_eccoob_largepage; 1388
1118 this->ecc.bytes = 9; 1389 this->ecc.bytes = host->devtype_data->eccbytes;
1119 } else if (nfc_is_v1()) { 1390 host->eccsize = host->devtype_data->eccsize;
1120 host->regs = host->base + 0xe00; 1391
1121 host->spare0 = host->base + 0x800; 1392 this->select_chip = host->devtype_data->select_chip;
1122 host->spare_len = 16; 1393 this->ecc.size = 512;
1123 oob_smallpage = &nandv1_hw_eccoob_smallpage; 1394 this->ecc.layout = host->devtype_data->ecclayout_512;
1124 oob_largepage = &nandv1_hw_eccoob_largepage; 1395
1125 this->ecc.bytes = 3; 1396 if (host->devtype_data->needs_ip) {
1126 host->eccsize = 1;
1127 } else if (nfc_is_v3_2()) {
1128 res = platform_get_resource(pdev, IORESOURCE_MEM, 1); 1397 res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1129 if (!res) { 1398 if (!res) {
1130 err = -ENODEV; 1399 err = -ENODEV;
@@ -1135,42 +1404,22 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1135 err = -ENOMEM; 1404 err = -ENOMEM;
1136 goto eirq; 1405 goto eirq;
1137 } 1406 }
1138 host->regs_axi = host->base + 0x1e00; 1407 }
1139 host->spare0 = host->base + 0x1000;
1140 host->spare_len = 64;
1141 host->preset = preset_v3;
1142 host->send_cmd = send_cmd_v3;
1143 host->send_addr = send_addr_v3;
1144 host->send_page = send_page_v3;
1145 host->send_read_id = send_read_id_v3;
1146 host->check_int = check_int_v3;
1147 host->get_dev_status = get_dev_status_v3;
1148 host->irq_control = irq_control_v3;
1149 oob_smallpage = &nandv2_hw_eccoob_smallpage;
1150 oob_largepage = &nandv2_hw_eccoob_largepage;
1151 } else
1152 BUG();
1153
1154 this->ecc.size = 512;
1155 this->ecc.layout = oob_smallpage;
1156 1408
1157 if (pdata->hw_ecc) { 1409 if (host->pdata.hw_ecc) {
1158 this->ecc.calculate = mxc_nand_calculate_ecc; 1410 this->ecc.calculate = mxc_nand_calculate_ecc;
1159 this->ecc.hwctl = mxc_nand_enable_hwecc; 1411 this->ecc.hwctl = mxc_nand_enable_hwecc;
1160 if (nfc_is_v1()) 1412 this->ecc.correct = host->devtype_data->correct_data;
1161 this->ecc.correct = mxc_nand_correct_data_v1;
1162 else
1163 this->ecc.correct = mxc_nand_correct_data_v2_v3;
1164 this->ecc.mode = NAND_ECC_HW; 1413 this->ecc.mode = NAND_ECC_HW;
1165 } else { 1414 } else {
1166 this->ecc.mode = NAND_ECC_SOFT; 1415 this->ecc.mode = NAND_ECC_SOFT;
1167 } 1416 }
1168 1417
1169 /* NAND bus width determines access funtions used by upper layer */ 1418 /* NAND bus width determines access functions used by upper layer */
1170 if (pdata->width == 2) 1419 if (host->pdata.width == 2)
1171 this->options |= NAND_BUSWIDTH_16; 1420 this->options |= NAND_BUSWIDTH_16;
1172 1421
1173 if (pdata->flash_bbt) { 1422 if (host->pdata.flash_bbt) {
1174 this->bbt_td = &bbt_main_descr; 1423 this->bbt_td = &bbt_main_descr;
1175 this->bbt_md = &bbt_mirror_descr; 1424 this->bbt_md = &bbt_mirror_descr;
1176 /* update flash based bbt */ 1425 /* update flash based bbt */
@@ -1182,28 +1431,25 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1182 host->irq = platform_get_irq(pdev, 0); 1431 host->irq = platform_get_irq(pdev, 0);
1183 1432
1184 /* 1433 /*
1185 * mask the interrupt. For i.MX21 explicitely call 1434 * Use host->devtype_data->irq_control() here instead of irq_control()
1186 * irq_control_v1_v2 to use the mask bit. We can't call 1435 * because we must not disable_irq_nosync without having requested the
1187 * disable_irq_nosync() for an interrupt we do not own yet. 1436 * irq.
1188 */ 1437 */
1189 if (cpu_is_mx21()) 1438 host->devtype_data->irq_control(host, 0);
1190 irq_control_v1_v2(host, 0);
1191 else
1192 host->irq_control(host, 0);
1193 1439
1194 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host); 1440 err = request_irq(host->irq, mxc_nfc_irq, IRQF_DISABLED, DRIVER_NAME, host);
1195 if (err) 1441 if (err)
1196 goto eirq; 1442 goto eirq;
1197 1443
1198 host->irq_control(host, 0);
1199
1200 /* 1444 /*
1201 * Now that the interrupt is disabled make sure the interrupt 1445 * Now that we "own" the interrupt make sure the interrupt mask bit is
1202 * mask bit is cleared on i.MX21. Otherwise we can't read 1446 * cleared on i.MX21. Otherwise we can't read the interrupt status bit
1203 * the interrupt status bit on this machine. 1447 * on this machine.
1204 */ 1448 */
1205 if (cpu_is_mx21()) 1449 if (host->devtype_data->irqpending_quirk) {
1206 irq_control_v1_v2(host, 1); 1450 disable_irq_nosync(host->irq);
1451 host->devtype_data->irq_control(host, 1);
1452 }
1207 1453
1208 /* first scan to find the device and get the page size */ 1454 /* first scan to find the device and get the page size */
1209 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) { 1455 if (nand_scan_ident(mtd, nfc_is_v21() ? 4 : 1, NULL)) {
@@ -1212,18 +1458,12 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1212 } 1458 }
1213 1459
1214 /* Call preset again, with correct writesize this time */ 1460 /* Call preset again, with correct writesize this time */
1215 host->preset(mtd); 1461 host->devtype_data->preset(mtd);
1216 1462
1217 if (mtd->writesize == 2048) 1463 if (mtd->writesize == 2048)
1218 this->ecc.layout = oob_largepage; 1464 this->ecc.layout = host->devtype_data->ecclayout_2k;
1219 if (nfc_is_v21() && mtd->writesize == 4096) 1465 else if (mtd->writesize == 4096)
1220 this->ecc.layout = &nandv2_hw_eccoob_4k; 1466 this->ecc.layout = host->devtype_data->ecclayout_4k;
1221
1222 /* second phase scan */
1223 if (nand_scan_tail(mtd)) {
1224 err = -ENXIO;
1225 goto escan;
1226 }
1227 1467
1228 if (this->ecc.mode == NAND_ECC_HW) { 1468 if (this->ecc.mode == NAND_ECC_HW) {
1229 if (nfc_is_v1()) 1469 if (nfc_is_v1())
@@ -1232,9 +1472,19 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1232 this->ecc.strength = (host->eccsize == 4) ? 4 : 8; 1472 this->ecc.strength = (host->eccsize == 4) ? 4 : 8;
1233 } 1473 }
1234 1474
1475 /* second phase scan */
1476 if (nand_scan_tail(mtd)) {
1477 err = -ENXIO;
1478 goto escan;
1479 }
1480
1235 /* Register the partitions */ 1481 /* Register the partitions */
1236 mtd_device_parse_register(mtd, part_probes, NULL, pdata->parts, 1482 mtd_device_parse_register(mtd, part_probes,
1237 pdata->nr_parts); 1483 &(struct mtd_part_parser_data){
1484 .of_node = pdev->dev.of_node,
1485 },
1486 host->pdata.parts,
1487 host->pdata.nr_parts);
1238 1488
1239 platform_set_drvdata(pdev, host); 1489 platform_set_drvdata(pdev, host);
1240 1490
@@ -1275,6 +1525,8 @@ static int __devexit mxcnd_remove(struct platform_device *pdev)
1275static struct platform_driver mxcnd_driver = { 1525static struct platform_driver mxcnd_driver = {
1276 .driver = { 1526 .driver = {
1277 .name = DRIVER_NAME, 1527 .name = DRIVER_NAME,
1528 .owner = THIS_MODULE,
1529 .of_match_table = of_match_ptr(mxcnd_dt_ids),
1278 }, 1530 },
1279 .remove = __devexit_p(mxcnd_remove), 1531 .remove = __devexit_p(mxcnd_remove),
1280}; 1532};
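The mxc_nand.c changes above replace the run-time nfc_is_*()/cpu_is_*() checks with a per-variant mxc_nand_devtype_data structure that is picked once at probe time, either from the device-tree match table or from the platform-data fallback, after which every call site is an indirect call through host->devtype_data. The stand-alone sketch below illustrates that dispatch pattern only; all names in it (devtype_data fields, demo_* helpers) are invented for the example and are not the driver's actual definitions.

#include <stdio.h>
#include <string.h>

/* One ops/params structure per controller variant, selected once by a
 * "compatible" string, so no per-call version checks remain. */
struct devtype_data {
        const char *compatible;
        unsigned int regs_offset;                /* layout differences become data  */
        void (*send_cmd)(unsigned int cmd);      /* behaviour differences become ops */
};

static void send_cmd_v1_v2_demo(unsigned int cmd)
{
        printf("v1/v2 command path, cmd=0x%02x\n", cmd);
}

static void send_cmd_v3_demo(unsigned int cmd)
{
        printf("v3 command path, cmd=0x%02x\n", cmd);
}

static const struct devtype_data demo_devtypes[] = {
        { "fsl,imx27-nand", 0x0e00, send_cmd_v1_v2_demo },
        { "fsl,imx51-nand", 0x0000, send_cmd_v3_demo },
        { NULL, 0, NULL }                        /* sentinel */
};

/* Probe-time selection, the moral equivalent of of_match_device(). */
static const struct devtype_data *demo_match(const char *compatible)
{
        const struct devtype_data *d;

        for (d = demo_devtypes; d->compatible; d++)
                if (!strcmp(d->compatible, compatible))
                        return d;
        return NULL;
}

int main(void)
{
        const struct devtype_data *devtype = demo_match("fsl,imx51-nand");

        if (!devtype)
                return 1;
        /* later call sites are plain indirect calls */
        devtype->send_cmd(0xff);                 /* e.g. a reset command */
        printf("regs at base + 0x%x\n", devtype->regs_offset);
        return 0;
}

The same shape is what allows the imx21/imx27/imx25/imx51 tables above to also carry pure data such as regs_offset, spare0_offset and eccbytes instead of branching on the SoC at run time.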
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 47b19c0bb070..d47586cf64ce 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1066,15 +1066,17 @@ EXPORT_SYMBOL(nand_lock);
1066 * @mtd: mtd info structure 1066 * @mtd: mtd info structure
1067 * @chip: nand chip info structure 1067 * @chip: nand chip info structure
1068 * @buf: buffer to store read data 1068 * @buf: buffer to store read data
1069 * @oob_required: caller requires OOB data read to chip->oob_poi
1069 * @page: page number to read 1070 * @page: page number to read
1070 * 1071 *
1071 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1072 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1072 */ 1073 */
1073static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1074static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1074 uint8_t *buf, int page) 1075 uint8_t *buf, int oob_required, int page)
1075{ 1076{
1076 chip->read_buf(mtd, buf, mtd->writesize); 1077 chip->read_buf(mtd, buf, mtd->writesize);
1077 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1078 if (oob_required)
1079 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1078 return 0; 1080 return 0;
1079} 1081}
1080 1082
@@ -1083,13 +1085,14 @@ static int nand_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1083 * @mtd: mtd info structure 1085 * @mtd: mtd info structure
1084 * @chip: nand chip info structure 1086 * @chip: nand chip info structure
1085 * @buf: buffer to store read data 1087 * @buf: buffer to store read data
1088 * @oob_required: caller requires OOB data read to chip->oob_poi
1086 * @page: page number to read 1089 * @page: page number to read
1087 * 1090 *
1088 * We need a special oob layout and handling even when OOB isn't used. 1091 * We need a special oob layout and handling even when OOB isn't used.
1089 */ 1092 */
1090static int nand_read_page_raw_syndrome(struct mtd_info *mtd, 1093static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1091 struct nand_chip *chip, 1094 struct nand_chip *chip, uint8_t *buf,
1092 uint8_t *buf, int page) 1095 int oob_required, int page)
1093{ 1096{
1094 int eccsize = chip->ecc.size; 1097 int eccsize = chip->ecc.size;
1095 int eccbytes = chip->ecc.bytes; 1098 int eccbytes = chip->ecc.bytes;
@@ -1126,10 +1129,11 @@ static int nand_read_page_raw_syndrome(struct mtd_info *mtd,
1126 * @mtd: mtd info structure 1129 * @mtd: mtd info structure
1127 * @chip: nand chip info structure 1130 * @chip: nand chip info structure
1128 * @buf: buffer to store read data 1131 * @buf: buffer to store read data
1132 * @oob_required: caller requires OOB data read to chip->oob_poi
1129 * @page: page number to read 1133 * @page: page number to read
1130 */ 1134 */
1131static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1135static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1132 uint8_t *buf, int page) 1136 uint8_t *buf, int oob_required, int page)
1133{ 1137{
1134 int i, eccsize = chip->ecc.size; 1138 int i, eccsize = chip->ecc.size;
1135 int eccbytes = chip->ecc.bytes; 1139 int eccbytes = chip->ecc.bytes;
@@ -1138,8 +1142,9 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1138 uint8_t *ecc_calc = chip->buffers->ecccalc; 1142 uint8_t *ecc_calc = chip->buffers->ecccalc;
1139 uint8_t *ecc_code = chip->buffers->ecccode; 1143 uint8_t *ecc_code = chip->buffers->ecccode;
1140 uint32_t *eccpos = chip->ecc.layout->eccpos; 1144 uint32_t *eccpos = chip->ecc.layout->eccpos;
1145 unsigned int max_bitflips = 0;
1141 1146
1142 chip->ecc.read_page_raw(mtd, chip, buf, page); 1147 chip->ecc.read_page_raw(mtd, chip, buf, 1, page);
1143 1148
1144 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) 1149 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
1145 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1150 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
@@ -1154,12 +1159,14 @@ static int nand_read_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1154 int stat; 1159 int stat;
1155 1160
1156 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1161 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1157 if (stat < 0) 1162 if (stat < 0) {
1158 mtd->ecc_stats.failed++; 1163 mtd->ecc_stats.failed++;
1159 else 1164 } else {
1160 mtd->ecc_stats.corrected += stat; 1165 mtd->ecc_stats.corrected += stat;
1166 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1167 }
1161 } 1168 }
1162 return 0; 1169 return max_bitflips;
1163} 1170}
1164 1171
1165/** 1172/**
@@ -1180,6 +1187,7 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1180 int datafrag_len, eccfrag_len, aligned_len, aligned_pos; 1187 int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
1181 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1; 1188 int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
1182 int index = 0; 1189 int index = 0;
1190 unsigned int max_bitflips = 0;
1183 1191
1184 /* Column address within the page aligned to ECC size (256bytes) */ 1192 /* Column address within the page aligned to ECC size (256bytes) */
1185 start_step = data_offs / chip->ecc.size; 1193 start_step = data_offs / chip->ecc.size;
@@ -1244,12 +1252,14 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1244 1252
1245 stat = chip->ecc.correct(mtd, p, 1253 stat = chip->ecc.correct(mtd, p,
1246 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]); 1254 &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
1247 if (stat < 0) 1255 if (stat < 0) {
1248 mtd->ecc_stats.failed++; 1256 mtd->ecc_stats.failed++;
1249 else 1257 } else {
1250 mtd->ecc_stats.corrected += stat; 1258 mtd->ecc_stats.corrected += stat;
1259 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1260 }
1251 } 1261 }
1252 return 0; 1262 return max_bitflips;
1253} 1263}
1254 1264
1255/** 1265/**
@@ -1257,12 +1267,13 @@ static int nand_read_subpage(struct mtd_info *mtd, struct nand_chip *chip,
1257 * @mtd: mtd info structure 1267 * @mtd: mtd info structure
1258 * @chip: nand chip info structure 1268 * @chip: nand chip info structure
1259 * @buf: buffer to store read data 1269 * @buf: buffer to store read data
1270 * @oob_required: caller requires OOB data read to chip->oob_poi
1260 * @page: page number to read 1271 * @page: page number to read
1261 * 1272 *
1262 * Not for syndrome calculating ECC controllers which need a special oob layout. 1273 * Not for syndrome calculating ECC controllers which need a special oob layout.
1263 */ 1274 */
1264static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 1275static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1265 uint8_t *buf, int page) 1276 uint8_t *buf, int oob_required, int page)
1266{ 1277{
1267 int i, eccsize = chip->ecc.size; 1278 int i, eccsize = chip->ecc.size;
1268 int eccbytes = chip->ecc.bytes; 1279 int eccbytes = chip->ecc.bytes;
@@ -1271,6 +1282,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1271 uint8_t *ecc_calc = chip->buffers->ecccalc; 1282 uint8_t *ecc_calc = chip->buffers->ecccalc;
1272 uint8_t *ecc_code = chip->buffers->ecccode; 1283 uint8_t *ecc_code = chip->buffers->ecccode;
1273 uint32_t *eccpos = chip->ecc.layout->eccpos; 1284 uint32_t *eccpos = chip->ecc.layout->eccpos;
1285 unsigned int max_bitflips = 0;
1274 1286
1275 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1287 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1276 chip->ecc.hwctl(mtd, NAND_ECC_READ); 1288 chip->ecc.hwctl(mtd, NAND_ECC_READ);
@@ -1289,12 +1301,14 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1289 int stat; 1301 int stat;
1290 1302
1291 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]); 1303 stat = chip->ecc.correct(mtd, p, &ecc_code[i], &ecc_calc[i]);
1292 if (stat < 0) 1304 if (stat < 0) {
1293 mtd->ecc_stats.failed++; 1305 mtd->ecc_stats.failed++;
1294 else 1306 } else {
1295 mtd->ecc_stats.corrected += stat; 1307 mtd->ecc_stats.corrected += stat;
1308 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1309 }
1296 } 1310 }
1297 return 0; 1311 return max_bitflips;
1298} 1312}
1299 1313
1300/** 1314/**
@@ -1302,6 +1316,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1302 * @mtd: mtd info structure 1316 * @mtd: mtd info structure
1303 * @chip: nand chip info structure 1317 * @chip: nand chip info structure
1304 * @buf: buffer to store read data 1318 * @buf: buffer to store read data
1319 * @oob_required: caller requires OOB data read to chip->oob_poi
1305 * @page: page number to read 1320 * @page: page number to read
1306 * 1321 *
1307 * Hardware ECC for large page chips, require OOB to be read first. For this 1322 * Hardware ECC for large page chips, require OOB to be read first. For this
@@ -1311,7 +1326,7 @@ static int nand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
1311 * the data area, by overwriting the NAND manufacturer bad block markings. 1326 * the data area, by overwriting the NAND manufacturer bad block markings.
1312 */ 1327 */
1313static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd, 1328static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1314 struct nand_chip *chip, uint8_t *buf, int page) 1329 struct nand_chip *chip, uint8_t *buf, int oob_required, int page)
1315{ 1330{
1316 int i, eccsize = chip->ecc.size; 1331 int i, eccsize = chip->ecc.size;
1317 int eccbytes = chip->ecc.bytes; 1332 int eccbytes = chip->ecc.bytes;
@@ -1320,6 +1335,7 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1320 uint8_t *ecc_code = chip->buffers->ecccode; 1335 uint8_t *ecc_code = chip->buffers->ecccode;
1321 uint32_t *eccpos = chip->ecc.layout->eccpos; 1336 uint32_t *eccpos = chip->ecc.layout->eccpos;
1322 uint8_t *ecc_calc = chip->buffers->ecccalc; 1337 uint8_t *ecc_calc = chip->buffers->ecccalc;
1338 unsigned int max_bitflips = 0;
1323 1339
1324 /* Read the OOB area first */ 1340 /* Read the OOB area first */
1325 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page); 1341 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
@@ -1337,12 +1353,14 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1337 chip->ecc.calculate(mtd, p, &ecc_calc[i]); 1353 chip->ecc.calculate(mtd, p, &ecc_calc[i]);
1338 1354
1339 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL); 1355 stat = chip->ecc.correct(mtd, p, &ecc_code[i], NULL);
1340 if (stat < 0) 1356 if (stat < 0) {
1341 mtd->ecc_stats.failed++; 1357 mtd->ecc_stats.failed++;
1342 else 1358 } else {
1343 mtd->ecc_stats.corrected += stat; 1359 mtd->ecc_stats.corrected += stat;
1360 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1361 }
1344 } 1362 }
1345 return 0; 1363 return max_bitflips;
1346} 1364}
1347 1365
1348/** 1366/**
@@ -1350,19 +1368,21 @@ static int nand_read_page_hwecc_oob_first(struct mtd_info *mtd,
1350 * @mtd: mtd info structure 1368 * @mtd: mtd info structure
1351 * @chip: nand chip info structure 1369 * @chip: nand chip info structure
1352 * @buf: buffer to store read data 1370 * @buf: buffer to store read data
1371 * @oob_required: caller requires OOB data read to chip->oob_poi
1353 * @page: page number to read 1372 * @page: page number to read
1354 * 1373 *
1355 * The hw generator calculates the error syndrome automatically. Therefore we 1374 * The hw generator calculates the error syndrome automatically. Therefore we
1356 * need a special oob layout and handling. 1375 * need a special oob layout and handling.
1357 */ 1376 */
1358static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1377static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1359 uint8_t *buf, int page) 1378 uint8_t *buf, int oob_required, int page)
1360{ 1379{
1361 int i, eccsize = chip->ecc.size; 1380 int i, eccsize = chip->ecc.size;
1362 int eccbytes = chip->ecc.bytes; 1381 int eccbytes = chip->ecc.bytes;
1363 int eccsteps = chip->ecc.steps; 1382 int eccsteps = chip->ecc.steps;
1364 uint8_t *p = buf; 1383 uint8_t *p = buf;
1365 uint8_t *oob = chip->oob_poi; 1384 uint8_t *oob = chip->oob_poi;
1385 unsigned int max_bitflips = 0;
1366 1386
1367 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) { 1387 for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
1368 int stat; 1388 int stat;
@@ -1379,10 +1399,12 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1379 chip->read_buf(mtd, oob, eccbytes); 1399 chip->read_buf(mtd, oob, eccbytes);
1380 stat = chip->ecc.correct(mtd, p, oob, NULL); 1400 stat = chip->ecc.correct(mtd, p, oob, NULL);
1381 1401
1382 if (stat < 0) 1402 if (stat < 0) {
1383 mtd->ecc_stats.failed++; 1403 mtd->ecc_stats.failed++;
1384 else 1404 } else {
1385 mtd->ecc_stats.corrected += stat; 1405 mtd->ecc_stats.corrected += stat;
1406 max_bitflips = max_t(unsigned int, max_bitflips, stat);
1407 }
1386 1408
1387 oob += eccbytes; 1409 oob += eccbytes;
1388 1410
@@ -1397,7 +1419,7 @@ static int nand_read_page_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1397 if (i) 1419 if (i)
1398 chip->read_buf(mtd, oob, i); 1420 chip->read_buf(mtd, oob, i);
1399 1421
1400 return 0; 1422 return max_bitflips;
1401} 1423}
1402 1424
1403/** 1425/**
@@ -1459,11 +1481,9 @@ static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
1459static int nand_do_read_ops(struct mtd_info *mtd, loff_t from, 1481static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1460 struct mtd_oob_ops *ops) 1482 struct mtd_oob_ops *ops)
1461{ 1483{
1462 int chipnr, page, realpage, col, bytes, aligned; 1484 int chipnr, page, realpage, col, bytes, aligned, oob_required;
1463 struct nand_chip *chip = mtd->priv; 1485 struct nand_chip *chip = mtd->priv;
1464 struct mtd_ecc_stats stats; 1486 struct mtd_ecc_stats stats;
1465 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1466 int sndcmd = 1;
1467 int ret = 0; 1487 int ret = 0;
1468 uint32_t readlen = ops->len; 1488 uint32_t readlen = ops->len;
1469 uint32_t oobreadlen = ops->ooblen; 1489 uint32_t oobreadlen = ops->ooblen;
@@ -1471,6 +1491,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1471 mtd->oobavail : mtd->oobsize; 1491 mtd->oobavail : mtd->oobsize;
1472 1492
1473 uint8_t *bufpoi, *oob, *buf; 1493 uint8_t *bufpoi, *oob, *buf;
1494 unsigned int max_bitflips = 0;
1474 1495
1475 stats = mtd->ecc_stats; 1496 stats = mtd->ecc_stats;
1476 1497
@@ -1484,6 +1505,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1484 1505
1485 buf = ops->datbuf; 1506 buf = ops->datbuf;
1486 oob = ops->oobbuf; 1507 oob = ops->oobbuf;
1508 oob_required = oob ? 1 : 0;
1487 1509
1488 while (1) { 1510 while (1) {
1489 bytes = min(mtd->writesize - col, readlen); 1511 bytes = min(mtd->writesize - col, readlen);
@@ -1493,21 +1515,22 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1493 if (realpage != chip->pagebuf || oob) { 1515 if (realpage != chip->pagebuf || oob) {
1494 bufpoi = aligned ? buf : chip->buffers->databuf; 1516 bufpoi = aligned ? buf : chip->buffers->databuf;
1495 1517
1496 if (likely(sndcmd)) { 1518 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1497 chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page);
1498 sndcmd = 0;
1499 }
1500 1519
1501 /* Now read the page into the buffer */ 1520 /*
1521 * Now read the page into the buffer. Absent an error,
1522 * the read methods return max bitflips per ecc step.
1523 */
1502 if (unlikely(ops->mode == MTD_OPS_RAW)) 1524 if (unlikely(ops->mode == MTD_OPS_RAW))
1503 ret = chip->ecc.read_page_raw(mtd, chip, 1525 ret = chip->ecc.read_page_raw(mtd, chip, bufpoi,
1504 bufpoi, page); 1526 oob_required,
1527 page);
1505 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob) 1528 else if (!aligned && NAND_SUBPAGE_READ(chip) && !oob)
1506 ret = chip->ecc.read_subpage(mtd, chip, 1529 ret = chip->ecc.read_subpage(mtd, chip,
1507 col, bytes, bufpoi); 1530 col, bytes, bufpoi);
1508 else 1531 else
1509 ret = chip->ecc.read_page(mtd, chip, bufpoi, 1532 ret = chip->ecc.read_page(mtd, chip, bufpoi,
1510 page); 1533 oob_required, page);
1511 if (ret < 0) { 1534 if (ret < 0) {
1512 if (!aligned) 1535 if (!aligned)
1513 /* Invalidate page cache */ 1536 /* Invalidate page cache */
@@ -1515,22 +1538,25 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1515 break; 1538 break;
1516 } 1539 }
1517 1540
1541 max_bitflips = max_t(unsigned int, max_bitflips, ret);
1542
1518 /* Transfer not aligned data */ 1543 /* Transfer not aligned data */
1519 if (!aligned) { 1544 if (!aligned) {
1520 if (!NAND_SUBPAGE_READ(chip) && !oob && 1545 if (!NAND_SUBPAGE_READ(chip) && !oob &&
1521 !(mtd->ecc_stats.failed - stats.failed) && 1546 !(mtd->ecc_stats.failed - stats.failed) &&
1522 (ops->mode != MTD_OPS_RAW)) 1547 (ops->mode != MTD_OPS_RAW)) {
1523 chip->pagebuf = realpage; 1548 chip->pagebuf = realpage;
1524 else 1549 chip->pagebuf_bitflips = ret;
1550 } else {
1525 /* Invalidate page cache */ 1551 /* Invalidate page cache */
1526 chip->pagebuf = -1; 1552 chip->pagebuf = -1;
1553 }
1527 memcpy(buf, chip->buffers->databuf + col, bytes); 1554 memcpy(buf, chip->buffers->databuf + col, bytes);
1528 } 1555 }
1529 1556
1530 buf += bytes; 1557 buf += bytes;
1531 1558
1532 if (unlikely(oob)) { 1559 if (unlikely(oob)) {
1533
1534 int toread = min(oobreadlen, max_oobsize); 1560 int toread = min(oobreadlen, max_oobsize);
1535 1561
1536 if (toread) { 1562 if (toread) {
@@ -1541,13 +1567,7 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1541 } 1567 }
1542 1568
1543 if (!(chip->options & NAND_NO_READRDY)) { 1569 if (!(chip->options & NAND_NO_READRDY)) {
1544 /* 1570 /* Apply delay or wait for ready/busy pin */
1545 * Apply delay or wait for ready/busy pin. Do
1546 * this before the AUTOINCR check, so no
1547 * problems arise if a chip which does auto
1548 * increment is marked as NOAUTOINCR by the
1549 * board driver.
1550 */
1551 if (!chip->dev_ready) 1571 if (!chip->dev_ready)
1552 udelay(chip->chip_delay); 1572 udelay(chip->chip_delay);
1553 else 1573 else
@@ -1556,6 +1576,8 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1556 } else { 1576 } else {
1557 memcpy(buf, chip->buffers->databuf + col, bytes); 1577 memcpy(buf, chip->buffers->databuf + col, bytes);
1558 buf += bytes; 1578 buf += bytes;
1579 max_bitflips = max_t(unsigned int, max_bitflips,
1580 chip->pagebuf_bitflips);
1559 } 1581 }
1560 1582
1561 readlen -= bytes; 1583 readlen -= bytes;
@@ -1575,26 +1597,19 @@ static int nand_do_read_ops(struct mtd_info *mtd, loff_t from,
1575 chip->select_chip(mtd, -1); 1597 chip->select_chip(mtd, -1);
1576 chip->select_chip(mtd, chipnr); 1598 chip->select_chip(mtd, chipnr);
1577 } 1599 }
1578
1579 /*
1580 * Check, if the chip supports auto page increment or if we
1581 * have hit a block boundary.
1582 */
1583 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1584 sndcmd = 1;
1585 } 1600 }
1586 1601
1587 ops->retlen = ops->len - (size_t) readlen; 1602 ops->retlen = ops->len - (size_t) readlen;
1588 if (oob) 1603 if (oob)
1589 ops->oobretlen = ops->ooblen - oobreadlen; 1604 ops->oobretlen = ops->ooblen - oobreadlen;
1590 1605
1591 if (ret) 1606 if (ret < 0)
1592 return ret; 1607 return ret;
1593 1608
1594 if (mtd->ecc_stats.failed - stats.failed) 1609 if (mtd->ecc_stats.failed - stats.failed)
1595 return -EBADMSG; 1610 return -EBADMSG;
1596 1611
1597 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1612 return max_bitflips;
1598} 1613}
1599 1614
1600/** 1615/**
@@ -1630,17 +1645,13 @@ static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
1630 * @mtd: mtd info structure 1645 * @mtd: mtd info structure
1631 * @chip: nand chip info structure 1646 * @chip: nand chip info structure
1632 * @page: page number to read 1647 * @page: page number to read
1633 * @sndcmd: flag whether to issue read command or not
1634 */ 1648 */
1635static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip, 1649static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1636 int page, int sndcmd) 1650 int page)
1637{ 1651{
1638 if (sndcmd) { 1652 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1639 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
1640 sndcmd = 0;
1641 }
1642 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 1653 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
1643 return sndcmd; 1654 return 0;
1644} 1655}
1645 1656
1646/** 1657/**
@@ -1649,10 +1660,9 @@ static int nand_read_oob_std(struct mtd_info *mtd, struct nand_chip *chip,
1649 * @mtd: mtd info structure 1660 * @mtd: mtd info structure
1650 * @chip: nand chip info structure 1661 * @chip: nand chip info structure
1651 * @page: page number to read 1662 * @page: page number to read
1652 * @sndcmd: flag whether to issue read command or not
1653 */ 1663 */
1654static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip, 1664static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1655 int page, int sndcmd) 1665 int page)
1656{ 1666{
1657 uint8_t *buf = chip->oob_poi; 1667 uint8_t *buf = chip->oob_poi;
1658 int length = mtd->oobsize; 1668 int length = mtd->oobsize;
@@ -1679,7 +1689,7 @@ static int nand_read_oob_syndrome(struct mtd_info *mtd, struct nand_chip *chip,
1679 if (length > 0) 1689 if (length > 0)
1680 chip->read_buf(mtd, bufpoi, length); 1690 chip->read_buf(mtd, bufpoi, length);
1681 1691
1682 return 1; 1692 return 0;
1683} 1693}
1684 1694
1685/** 1695/**
@@ -1775,13 +1785,13 @@ static int nand_write_oob_syndrome(struct mtd_info *mtd,
1775static int nand_do_read_oob(struct mtd_info *mtd, loff_t from, 1785static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1776 struct mtd_oob_ops *ops) 1786 struct mtd_oob_ops *ops)
1777{ 1787{
1778 int page, realpage, chipnr, sndcmd = 1; 1788 int page, realpage, chipnr;
1779 struct nand_chip *chip = mtd->priv; 1789 struct nand_chip *chip = mtd->priv;
1780 struct mtd_ecc_stats stats; 1790 struct mtd_ecc_stats stats;
1781 int blkcheck = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
1782 int readlen = ops->ooblen; 1791 int readlen = ops->ooblen;
1783 int len; 1792 int len;
1784 uint8_t *buf = ops->oobbuf; 1793 uint8_t *buf = ops->oobbuf;
1794 int ret = 0;
1785 1795
1786 pr_debug("%s: from = 0x%08Lx, len = %i\n", 1796 pr_debug("%s: from = 0x%08Lx, len = %i\n",
1787 __func__, (unsigned long long)from, readlen); 1797 __func__, (unsigned long long)from, readlen);
@@ -1817,20 +1827,18 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1817 1827
1818 while (1) { 1828 while (1) {
1819 if (ops->mode == MTD_OPS_RAW) 1829 if (ops->mode == MTD_OPS_RAW)
1820 sndcmd = chip->ecc.read_oob_raw(mtd, chip, page, sndcmd); 1830 ret = chip->ecc.read_oob_raw(mtd, chip, page);
1821 else 1831 else
1822 sndcmd = chip->ecc.read_oob(mtd, chip, page, sndcmd); 1832 ret = chip->ecc.read_oob(mtd, chip, page);
1833
1834 if (ret < 0)
1835 break;
1823 1836
1824 len = min(len, readlen); 1837 len = min(len, readlen);
1825 buf = nand_transfer_oob(chip, buf, ops, len); 1838 buf = nand_transfer_oob(chip, buf, ops, len);
1826 1839
1827 if (!(chip->options & NAND_NO_READRDY)) { 1840 if (!(chip->options & NAND_NO_READRDY)) {
1828 /* 1841 /* Apply delay or wait for ready/busy pin */
1829 * Apply delay or wait for ready/busy pin. Do this
1830 * before the AUTOINCR check, so no problems arise if a
1831 * chip which does auto increment is marked as
1832 * NOAUTOINCR by the board driver.
1833 */
1834 if (!chip->dev_ready) 1842 if (!chip->dev_ready)
1835 udelay(chip->chip_delay); 1843 udelay(chip->chip_delay);
1836 else 1844 else
@@ -1851,16 +1859,12 @@ static int nand_do_read_oob(struct mtd_info *mtd, loff_t from,
1851 chip->select_chip(mtd, -1); 1859 chip->select_chip(mtd, -1);
1852 chip->select_chip(mtd, chipnr); 1860 chip->select_chip(mtd, chipnr);
1853 } 1861 }
1854
1855 /*
1856 * Check, if the chip supports auto page increment or if we
1857 * have hit a block boundary.
1858 */
1859 if (!NAND_CANAUTOINCR(chip) || !(page & blkcheck))
1860 sndcmd = 1;
1861 } 1862 }
1862 1863
1863 ops->oobretlen = ops->ooblen; 1864 ops->oobretlen = ops->ooblen - readlen;
1865
1866 if (ret < 0)
1867 return ret;
1864 1868
1865 if (mtd->ecc_stats.failed - stats.failed) 1869 if (mtd->ecc_stats.failed - stats.failed)
1866 return -EBADMSG; 1870 return -EBADMSG;
@@ -1919,14 +1923,16 @@ out:
1919 * @mtd: mtd info structure 1923 * @mtd: mtd info structure
1920 * @chip: nand chip info structure 1924 * @chip: nand chip info structure
1921 * @buf: data buffer 1925 * @buf: data buffer
1926 * @oob_required: must write chip->oob_poi to OOB
1922 * 1927 *
1923 * Not for syndrome calculating ECC controllers, which use a special oob layout. 1928 * Not for syndrome calculating ECC controllers, which use a special oob layout.
1924 */ 1929 */
1925static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1930static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1926 const uint8_t *buf) 1931 const uint8_t *buf, int oob_required)
1927{ 1932{
1928 chip->write_buf(mtd, buf, mtd->writesize); 1933 chip->write_buf(mtd, buf, mtd->writesize);
1929 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 1934 if (oob_required)
1935 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
1930} 1936}
1931 1937
1932/** 1938/**
@@ -1934,12 +1940,13 @@ static void nand_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
1934 * @mtd: mtd info structure 1940 * @mtd: mtd info structure
1935 * @chip: nand chip info structure 1941 * @chip: nand chip info structure
1936 * @buf: data buffer 1942 * @buf: data buffer
1943 * @oob_required: must write chip->oob_poi to OOB
1937 * 1944 *
1938 * We need a special oob layout and handling even when ECC isn't checked. 1945 * We need a special oob layout and handling even when ECC isn't checked.
1939 */ 1946 */
1940static void nand_write_page_raw_syndrome(struct mtd_info *mtd, 1947static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1941 struct nand_chip *chip, 1948 struct nand_chip *chip,
1942 const uint8_t *buf) 1949 const uint8_t *buf, int oob_required)
1943{ 1950{
1944 int eccsize = chip->ecc.size; 1951 int eccsize = chip->ecc.size;
1945 int eccbytes = chip->ecc.bytes; 1952 int eccbytes = chip->ecc.bytes;
@@ -1973,9 +1980,10 @@ static void nand_write_page_raw_syndrome(struct mtd_info *mtd,
1973 * @mtd: mtd info structure 1980 * @mtd: mtd info structure
1974 * @chip: nand chip info structure 1981 * @chip: nand chip info structure
1975 * @buf: data buffer 1982 * @buf: data buffer
1983 * @oob_required: must write chip->oob_poi to OOB
1976 */ 1984 */
1977static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip, 1985static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1978 const uint8_t *buf) 1986 const uint8_t *buf, int oob_required)
1979{ 1987{
1980 int i, eccsize = chip->ecc.size; 1988 int i, eccsize = chip->ecc.size;
1981 int eccbytes = chip->ecc.bytes; 1989 int eccbytes = chip->ecc.bytes;
@@ -1991,7 +1999,7 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1991 for (i = 0; i < chip->ecc.total; i++) 1999 for (i = 0; i < chip->ecc.total; i++)
1992 chip->oob_poi[eccpos[i]] = ecc_calc[i]; 2000 chip->oob_poi[eccpos[i]] = ecc_calc[i];
1993 2001
1994 chip->ecc.write_page_raw(mtd, chip, buf); 2002 chip->ecc.write_page_raw(mtd, chip, buf, 1);
1995} 2003}
1996 2004
1997/** 2005/**
@@ -1999,9 +2007,10 @@ static void nand_write_page_swecc(struct mtd_info *mtd, struct nand_chip *chip,
1999 * @mtd: mtd info structure 2007 * @mtd: mtd info structure
2000 * @chip: nand chip info structure 2008 * @chip: nand chip info structure
2001 * @buf: data buffer 2009 * @buf: data buffer
2010 * @oob_required: must write chip->oob_poi to OOB
2002 */ 2011 */
2003static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 2012static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2004 const uint8_t *buf) 2013 const uint8_t *buf, int oob_required)
2005{ 2014{
2006 int i, eccsize = chip->ecc.size; 2015 int i, eccsize = chip->ecc.size;
2007 int eccbytes = chip->ecc.bytes; 2016 int eccbytes = chip->ecc.bytes;
@@ -2027,12 +2036,14 @@ static void nand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
2027 * @mtd: mtd info structure 2036 * @mtd: mtd info structure
2028 * @chip: nand chip info structure 2037 * @chip: nand chip info structure
2029 * @buf: data buffer 2038 * @buf: data buffer
2039 * @oob_required: must write chip->oob_poi to OOB
2030 * 2040 *
2031 * The hw generator calculates the error syndrome automatically. Therefore we 2041 * The hw generator calculates the error syndrome automatically. Therefore we
2032 * need a special oob layout and handling. 2042 * need a special oob layout and handling.
2033 */ 2043 */
2034static void nand_write_page_syndrome(struct mtd_info *mtd, 2044static void nand_write_page_syndrome(struct mtd_info *mtd,
2035 struct nand_chip *chip, const uint8_t *buf) 2045 struct nand_chip *chip,
2046 const uint8_t *buf, int oob_required)
2036{ 2047{
2037 int i, eccsize = chip->ecc.size; 2048 int i, eccsize = chip->ecc.size;
2038 int eccbytes = chip->ecc.bytes; 2049 int eccbytes = chip->ecc.bytes;
@@ -2071,21 +2082,23 @@ static void nand_write_page_syndrome(struct mtd_info *mtd,
2071 * @mtd: MTD device structure 2082 * @mtd: MTD device structure
2072 * @chip: NAND chip descriptor 2083 * @chip: NAND chip descriptor
2073 * @buf: the data to write 2084 * @buf: the data to write
2085 * @oob_required: must write chip->oob_poi to OOB
2074 * @page: page number to write 2086 * @page: page number to write
2075 * @cached: cached programming 2087 * @cached: cached programming
2076 * @raw: use _raw version of write_page 2088 * @raw: use _raw version of write_page
2077 */ 2089 */
2078static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip, 2090static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2079 const uint8_t *buf, int page, int cached, int raw) 2091 const uint8_t *buf, int oob_required, int page,
2092 int cached, int raw)
2080{ 2093{
2081 int status; 2094 int status;
2082 2095
2083 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); 2096 chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page);
2084 2097
2085 if (unlikely(raw)) 2098 if (unlikely(raw))
2086 chip->ecc.write_page_raw(mtd, chip, buf); 2099 chip->ecc.write_page_raw(mtd, chip, buf, oob_required);
2087 else 2100 else
2088 chip->ecc.write_page(mtd, chip, buf); 2101 chip->ecc.write_page(mtd, chip, buf, oob_required);
2089 2102
2090 /* 2103 /*
2091 * Cached progamming disabled for now. Not sure if it's worth the 2104 * Cached progamming disabled for now. Not sure if it's worth the
@@ -2118,6 +2131,9 @@ static int nand_write_page(struct mtd_info *mtd, struct nand_chip *chip,
2118 2131
2119 if (chip->verify_buf(mtd, buf, mtd->writesize)) 2132 if (chip->verify_buf(mtd, buf, mtd->writesize))
2120 return -EIO; 2133 return -EIO;
2134
2135 /* Make sure the next page prog is preceded by a status read */
2136 chip->cmdfunc(mtd, NAND_CMD_STATUS, -1, -1);
2121#endif 2137#endif
2122 return 0; 2138 return 0;
2123} 2139}
@@ -2202,6 +2218,7 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2202 uint8_t *oob = ops->oobbuf; 2218 uint8_t *oob = ops->oobbuf;
2203 uint8_t *buf = ops->datbuf; 2219 uint8_t *buf = ops->datbuf;
2204 int ret, subpage; 2220 int ret, subpage;
2221 int oob_required = oob ? 1 : 0;
2205 2222
2206 ops->retlen = 0; 2223 ops->retlen = 0;
2207 if (!writelen) 2224 if (!writelen)
@@ -2264,8 +2281,8 @@ static int nand_do_write_ops(struct mtd_info *mtd, loff_t to,
2264 memset(chip->oob_poi, 0xff, mtd->oobsize); 2281 memset(chip->oob_poi, 0xff, mtd->oobsize);
2265 } 2282 }
2266 2283
2267 ret = chip->write_page(mtd, chip, wbuf, page, cached, 2284 ret = chip->write_page(mtd, chip, wbuf, oob_required, page,
2268 (ops->mode == MTD_OPS_RAW)); 2285 cached, (ops->mode == MTD_OPS_RAW));
2269 if (ret) 2286 if (ret)
2270 break; 2287 break;
2271 2288
@@ -2898,8 +2915,7 @@ static int nand_flash_detect_onfi(struct mtd_info *mtd, struct nand_chip *chip,
2898 *busw = NAND_BUSWIDTH_16; 2915 *busw = NAND_BUSWIDTH_16;
2899 2916
2900 chip->options &= ~NAND_CHIPOPTIONS_MSK; 2917 chip->options &= ~NAND_CHIPOPTIONS_MSK;
2901 chip->options |= (NAND_NO_READRDY | 2918 chip->options |= NAND_NO_READRDY & NAND_CHIPOPTIONS_MSK;
2902 NAND_NO_AUTOINCR) & NAND_CHIPOPTIONS_MSK;
2903 2919
2904 pr_info("ONFI flash detected\n"); 2920 pr_info("ONFI flash detected\n");
2905 return 1; 2921 return 1;
@@ -3076,11 +3092,6 @@ static struct nand_flash_dev *nand_get_flash_type(struct mtd_info *mtd,
3076 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS; 3092 chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
3077ident_done: 3093ident_done:
3078 3094
3079 /*
3080 * Set chip as a default. Board drivers can override it, if necessary.
3081 */
3082 chip->options |= NAND_NO_AUTOINCR;
3083
3084 /* Try to identify manufacturer */ 3095 /* Try to identify manufacturer */
3085 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) { 3096 for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
3086 if (nand_manuf_ids[maf_idx].id == *maf_id) 3097 if (nand_manuf_ids[maf_idx].id == *maf_id)
@@ -3154,10 +3165,11 @@ ident_done:
3154 if (mtd->writesize > 512 && chip->cmdfunc == nand_command) 3165 if (mtd->writesize > 512 && chip->cmdfunc == nand_command)
3155 chip->cmdfunc = nand_command_lp; 3166 chip->cmdfunc = nand_command_lp;
3156 3167
3157 pr_info("NAND device: Manufacturer ID:" 3168 pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
3158 " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id, *dev_id, 3169 " page size: %d, OOB size: %d\n",
3159 nand_manuf_ids[maf_idx].name, 3170 *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
3160 chip->onfi_version ? chip->onfi_params.model : type->name); 3171 chip->onfi_version ? chip->onfi_params.model : type->name,
3172 mtd->writesize, mtd->oobsize);
3161 3173
3162 return type; 3174 return type;
3163} 3175}
@@ -3329,8 +3341,13 @@ int nand_scan_tail(struct mtd_info *mtd)
3329 if (!chip->ecc.write_oob) 3341 if (!chip->ecc.write_oob)
3330 chip->ecc.write_oob = nand_write_oob_syndrome; 3342 chip->ecc.write_oob = nand_write_oob_syndrome;
3331 3343
3332 if (mtd->writesize >= chip->ecc.size) 3344 if (mtd->writesize >= chip->ecc.size) {
3345 if (!chip->ecc.strength) {
3346 pr_warn("Driver must set ecc.strength when using hardware ECC\n");
3347 BUG();
3348 }
3333 break; 3349 break;
3350 }
3334 pr_warn("%d byte HW ECC not possible on " 3351 pr_warn("%d byte HW ECC not possible on "
3335 "%d byte page size, fallback to SW ECC\n", 3352 "%d byte page size, fallback to SW ECC\n",
3336 chip->ecc.size, mtd->writesize); 3353 chip->ecc.size, mtd->writesize);
@@ -3385,7 +3402,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3385 BUG(); 3402 BUG();
3386 } 3403 }
3387 chip->ecc.strength = 3404 chip->ecc.strength =
3388 chip->ecc.bytes*8 / fls(8*chip->ecc.size); 3405 chip->ecc.bytes * 8 / fls(8 * chip->ecc.size);
3389 break; 3406 break;
3390 3407
3391 case NAND_ECC_NONE: 3408 case NAND_ECC_NONE:
@@ -3483,7 +3500,7 @@ int nand_scan_tail(struct mtd_info *mtd)
3483 3500
3484 /* propagate ecc info to mtd_info */ 3501 /* propagate ecc info to mtd_info */
3485 mtd->ecclayout = chip->ecc.layout; 3502 mtd->ecclayout = chip->ecc.layout;
3486 mtd->ecc_strength = chip->ecc.strength * chip->ecc.steps; 3503 mtd->ecc_strength = chip->ecc.strength;
3487 3504
3488 /* Check, if we should skip the bad block table scan */ 3505 /* Check, if we should skip the bad block table scan */
3489 if (chip->options & NAND_SKIP_BBTSCAN) 3506 if (chip->options & NAND_SKIP_BBTSCAN)
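Two API changes run through the nand_base.c hunks above: the page read/write helpers gain an oob_required flag so OOB transfers only happen when the caller asked for them, and the read path now propagates the largest number of corrected bitflips in any single ECC step up through nand_do_read_ops() instead of collapsing everything to 0 or -EUCLEAN on the spot. The stand-alone sketch below mirrors only that bitflip accounting; the threshold policy at the end is an assumption about how a consumer might use the value and is not part of this diff.

#include <stdio.h>

/* Per-step correction results for the demo; a negative value would mean
 * an uncorrectable step (the real code counts those in ecc_stats.failed). */
static const int demo_step_flips[] = { 0, 2, 1, 0 };

/* Same pattern as the hunks above: track the worst single ECC step while
 * still summing the total number of corrected bits. */
static unsigned int demo_read_page(unsigned int *corrected)
{
        unsigned int max_bitflips = 0;
        size_t i;

        for (i = 0; i < sizeof(demo_step_flips) / sizeof(demo_step_flips[0]); i++) {
                int stat = demo_step_flips[i];

                if (stat < 0)
                        continue;               /* real code: ecc_stats.failed++ */
                *corrected += stat;
                if ((unsigned int)stat > max_bitflips)
                        max_bitflips = stat;
        }
        return max_bitflips;                    /* 0..N, not -EUCLEAN */
}

int main(void)
{
        unsigned int corrected = 0;
        unsigned int worst = demo_read_page(&corrected);
        unsigned int threshold = 3;             /* hypothetical per-device limit */

        printf("total corrected: %u, worst ECC step: %u\n", corrected, worst);
        /* assumed consumer policy: flag the read once the worst step
         * approaches what the ECC can still correct */
        printf("%s\n", worst >= threshold ? "reread/scrub advised" : "clean");
        return 0;
}

Returning the per-step maximum keeps more information than the old boolean "corrected something" result, which is also why nand_scan_tail() above now insists that hardware-ECC drivers declare ecc.strength.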
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 20a112f591fe..30d1319ff065 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -324,6 +324,7 @@ static int scan_read_raw_oob(struct mtd_info *mtd, uint8_t *buf, loff_t offs,
324 324
325 buf += mtd->oobsize + mtd->writesize; 325 buf += mtd->oobsize + mtd->writesize;
326 len -= mtd->writesize; 326 len -= mtd->writesize;
327 offs += mtd->writesize;
327 } 328 }
328 return 0; 329 return 0;
329} 330}
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index af4fe8ca7b5e..621b70b7a159 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -70,7 +70,7 @@ struct nand_flash_dev nand_flash_ids[] = {
70 * These are the new chips with large page size. The pagesize and the 70 * These are the new chips with large page size. The pagesize and the
71 * erasesize is determined from the extended id bytes 71 * erasesize is determined from the extended id bytes
72 */ 72 */
73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY | NAND_NO_AUTOINCR) 73#define LP_OPTIONS (NAND_SAMSUNG_LP_OPTIONS | NAND_NO_READRDY)
74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16) 74#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
75 75
76 /* 512 Megabit */ 76 /* 512 Megabit */
@@ -157,9 +157,7 @@ struct nand_flash_dev nand_flash_ids[] = {
157 * writes possible, but not implemented now 157 * writes possible, but not implemented now
158 */ 158 */
159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, 159 {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000,
160 NAND_IS_AND | NAND_NO_AUTOINCR |NAND_NO_READRDY | NAND_4PAGE_ARRAY | 160 NAND_IS_AND | NAND_NO_READRDY | NAND_4PAGE_ARRAY | BBT_AUTO_REFRESH},
161 BBT_AUTO_REFRESH
162 },
163 161
164 {NULL,} 162 {NULL,}
165}; 163};
diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 261f478f8cc3..6cc8fbfabb8e 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -268,7 +268,6 @@ MODULE_PARM_DESC(bch, "Enable BCH ecc and set how many bits should "
268#define OPT_PAGE512 0x00000002 /* 512-byte page chips */ 268#define OPT_PAGE512 0x00000002 /* 512-byte page chips */
269#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */ 269#define OPT_PAGE2048 0x00000008 /* 2048-byte page chips */
270#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */ 270#define OPT_SMARTMEDIA 0x00000010 /* SmartMedia technology chips */
271#define OPT_AUTOINCR 0x00000020 /* page number auto incrementation is possible */
272#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */ 271#define OPT_PAGE512_8BIT 0x00000040 /* 512-byte page chips with 8-bit bus width */
273#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */ 272#define OPT_PAGE4096 0x00000080 /* 4096-byte page chips */
274#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */ 273#define OPT_LARGEPAGE (OPT_PAGE2048 | OPT_PAGE4096) /* 2048 & 4096-byte page chips */
@@ -594,7 +593,7 @@ static int init_nandsim(struct mtd_info *mtd)
594 ns->options |= OPT_PAGE256; 593 ns->options |= OPT_PAGE256;
595 } 594 }
596 else if (ns->geom.pgsz == 512) { 595 else if (ns->geom.pgsz == 512) {
597 ns->options |= (OPT_PAGE512 | OPT_AUTOINCR); 596 ns->options |= OPT_PAGE512;
598 if (ns->busw == 8) 597 if (ns->busw == 8)
599 ns->options |= OPT_PAGE512_8BIT; 598 ns->options |= OPT_PAGE512_8BIT;
600 } else if (ns->geom.pgsz == 2048) { 599 } else if (ns->geom.pgsz == 2048) {
@@ -663,8 +662,6 @@ static int init_nandsim(struct mtd_info *mtd)
663 for (i = 0; nand_flash_ids[i].name != NULL; i++) { 662 for (i = 0; nand_flash_ids[i].name != NULL; i++) {
664 if (second_id_byte != nand_flash_ids[i].id) 663 if (second_id_byte != nand_flash_ids[i].id)
665 continue; 664 continue;
666 if (!(nand_flash_ids[i].options & NAND_NO_AUTOINCR))
667 ns->options |= OPT_AUTOINCR;
668 } 665 }
669 666
670 if (ns->busw == 16) 667 if (ns->busw == 16)
@@ -1936,20 +1933,8 @@ static u_char ns_nand_read_byte(struct mtd_info *mtd)
1936 if (ns->regs.count == ns->regs.num) { 1933 if (ns->regs.count == ns->regs.num) {
1937 NS_DBG("read_byte: all bytes were read\n"); 1934 NS_DBG("read_byte: all bytes were read\n");
1938 1935
1939 /* 1936 if (NS_STATE(ns->nxstate) == STATE_READY)
1940 * The OPT_AUTOINCR allows to read next consecutive pages without
1941 * new read operation cycle.
1942 */
1943 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) {
1944 ns->regs.count = 0;
1945 if (ns->regs.row + 1 < ns->geom.pgnum)
1946 ns->regs.row += 1;
1947 NS_DBG("read_byte: switch to the next page (%#x)\n", ns->regs.row);
1948 do_state_action(ns, ACTION_CPY);
1949 }
1950 else if (NS_STATE(ns->nxstate) == STATE_READY)
1951 switch_state(ns); 1937 switch_state(ns);
1952
1953 } 1938 }
1954 1939
1955 return outb; 1940 return outb;
@@ -2203,14 +2188,7 @@ static void ns_nand_read_buf(struct mtd_info *mtd, u_char *buf, int len)
2203 ns->regs.count += len; 2188 ns->regs.count += len;
2204 2189
2205 if (ns->regs.count == ns->regs.num) { 2190 if (ns->regs.count == ns->regs.num) {
2206 if ((ns->options & OPT_AUTOINCR) && NS_STATE(ns->state) == STATE_DATAOUT) { 2191 if (NS_STATE(ns->nxstate) == STATE_READY)
2207 ns->regs.count = 0;
2208 if (ns->regs.row + 1 < ns->geom.pgnum)
2209 ns->regs.row += 1;
2210 NS_DBG("read_buf: switch to the next page (%#x)\n", ns->regs.row);
2211 do_state_action(ns, ACTION_CPY);
2212 }
2213 else if (NS_STATE(ns->nxstate) == STATE_READY)
2214 switch_state(ns); 2192 switch_state(ns);
2215 } 2193 }
2216 2194
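The nandsim changes above drop the simulated page auto-increment along with the NAND_NO_AUTOINCR/OPT_AUTOINCR flags; every page read now needs its own read command cycle. Nothing changes for MTD users, since mtd_read() already iterates page by page. A hedged sketch of the caller-side view, with buffer and offset names assumed:

    size_t retlen;
    int err;

    /* Reads two consecutive pages; internally this is now always issued
     * as two separate READ0 cycles rather than one auto-incremented read. */
    err = mtd_read(mtd, offs, 2 * mtd->writesize, &retlen, buf);
    if (err == -EUCLEAN)
        err = 0;    /* data corrected; block may need scrubbing later */
    if (err)
        return err;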
diff --git a/drivers/mtd/nand/omap2.c b/drivers/mtd/nand/omap2.c
index c2b0bba9d8b3..d7f681d0c9b9 100644
--- a/drivers/mtd/nand/omap2.c
+++ b/drivers/mtd/nand/omap2.c
@@ -21,6 +21,10 @@
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/slab.h> 22#include <linux/slab.h>
23 23
24#ifdef CONFIG_MTD_NAND_OMAP_BCH
25#include <linux/bch.h>
26#endif
27
24#include <plat/dma.h> 28#include <plat/dma.h>
25#include <plat/gpmc.h> 29#include <plat/gpmc.h>
26#include <plat/nand.h> 30#include <plat/nand.h>
@@ -127,6 +131,11 @@ struct omap_nand_info {
127 } iomode; 131 } iomode;
128 u_char *buf; 132 u_char *buf;
129 int buf_len; 133 int buf_len;
134
135#ifdef CONFIG_MTD_NAND_OMAP_BCH
136 struct bch_control *bch;
137 struct nand_ecclayout ecclayout;
138#endif
130}; 139};
131 140
132/** 141/**
@@ -402,7 +411,7 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
402 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write); 411 PREFETCH_FIFOTHRESHOLD_MAX, 0x1, len, is_write);
403 if (ret) 412 if (ret)
404 /* PFPW engine is busy, use cpu copy method */ 413 /* PFPW engine is busy, use cpu copy method */
405 goto out_copy; 414 goto out_copy_unmap;
406 415
407 init_completion(&info->comp); 416 init_completion(&info->comp);
408 417
@@ -421,6 +430,8 @@ static inline int omap_nand_dma_transfer(struct mtd_info *mtd, void *addr,
421 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir); 430 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
422 return 0; 431 return 0;
423 432
433out_copy_unmap:
434 dma_unmap_single(&info->pdev->dev, dma_addr, len, dir);
424out_copy: 435out_copy:
425 if (info->nand.options & NAND_BUSWIDTH_16) 436 if (info->nand.options & NAND_BUSWIDTH_16)
426 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len) 437 is_write == 0 ? omap_read_buf16(mtd, (u_char *) addr, len)
@@ -879,7 +890,7 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
879 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 890 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
880 mtd); 891 mtd);
881 unsigned long timeo = jiffies; 892 unsigned long timeo = jiffies;
882 int status = NAND_STATUS_FAIL, state = this->state; 893 int status, state = this->state;
883 894
884 if (state == FL_ERASING) 895 if (state == FL_ERASING)
885 timeo += (HZ * 400) / 1000; 896 timeo += (HZ * 400) / 1000;
@@ -894,6 +905,8 @@ static int omap_wait(struct mtd_info *mtd, struct nand_chip *chip)
894 break; 905 break;
895 cond_resched(); 906 cond_resched();
896 } 907 }
908
909 status = gpmc_nand_read(info->gpmc_cs, GPMC_NAND_DATA);
897 return status; 910 return status;
898} 911}
899 912
@@ -925,6 +938,226 @@ static int omap_dev_ready(struct mtd_info *mtd)
925 return 1; 938 return 1;
926} 939}
927 940
941#ifdef CONFIG_MTD_NAND_OMAP_BCH
942
943/**
944 * omap3_enable_hwecc_bch - Program OMAP3 GPMC to perform BCH ECC correction
945 * @mtd: MTD device structure
946 * @mode: Read/Write mode
947 */
948static void omap3_enable_hwecc_bch(struct mtd_info *mtd, int mode)
949{
950 int nerrors;
951 unsigned int dev_width;
952 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
953 mtd);
954 struct nand_chip *chip = mtd->priv;
955
956 nerrors = (info->nand.ecc.bytes == 13) ? 8 : 4;
957 dev_width = (chip->options & NAND_BUSWIDTH_16) ? 1 : 0;
958 /*
959 * Program GPMC to perform correction on one 512-byte sector at a time.
960 * Using 4 sectors at a time (i.e. ecc.size = 2048) is also possible and
961 * gives a slight (5%) performance gain (but requires additional code).
962 */
963 (void)gpmc_enable_hwecc_bch(info->gpmc_cs, mode, dev_width, 1, nerrors);
964}
965
966/**
967 * omap3_calculate_ecc_bch4 - Generate 7 bytes of ECC bytes
968 * @mtd: MTD device structure
969 * @dat: The pointer to data on which ecc is computed
970 * @ecc_code: The ecc_code buffer
971 */
972static int omap3_calculate_ecc_bch4(struct mtd_info *mtd, const u_char *dat,
973 u_char *ecc_code)
974{
975 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
976 mtd);
977 return gpmc_calculate_ecc_bch4(info->gpmc_cs, dat, ecc_code);
978}
979
980/**
981 * omap3_calculate_ecc_bch8 - Generate 13 bytes of ECC bytes
982 * @mtd: MTD device structure
983 * @dat: The pointer to data on which ecc is computed
984 * @ecc_code: The ecc_code buffer
985 */
986static int omap3_calculate_ecc_bch8(struct mtd_info *mtd, const u_char *dat,
987 u_char *ecc_code)
988{
989 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
990 mtd);
991 return gpmc_calculate_ecc_bch8(info->gpmc_cs, dat, ecc_code);
992}
993
994/**
995 * omap3_correct_data_bch - Decode received data and correct errors
996 * @mtd: MTD device structure
997 * @data: page data
998 * @read_ecc: ecc read from nand flash
999 * @calc_ecc: ecc read from HW ECC registers
1000 */
1001static int omap3_correct_data_bch(struct mtd_info *mtd, u_char *data,
1002 u_char *read_ecc, u_char *calc_ecc)
1003{
1004 int i, count;
1005 /* cannot correct more than 8 errors */
1006 unsigned int errloc[8];
1007 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1008 mtd);
1009
1010 count = decode_bch(info->bch, NULL, 512, read_ecc, calc_ecc, NULL,
1011 errloc);
1012 if (count > 0) {
1013 /* correct errors */
1014 for (i = 0; i < count; i++) {
1015 /* correct data only, not ecc bytes */
1016 if (errloc[i] < 8*512)
1017 data[errloc[i]/8] ^= 1 << (errloc[i] & 7);
1018 pr_debug("corrected bitflip %u\n", errloc[i]);
1019 }
1020 } else if (count < 0) {
1021 pr_err("ecc unrecoverable error\n");
1022 }
1023 return count;
1024}
1025
1026/**
1027 * omap3_free_bch - Release BCH ecc resources
1028 * @mtd: MTD device structure
1029 */
1030static void omap3_free_bch(struct mtd_info *mtd)
1031{
1032 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1033 mtd);
1034 if (info->bch) {
1035 free_bch(info->bch);
1036 info->bch = NULL;
1037 }
1038}
1039
1040/**
1041 * omap3_init_bch - Initialize BCH ECC
1042 * @mtd: MTD device structure
1043 * @ecc_opt: OMAP ECC mode (OMAP_ECC_BCH4_CODE_HW or OMAP_ECC_BCH8_CODE_HW)
1044 */
1045static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1046{
1047 int ret, max_errors;
1048 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1049 mtd);
1050#ifdef CONFIG_MTD_NAND_OMAP_BCH8
1051 const int hw_errors = 8;
1052#else
1053 const int hw_errors = 4;
1054#endif
1055 info->bch = NULL;
1056
1057 max_errors = (ecc_opt == OMAP_ECC_BCH8_CODE_HW) ? 8 : 4;
1058 if (max_errors != hw_errors) {
1059 pr_err("cannot configure %d-bit BCH ecc, only %d-bit supported",
1060 max_errors, hw_errors);
1061 goto fail;
1062 }
1063
1064 /* initialize GPMC BCH engine */
1065 ret = gpmc_init_hwecc_bch(info->gpmc_cs, 1, max_errors);
1066 if (ret)
1067 goto fail;
1068
1069 /* software bch library is only used to detect and locate errors */
1070 info->bch = init_bch(13, max_errors, 0x201b /* hw polynomial */);
1071 if (!info->bch)
1072 goto fail;
1073
1074 info->nand.ecc.size = 512;
1075 info->nand.ecc.hwctl = omap3_enable_hwecc_bch;
1076 info->nand.ecc.correct = omap3_correct_data_bch;
1077 info->nand.ecc.mode = NAND_ECC_HW;
1078
1079 /*
1080 * The number of corrected errors in an ecc block that will trigger
1081 * block scrubbing defaults to the ecc strength (4 or 8).
1082 * Set mtd->bitflip_threshold here to define a custom threshold.
1083 */
1084
1085 if (max_errors == 8) {
1086 info->nand.ecc.strength = 8;
1087 info->nand.ecc.bytes = 13;
1088 info->nand.ecc.calculate = omap3_calculate_ecc_bch8;
1089 } else {
1090 info->nand.ecc.strength = 4;
1091 info->nand.ecc.bytes = 7;
1092 info->nand.ecc.calculate = omap3_calculate_ecc_bch4;
1093 }
1094
1095 pr_info("enabling NAND BCH ecc with %d-bit correction\n", max_errors);
1096 return 0;
1097fail:
1098 omap3_free_bch(mtd);
1099 return -1;
1100}
1101
1102/**
1103 * omap3_init_bch_tail - Build an oob layout for BCH ECC correction.
1104 * @mtd: MTD device structure
1105 */
1106static int omap3_init_bch_tail(struct mtd_info *mtd)
1107{
1108 int i, steps;
1109 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1110 mtd);
1111 struct nand_ecclayout *layout = &info->ecclayout;
1112
1113 /* build oob layout */
1114 steps = mtd->writesize/info->nand.ecc.size;
1115 layout->eccbytes = steps*info->nand.ecc.bytes;
1116
1117 /* do not bother creating special oob layouts for small page devices */
1118 if (mtd->oobsize < 64) {
1119 pr_err("BCH ecc is not supported on small page devices\n");
1120 goto fail;
1121 }
1122
1123 /* reserve 2 bytes for bad block marker */
1124 if (layout->eccbytes+2 > mtd->oobsize) {
1125 pr_err("no oob layout available for oobsize %d eccbytes %u\n",
1126 mtd->oobsize, layout->eccbytes);
1127 goto fail;
1128 }
1129
1130 /* put ecc bytes at oob tail */
1131 for (i = 0; i < layout->eccbytes; i++)
1132 layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
1133
1134 layout->oobfree[0].offset = 2;
1135 layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
1136 info->nand.ecc.layout = layout;
1137
1138 if (!(info->nand.options & NAND_BUSWIDTH_16))
1139 info->nand.badblock_pattern = &bb_descrip_flashbased;
1140 return 0;
1141fail:
1142 omap3_free_bch(mtd);
1143 return -1;
1144}
1145
1146#else
1147static int omap3_init_bch(struct mtd_info *mtd, int ecc_opt)
1148{
1149 pr_err("CONFIG_MTD_NAND_OMAP_BCH is not enabled\n");
1150 return -1;
1151}
1152static int omap3_init_bch_tail(struct mtd_info *mtd)
1153{
1154 return -1;
1155}
1156static void omap3_free_bch(struct mtd_info *mtd)
1157{
1158}
1159#endif /* CONFIG_MTD_NAND_OMAP_BCH */
1160
928static int __devinit omap_nand_probe(struct platform_device *pdev) 1161static int __devinit omap_nand_probe(struct platform_device *pdev)
929{ 1162{
930 struct omap_nand_info *info; 1163 struct omap_nand_info *info;
@@ -1063,6 +1296,13 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1063 info->nand.ecc.hwctl = omap_enable_hwecc; 1296 info->nand.ecc.hwctl = omap_enable_hwecc;
1064 info->nand.ecc.correct = omap_correct_data; 1297 info->nand.ecc.correct = omap_correct_data;
1065 info->nand.ecc.mode = NAND_ECC_HW; 1298 info->nand.ecc.mode = NAND_ECC_HW;
1299 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1300 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1301 err = omap3_init_bch(&info->mtd, pdata->ecc_opt);
1302 if (err) {
1303 err = -EINVAL;
1304 goto out_release_mem_region;
1305 }
1066 } 1306 }
1067 1307
1068 /* DIP switches on some boards change between 8 and 16 bit 1308 /* DIP switches on some boards change between 8 and 16 bit
@@ -1094,6 +1334,14 @@ static int __devinit omap_nand_probe(struct platform_device *pdev)
1094 (offset + omap_oobinfo.eccbytes); 1334 (offset + omap_oobinfo.eccbytes);
1095 1335
1096 info->nand.ecc.layout = &omap_oobinfo; 1336 info->nand.ecc.layout = &omap_oobinfo;
1337 } else if ((pdata->ecc_opt == OMAP_ECC_BCH4_CODE_HW) ||
1338 (pdata->ecc_opt == OMAP_ECC_BCH8_CODE_HW)) {
1339 /* build OOB layout for BCH ECC correction */
1340 err = omap3_init_bch_tail(&info->mtd);
1341 if (err) {
1342 err = -EINVAL;
1343 goto out_release_mem_region;
1344 }
1097 } 1345 }
1098 1346
1099 /* second phase scan */ 1347 /* second phase scan */
@@ -1122,6 +1370,7 @@ static int omap_nand_remove(struct platform_device *pdev)
1122 struct mtd_info *mtd = platform_get_drvdata(pdev); 1370 struct mtd_info *mtd = platform_get_drvdata(pdev);
1123 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info, 1371 struct omap_nand_info *info = container_of(mtd, struct omap_nand_info,
1124 mtd); 1372 mtd);
1373 omap3_free_bch(&info->mtd);
1125 1374
1126 platform_set_drvdata(pdev, NULL); 1375 platform_set_drvdata(pdev, NULL);
1127 if (info->dma_ch != -1) 1376 if (info->dma_ch != -1)
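For readers unfamiliar with the library the new OMAP BCH path leans on: the GPMC hardware computes the syndromes, while lib/bch (linux/bch.h) only locates the error bits, with the same GF(2^13), t = 8, polynomial 0x201b parameters programmed above. A self-contained sketch of the software-only flow over one 512-byte sector (hypothetical helper name; the real driver caches the bch_control in its private struct instead of rebuilding it per sector):

    #include <linux/bch.h>
    #include <linux/errno.h>
    #include <linux/types.h>

    /* Correct up to 8 bit flips in one 512-byte sector; returns the number
     * of corrected bits, or a negative value if the data is uncorrectable. */
    static int bch8_fix_sector(uint8_t *data, uint8_t *read_ecc)
    {
        struct bch_control *bch = init_bch(13, 8, 0x201b);  /* GF(2^13), t = 8 */
        unsigned int errloc[8];
        int count, i;

        if (!bch)
            return -ENOMEM;

        count = decode_bch(bch, data, 512, read_ecc, NULL, NULL, errloc);
        for (i = 0; i < count; i++)
            if (errloc[i] < 8 * 512)    /* only patch bits inside the data area */
                data[errloc[i] / 8] ^= 1 << (errloc[i] & 7);

        free_bch(bch);
        return count;
    }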
diff --git a/drivers/mtd/nand/pasemi_nand.c b/drivers/mtd/nand/pasemi_nand.c
index 974dbf8251c9..1440e51cedcc 100644
--- a/drivers/mtd/nand/pasemi_nand.c
+++ b/drivers/mtd/nand/pasemi_nand.c
@@ -155,7 +155,6 @@ static int __devinit pasemi_nand_probe(struct platform_device *ofdev)
155 chip->ecc.mode = NAND_ECC_SOFT; 155 chip->ecc.mode = NAND_ECC_SOFT;
156 156
157 /* Enable the following for a flash based bad block table */ 157 /* Enable the following for a flash based bad block table */
158 chip->options = NAND_NO_AUTOINCR;
159 chip->bbt_options = NAND_BBT_USE_FLASH; 158 chip->bbt_options = NAND_BBT_USE_FLASH;
160 159
161 /* Scan to find existence of the device */ 160 /* Scan to find existence of the device */
diff --git a/drivers/mtd/nand/plat_nand.c b/drivers/mtd/nand/plat_nand.c
index 6404e6e81b10..1bcb52040422 100644
--- a/drivers/mtd/nand/plat_nand.c
+++ b/drivers/mtd/nand/plat_nand.c
@@ -23,14 +23,18 @@ struct plat_nand_data {
23 void __iomem *io_base; 23 void __iomem *io_base;
24}; 24};
25 25
26static const char *part_probe_types[] = { "cmdlinepart", NULL };
27
26/* 28/*
27 * Probe for the NAND device. 29 * Probe for the NAND device.
28 */ 30 */
29static int __devinit plat_nand_probe(struct platform_device *pdev) 31static int __devinit plat_nand_probe(struct platform_device *pdev)
30{ 32{
31 struct platform_nand_data *pdata = pdev->dev.platform_data; 33 struct platform_nand_data *pdata = pdev->dev.platform_data;
34 struct mtd_part_parser_data ppdata;
32 struct plat_nand_data *data; 35 struct plat_nand_data *data;
33 struct resource *res; 36 struct resource *res;
37 const char **part_types;
34 int err = 0; 38 int err = 0;
35 39
36 if (pdata->chip.nr_chips < 1) { 40 if (pdata->chip.nr_chips < 1) {
@@ -75,6 +79,7 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
75 data->chip.select_chip = pdata->ctrl.select_chip; 79 data->chip.select_chip = pdata->ctrl.select_chip;
76 data->chip.write_buf = pdata->ctrl.write_buf; 80 data->chip.write_buf = pdata->ctrl.write_buf;
77 data->chip.read_buf = pdata->ctrl.read_buf; 81 data->chip.read_buf = pdata->ctrl.read_buf;
82 data->chip.read_byte = pdata->ctrl.read_byte;
78 data->chip.chip_delay = pdata->chip.chip_delay; 83 data->chip.chip_delay = pdata->chip.chip_delay;
79 data->chip.options |= pdata->chip.options; 84 data->chip.options |= pdata->chip.options;
80 data->chip.bbt_options |= pdata->chip.bbt_options; 85 data->chip.bbt_options |= pdata->chip.bbt_options;
@@ -98,8 +103,10 @@ static int __devinit plat_nand_probe(struct platform_device *pdev)
98 goto out; 103 goto out;
99 } 104 }
100 105
101 err = mtd_device_parse_register(&data->mtd, 106 part_types = pdata->chip.part_probe_types ? : part_probe_types;
102 pdata->chip.part_probe_types, NULL, 107
108 ppdata.of_node = pdev->dev.of_node;
109 err = mtd_device_parse_register(&data->mtd, part_types, &ppdata,
103 pdata->chip.partitions, 110 pdata->chip.partitions,
104 pdata->chip.nr_partitions); 111 pdata->chip.nr_partitions);
105 112
@@ -140,12 +147,19 @@ static int __devexit plat_nand_remove(struct platform_device *pdev)
140 return 0; 147 return 0;
141} 148}
142 149
150static const struct of_device_id plat_nand_match[] = {
151 { .compatible = "gen_nand" },
152 {},
153};
154MODULE_DEVICE_TABLE(of, plat_nand_match);
155
143static struct platform_driver plat_nand_driver = { 156static struct platform_driver plat_nand_driver = {
144 .probe = plat_nand_probe, 157 .probe = plat_nand_probe,
145 .remove = __devexit_p(plat_nand_remove), 158 .remove = __devexit_p(plat_nand_remove),
146 .driver = { 159 .driver = {
147 .name = "gen_nand", 160 .name = "gen_nand",
148 .owner = THIS_MODULE, 161 .owner = THIS_MODULE,
162 .of_match_table = plat_nand_match,
149 }, 163 },
150}; 164};
151 165
diff --git a/drivers/mtd/nand/pxa3xx_nand.c b/drivers/mtd/nand/pxa3xx_nand.c
index def50caa6f84..252aaefcacfa 100644
--- a/drivers/mtd/nand/pxa3xx_nand.c
+++ b/drivers/mtd/nand/pxa3xx_nand.c
@@ -682,14 +682,15 @@ static void pxa3xx_nand_cmdfunc(struct mtd_info *mtd, unsigned command,
682} 682}
683 683
684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd, 684static void pxa3xx_nand_write_page_hwecc(struct mtd_info *mtd,
685 struct nand_chip *chip, const uint8_t *buf) 685 struct nand_chip *chip, const uint8_t *buf, int oob_required)
686{ 686{
687 chip->write_buf(mtd, buf, mtd->writesize); 687 chip->write_buf(mtd, buf, mtd->writesize);
688 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize); 688 chip->write_buf(mtd, chip->oob_poi, mtd->oobsize);
689} 689}
690 690
691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd, 691static int pxa3xx_nand_read_page_hwecc(struct mtd_info *mtd,
692 struct nand_chip *chip, uint8_t *buf, int page) 692 struct nand_chip *chip, uint8_t *buf, int oob_required,
693 int page)
693{ 694{
694 struct pxa3xx_nand_host *host = mtd->priv; 695 struct pxa3xx_nand_host *host = mtd->priv;
695 struct pxa3xx_nand_info *info = host->info_data; 696 struct pxa3xx_nand_info *info = host->info_data;
@@ -1004,7 +1005,6 @@ KEEP_CONFIG:
1004 chip->ecc.size = host->page_size; 1005 chip->ecc.size = host->page_size;
1005 chip->ecc.strength = 1; 1006 chip->ecc.strength = 1;
1006 1007
1007 chip->options = NAND_NO_AUTOINCR;
1008 chip->options |= NAND_NO_READRDY; 1008 chip->options |= NAND_NO_READRDY;
1009 if (host->reg_ndcr & NDCR_DWIDTH_M) 1009 if (host->reg_ndcr & NDCR_DWIDTH_M)
1010 chip->options |= NAND_BUSWIDTH_16; 1010 chip->options |= NAND_BUSWIDTH_16;
diff --git a/drivers/mtd/nand/r852.c b/drivers/mtd/nand/r852.c
index c2040187c813..8cb627751c9c 100644
--- a/drivers/mtd/nand/r852.c
+++ b/drivers/mtd/nand/r852.c
@@ -539,14 +539,11 @@ exit:
539 * nand_read_oob_syndrome assumes we can send column address - we can't 539 * nand_read_oob_syndrome assumes we can send column address - we can't
540 */ 540 */
541static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 541static int r852_read_oob(struct mtd_info *mtd, struct nand_chip *chip,
542 int page, int sndcmd) 542 int page)
543{ 543{
544 if (sndcmd) { 544 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
545 chip->cmdfunc(mtd, NAND_CMD_READOOB, 0, page);
546 sndcmd = 0;
547 }
548 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); 545 chip->read_buf(mtd, chip->oob_poi, mtd->oobsize);
549 return sndcmd; 546 return 0;
550} 547}
551 548
552/* 549/*
@@ -1104,18 +1101,7 @@ static struct pci_driver r852_pci_driver = {
1104 .driver.pm = &r852_pm_ops, 1101 .driver.pm = &r852_pm_ops,
1105}; 1102};
1106 1103
1107static __init int r852_module_init(void) 1104module_pci_driver(r852_pci_driver);
1108{
1109 return pci_register_driver(&r852_pci_driver);
1110}
1111
1112static void __exit r852_module_exit(void)
1113{
1114 pci_unregister_driver(&r852_pci_driver);
1115}
1116
1117module_init(r852_module_init);
1118module_exit(r852_module_exit);
1119 1105
1120MODULE_LICENSE("GPL"); 1106MODULE_LICENSE("GPL");
1121MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>"); 1107MODULE_AUTHOR("Maxim Levitsky <maximlevitsky@gmail.com>");
diff --git a/drivers/mtd/nand/sh_flctl.c b/drivers/mtd/nand/sh_flctl.c
index e9b2b260de3a..aa9b8a5e0b8f 100644
--- a/drivers/mtd/nand/sh_flctl.c
+++ b/drivers/mtd/nand/sh_flctl.c
@@ -344,7 +344,7 @@ static void set_cmd_regs(struct mtd_info *mtd, uint32_t cmd, uint32_t flcmcdr_va
344} 344}
345 345
346static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 346static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
347 uint8_t *buf, int page) 347 uint8_t *buf, int oob_required, int page)
348{ 348{
349 int i, eccsize = chip->ecc.size; 349 int i, eccsize = chip->ecc.size;
350 int eccbytes = chip->ecc.bytes; 350 int eccbytes = chip->ecc.bytes;
@@ -359,14 +359,14 @@ static int flctl_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
359 if (flctl->hwecc_cant_correct[i]) 359 if (flctl->hwecc_cant_correct[i])
360 mtd->ecc_stats.failed++; 360 mtd->ecc_stats.failed++;
361 else 361 else
362 mtd->ecc_stats.corrected += 0; 362 mtd->ecc_stats.corrected += 0; /* FIXME */
363 } 363 }
364 364
365 return 0; 365 return 0;
366} 366}
367 367
368static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 368static void flctl_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
369 const uint8_t *buf) 369 const uint8_t *buf, int oob_required)
370{ 370{
371 int i, eccsize = chip->ecc.size; 371 int i, eccsize = chip->ecc.size;
372 int eccbytes = chip->ecc.bytes; 372 int eccbytes = chip->ecc.bytes;
@@ -881,8 +881,6 @@ static int __devinit flctl_probe(struct platform_device *pdev)
881 flctl->hwecc = pdata->has_hwecc; 881 flctl->hwecc = pdata->has_hwecc;
882 flctl->holden = pdata->use_holden; 882 flctl->holden = pdata->use_holden;
883 883
884 nand->options = NAND_NO_AUTOINCR;
885
886 /* Set address of hardware control function */ 884 /* Set address of hardware control function */
887 /* 20 us command delay time */ 885 /* 20 us command delay time */
888 nand->chip_delay = 20; 886 nand->chip_delay = 20;
diff --git a/drivers/mtd/nand/sm_common.c b/drivers/mtd/nand/sm_common.c
index 774c3c266713..082bcdcd6bcf 100644
--- a/drivers/mtd/nand/sm_common.c
+++ b/drivers/mtd/nand/sm_common.c
@@ -94,17 +94,16 @@ static struct nand_flash_dev nand_smartmedia_flash_ids[] = {
94 {NULL,} 94 {NULL,}
95}; 95};
96 96
97#define XD_TYPEM (NAND_NO_AUTOINCR | NAND_BROKEN_XD)
98static struct nand_flash_dev nand_xd_flash_ids[] = { 97static struct nand_flash_dev nand_xd_flash_ids[] = {
99 98
100 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0}, 99 {"xD 16MiB 3,3V", 0x73, 512, 16, 0x4000, 0},
101 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0}, 100 {"xD 32MiB 3,3V", 0x75, 512, 32, 0x4000, 0},
102 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0}, 101 {"xD 64MiB 3,3V", 0x76, 512, 64, 0x4000, 0},
103 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0}, 102 {"xD 128MiB 3,3V", 0x79, 512, 128, 0x4000, 0},
104 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, XD_TYPEM}, 103 {"xD 256MiB 3,3V", 0x71, 512, 256, 0x4000, NAND_BROKEN_XD},
105 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, XD_TYPEM}, 104 {"xD 512MiB 3,3V", 0xdc, 512, 512, 0x4000, NAND_BROKEN_XD},
106 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, XD_TYPEM}, 105 {"xD 1GiB 3,3V", 0xd3, 512, 1024, 0x4000, NAND_BROKEN_XD},
107 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, XD_TYPEM}, 106 {"xD 2GiB 3,3V", 0xd5, 512, 2048, 0x4000, NAND_BROKEN_XD},
108 {NULL,} 107 {NULL,}
109}; 108};
110 109
diff --git a/drivers/mtd/onenand/onenand_base.c b/drivers/mtd/onenand/onenand_base.c
index b3ce12ef359e..7153e0d27101 100644
--- a/drivers/mtd/onenand/onenand_base.c
+++ b/drivers/mtd/onenand/onenand_base.c
@@ -1201,7 +1201,8 @@ static int onenand_mlc_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1201 if (mtd->ecc_stats.failed - stats.failed) 1201 if (mtd->ecc_stats.failed - stats.failed)
1202 return -EBADMSG; 1202 return -EBADMSG;
1203 1203
1204 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1204 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1205 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1205} 1206}
1206 1207
1207/** 1208/**
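This hunk, like the one that follows for onenand_read_ops_nolock, stops returning -EUCLEAN from the read path itself and instead reports the maximum number of bitflips per ECC step; the MTD core now compares that count against mtd->bitflip_threshold and is the layer that signals -EUCLEAN. From a consumer's point of view the contract is unchanged, roughly:

    size_t retlen;
    int ret = mtd_read(mtd, from, len, &retlen, buf);

    if (ret == -EUCLEAN) {
        /* data is intact, but enough bitflips were corrected that the
         * caller should consider scrubbing the block */
        ret = 0;
    } else if (ret < 0) {
        return ret;    /* -EBADMSG or a real I/O error */
    }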
@@ -1333,7 +1334,8 @@ static int onenand_read_ops_nolock(struct mtd_info *mtd, loff_t from,
1333 if (mtd->ecc_stats.failed - stats.failed) 1334 if (mtd->ecc_stats.failed - stats.failed)
1334 return -EBADMSG; 1335 return -EBADMSG;
1335 1336
1336 return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0; 1337 /* return max bitflips per ecc step; ONENANDs correct 1 bit only */
1338 return mtd->ecc_stats.corrected != stats.corrected ? 1 : 0;
1337} 1339}
1338 1340
1339/** 1341/**
diff --git a/drivers/mtd/ubi/debug.c b/drivers/mtd/ubi/debug.c
index 9f957c2d48e9..09d4f8d9d592 100644
--- a/drivers/mtd/ubi/debug.c
+++ b/drivers/mtd/ubi/debug.c
@@ -264,6 +264,9 @@ static struct dentry *dfs_rootdir;
264 */ 264 */
265int ubi_debugfs_init(void) 265int ubi_debugfs_init(void)
266{ 266{
267 if (!IS_ENABLED(DEBUG_FS))
268 return 0;
269
267 dfs_rootdir = debugfs_create_dir("ubi", NULL); 270 dfs_rootdir = debugfs_create_dir("ubi", NULL);
268 if (IS_ERR_OR_NULL(dfs_rootdir)) { 271 if (IS_ERR_OR_NULL(dfs_rootdir)) {
269 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir); 272 int err = dfs_rootdir ? -ENODEV : PTR_ERR(dfs_rootdir);
@@ -281,7 +284,8 @@ int ubi_debugfs_init(void)
281 */ 284 */
282void ubi_debugfs_exit(void) 285void ubi_debugfs_exit(void)
283{ 286{
284 debugfs_remove(dfs_rootdir); 287 if (IS_ENABLED(DEBUG_FS))
288 debugfs_remove(dfs_rootdir);
285} 289}
286 290
287/* Read an UBI debugfs file */ 291/* Read an UBI debugfs file */
@@ -403,6 +407,9 @@ int ubi_debugfs_init_dev(struct ubi_device *ubi)
403 struct dentry *dent; 407 struct dentry *dent;
404 struct ubi_debug_info *d = ubi->dbg; 408 struct ubi_debug_info *d = ubi->dbg;
405 409
410 if (!IS_ENABLED(DEBUG_FS))
411 return 0;
412
406 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME, 413 n = snprintf(d->dfs_dir_name, UBI_DFS_DIR_LEN + 1, UBI_DFS_DIR_NAME,
407 ubi->ubi_num); 414 ubi->ubi_num);
408 if (n == UBI_DFS_DIR_LEN) { 415 if (n == UBI_DFS_DIR_LEN) {
@@ -470,5 +477,6 @@ out:
470 */ 477 */
471void ubi_debugfs_exit_dev(struct ubi_device *ubi) 478void ubi_debugfs_exit_dev(struct ubi_device *ubi)
472{ 479{
473 debugfs_remove_recursive(ubi->dbg->dfs_dir); 480 if (IS_ENABLED(DEBUG_FS))
481 debugfs_remove_recursive(ubi->dbg->dfs_dir);
474} 482}
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index 9df100a4ec38..b6be644e7b85 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -1262,11 +1262,11 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1262 dbg_wl("flush pending work for LEB %d:%d (%d pending works)", 1262 dbg_wl("flush pending work for LEB %d:%d (%d pending works)",
1263 vol_id, lnum, ubi->works_count); 1263 vol_id, lnum, ubi->works_count);
1264 1264
1265 down_write(&ubi->work_sem);
1266 while (found) { 1265 while (found) {
1267 struct ubi_work *wrk; 1266 struct ubi_work *wrk;
1268 found = 0; 1267 found = 0;
1269 1268
1269 down_read(&ubi->work_sem);
1270 spin_lock(&ubi->wl_lock); 1270 spin_lock(&ubi->wl_lock);
1271 list_for_each_entry(wrk, &ubi->works, list) { 1271 list_for_each_entry(wrk, &ubi->works, list) {
1272 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) && 1272 if ((vol_id == UBI_ALL || wrk->vol_id == vol_id) &&
@@ -1277,18 +1277,27 @@ int ubi_wl_flush(struct ubi_device *ubi, int vol_id, int lnum)
1277 spin_unlock(&ubi->wl_lock); 1277 spin_unlock(&ubi->wl_lock);
1278 1278
1279 err = wrk->func(ubi, wrk, 0); 1279 err = wrk->func(ubi, wrk, 0);
1280 if (err) 1280 if (err) {
1281 goto out; 1281 up_read(&ubi->work_sem);
1282 return err;
1283 }
1284
1282 spin_lock(&ubi->wl_lock); 1285 spin_lock(&ubi->wl_lock);
1283 found = 1; 1286 found = 1;
1284 break; 1287 break;
1285 } 1288 }
1286 } 1289 }
1287 spin_unlock(&ubi->wl_lock); 1290 spin_unlock(&ubi->wl_lock);
1291 up_read(&ubi->work_sem);
1288 } 1292 }
1289 1293
1290out: 1294 /*
1295 * Make sure all the works which have been done in parallel are
1296 * finished.
1297 */
1298 down_write(&ubi->work_sem);
1291 up_write(&ubi->work_sem); 1299 up_write(&ubi->work_sem);
1300
1292 return err; 1301 return err;
1293} 1302}
1294 1303
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 2ee8cf9e8a3b..b9c2ae62166d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -76,6 +76,7 @@
76#include <net/route.h> 76#include <net/route.h>
77#include <net/net_namespace.h> 77#include <net/net_namespace.h>
78#include <net/netns/generic.h> 78#include <net/netns/generic.h>
79#include <net/pkt_sched.h>
79#include "bonding.h" 80#include "bonding.h"
80#include "bond_3ad.h" 81#include "bond_3ad.h"
81#include "bond_alb.h" 82#include "bond_alb.h"
@@ -381,8 +382,6 @@ struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr)
381 return next; 382 return next;
382} 383}
383 384
384#define bond_queue_mapping(skb) (*(u16 *)((skb)->cb))
385
386/** 385/**
387 * bond_dev_queue_xmit - Prepare skb for xmit. 386 * bond_dev_queue_xmit - Prepare skb for xmit.
388 * 387 *
@@ -395,7 +394,9 @@ int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb,
395{ 394{
396 skb->dev = slave_dev; 395 skb->dev = slave_dev;
397 396
398 skb->queue_mapping = bond_queue_mapping(skb); 397 BUILD_BUG_ON(sizeof(skb->queue_mapping) !=
398 sizeof(qdisc_skb_cb(skb)->bond_queue_mapping));
399 skb->queue_mapping = qdisc_skb_cb(skb)->bond_queue_mapping;
399 400
400 if (unlikely(netpoll_tx_running(slave_dev))) 401 if (unlikely(netpoll_tx_running(slave_dev)))
401 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb); 402 bond_netpoll_send_skb(bond_get_slave_by_dev(bond, slave_dev), skb);
@@ -4171,7 +4172,7 @@ static u16 bond_select_queue(struct net_device *dev, struct sk_buff *skb)
4171 /* 4172 /*
4172 * Save the original txq to restore before passing to the driver 4173 * Save the original txq to restore before passing to the driver
4173 */ 4174 */
4174 bond_queue_mapping(skb) = skb->queue_mapping; 4175 qdisc_skb_cb(skb)->bond_queue_mapping = skb->queue_mapping;
4175 4176
4176 if (unlikely(txq >= dev->real_num_tx_queues)) { 4177 if (unlikely(txq >= dev->real_num_tx_queues)) {
4177 do { 4178 do {
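The bonding hunks above move the saved transmit queue out of a raw cast over skb->cb and into a field of qdisc_skb_cb, with a BUILD_BUG_ON so the stored value can never silently outgrow the control-block area. The same defensive pattern, shown with a hypothetical private struct rather than the bonding/qdisc one:

    #include <linux/kernel.h>
    #include <linux/skbuff.h>

    struct my_skb_cb {
        u16 saved_queue;
    };
    #define MY_SKB_CB(skb) ((struct my_skb_cb *)(skb)->cb)

    static void my_save_queue(struct sk_buff *skb)
    {
        /* fails the build if the private struct no longer fits in skb->cb */
        BUILD_BUG_ON(sizeof(struct my_skb_cb) > FIELD_SIZEOF(struct sk_buff, cb));
        MY_SKB_CB(skb)->saved_queue = skb->queue_mapping;
    }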
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index aef42f045320..485bedb8278c 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -1082,8 +1082,12 @@ static ssize_t bonding_store_primary(struct device *d,
1082 } 1082 }
1083 } 1083 }
1084 1084
1085 pr_info("%s: Unable to set %.*s as primary slave.\n", 1085 strncpy(bond->params.primary, ifname, IFNAMSIZ);
1086 bond->dev->name, (int)strlen(buf) - 1, buf); 1086 bond->params.primary[IFNAMSIZ - 1] = 0;
1087
1088 pr_info("%s: Recording %s as primary, "
1089 "but it has not been enslaved to %s yet.\n",
1090 bond->dev->name, ifname, bond->dev->name);
1087out: 1091out:
1088 write_unlock_bh(&bond->curr_slave_lock); 1092 write_unlock_bh(&bond->curr_slave_lock);
1089 read_unlock(&bond->lock); 1093 read_unlock(&bond->lock);
diff --git a/drivers/net/can/c_can/c_can.c b/drivers/net/can/c_can/c_can.c
index 536bda072a16..8dc84d66eea1 100644
--- a/drivers/net/can/c_can/c_can.c
+++ b/drivers/net/can/c_can/c_can.c
@@ -686,7 +686,7 @@ static int c_can_get_berr_counter(const struct net_device *dev,
686 * 686 *
687 * We iterate from priv->tx_echo to priv->tx_next and check if the 687 * We iterate from priv->tx_echo to priv->tx_next and check if the
688 * packet has been transmitted, echo it back to the CAN framework. 688 * packet has been transmitted, echo it back to the CAN framework.
689 * If we discover a not yet transmitted package, stop looking for more. 689 * If we discover a not yet transmitted packet, stop looking for more.
690 */ 690 */
691static void c_can_do_tx(struct net_device *dev) 691static void c_can_do_tx(struct net_device *dev)
692{ 692{
@@ -698,7 +698,7 @@ static void c_can_do_tx(struct net_device *dev)
698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) { 698 for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
699 msg_obj_no = get_tx_echo_msg_obj(priv); 699 msg_obj_no = get_tx_echo_msg_obj(priv);
700 val = c_can_read_reg32(priv, &priv->regs->txrqst1); 700 val = c_can_read_reg32(priv, &priv->regs->txrqst1);
701 if (!(val & (1 << msg_obj_no))) { 701 if (!(val & (1 << (msg_obj_no - 1)))) {
702 can_get_echo_skb(dev, 702 can_get_echo_skb(dev,
703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST); 703 msg_obj_no - C_CAN_MSG_OBJ_TX_FIRST);
704 stats->tx_bytes += priv->read_reg(priv, 704 stats->tx_bytes += priv->read_reg(priv,
@@ -706,6 +706,8 @@ static void c_can_do_tx(struct net_device *dev)
706 & IF_MCONT_DLC_MASK; 706 & IF_MCONT_DLC_MASK;
707 stats->tx_packets++; 707 stats->tx_packets++;
708 c_can_inval_msg_object(dev, 0, msg_obj_no); 708 c_can_inval_msg_object(dev, 0, msg_obj_no);
709 } else {
710 break;
709 } 711 }
710 } 712 }
711 713
@@ -950,7 +952,7 @@ static int c_can_poll(struct napi_struct *napi, int quota)
950 struct net_device *dev = napi->dev; 952 struct net_device *dev = napi->dev;
951 struct c_can_priv *priv = netdev_priv(dev); 953 struct c_can_priv *priv = netdev_priv(dev);
952 954
953 irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 955 irqstatus = priv->irqstatus;
954 if (!irqstatus) 956 if (!irqstatus)
955 goto end; 957 goto end;
956 958
@@ -1028,12 +1030,11 @@ end:
1028 1030
1029static irqreturn_t c_can_isr(int irq, void *dev_id) 1031static irqreturn_t c_can_isr(int irq, void *dev_id)
1030{ 1032{
1031 u16 irqstatus;
1032 struct net_device *dev = (struct net_device *)dev_id; 1033 struct net_device *dev = (struct net_device *)dev_id;
1033 struct c_can_priv *priv = netdev_priv(dev); 1034 struct c_can_priv *priv = netdev_priv(dev);
1034 1035
1035 irqstatus = priv->read_reg(priv, &priv->regs->interrupt); 1036 priv->irqstatus = priv->read_reg(priv, &priv->regs->interrupt);
1036 if (!irqstatus) 1037 if (!priv->irqstatus)
1037 return IRQ_NONE; 1038 return IRQ_NONE;
1038 1039
1039 /* disable all interrupts and schedule the NAPI */ 1040 /* disable all interrupts and schedule the NAPI */
@@ -1063,10 +1064,11 @@ static int c_can_open(struct net_device *dev)
1063 goto exit_irq_fail; 1064 goto exit_irq_fail;
1064 } 1065 }
1065 1066
1067 napi_enable(&priv->napi);
1068
1066 /* start the c_can controller */ 1069 /* start the c_can controller */
1067 c_can_start(dev); 1070 c_can_start(dev);
1068 1071
1069 napi_enable(&priv->napi);
1070 netif_start_queue(dev); 1072 netif_start_queue(dev);
1071 1073
1072 return 0; 1074 return 0;
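The key c_can fix above is the bit index: message objects are numbered from 1, but the TXRQST register uses bit 0 for object 1, so object N must be tested at bit N-1 (and the loop now stops at the first object still pending, keeping TX completion in order). Condensed into a one-line helper with a hypothetical name:

    static inline bool c_can_msg_obj_pending(u32 txrqst, unsigned int msg_obj_no)
    {
        return txrqst & (1U << (msg_obj_no - 1));    /* object 1 -> bit 0 */
    }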
diff --git a/drivers/net/can/c_can/c_can.h b/drivers/net/can/c_can/c_can.h
index 9b7fbef3d09a..5f32d34af507 100644
--- a/drivers/net/can/c_can/c_can.h
+++ b/drivers/net/can/c_can/c_can.h
@@ -76,6 +76,7 @@ struct c_can_priv {
76 unsigned int tx_next; 76 unsigned int tx_next;
77 unsigned int tx_echo; 77 unsigned int tx_echo;
78 void *priv; /* for board-specific data */ 78 void *priv; /* for board-specific data */
79 u16 irqstatus;
79}; 80};
80 81
81struct net_device *alloc_c_can_dev(void); 82struct net_device *alloc_c_can_dev(void);
diff --git a/drivers/net/can/cc770/cc770_platform.c b/drivers/net/can/cc770/cc770_platform.c
index 53115eee8075..688371cda37a 100644
--- a/drivers/net/can/cc770/cc770_platform.c
+++ b/drivers/net/can/cc770/cc770_platform.c
@@ -154,7 +154,7 @@ static int __devinit cc770_get_platform_data(struct platform_device *pdev,
154 struct cc770_platform_data *pdata = pdev->dev.platform_data; 154 struct cc770_platform_data *pdata = pdev->dev.platform_data;
155 155
156 priv->can.clock.freq = pdata->osc_freq; 156 priv->can.clock.freq = pdata->osc_freq;
157 if (priv->cpu_interface | CPUIF_DSC) 157 if (priv->cpu_interface & CPUIF_DSC)
158 priv->can.clock.freq /= 2; 158 priv->can.clock.freq /= 2;
159 priv->clkout = pdata->cor; 159 priv->clkout = pdata->cor;
160 priv->bus_config = pdata->bcr; 160 priv->bus_config = pdata->bcr;
diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c
index 442d91a2747b..bab0158f1cc3 100644
--- a/drivers/net/dummy.c
+++ b/drivers/net/dummy.c
@@ -187,8 +187,10 @@ static int __init dummy_init_module(void)
187 rtnl_lock(); 187 rtnl_lock();
188 err = __rtnl_link_register(&dummy_link_ops); 188 err = __rtnl_link_register(&dummy_link_ops);
189 189
190 for (i = 0; i < numdummies && !err; i++) 190 for (i = 0; i < numdummies && !err; i++) {
191 err = dummy_init_one(); 191 err = dummy_init_one();
192 cond_resched();
193 }
192 if (err < 0) 194 if (err < 0)
193 __rtnl_link_unregister(&dummy_link_ops); 195 __rtnl_link_unregister(&dummy_link_ops);
194 rtnl_unlock(); 196 rtnl_unlock();
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
index e30e2a2f354c..7de824184979 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
@@ -747,21 +747,6 @@ struct bnx2x_fastpath {
747 747
748#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG 748#define ETH_RX_ERROR_FALGS ETH_FAST_PATH_RX_CQE_PHY_DECODE_ERR_FLG
749 749
750#define BNX2X_IP_CSUM_ERR(cqe) \
751 (!((cqe)->fast_path_cqe.status_flags & \
752 ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG) && \
753 ((cqe)->fast_path_cqe.type_error_flags & \
754 ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG))
755
756#define BNX2X_L4_CSUM_ERR(cqe) \
757 (!((cqe)->fast_path_cqe.status_flags & \
758 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG) && \
759 ((cqe)->fast_path_cqe.type_error_flags & \
760 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
761
762#define BNX2X_RX_CSUM_OK(cqe) \
763 (!(BNX2X_L4_CSUM_ERR(cqe) || BNX2X_IP_CSUM_ERR(cqe)))
764
765#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \ 750#define BNX2X_PRS_FLAG_OVERETH_IPV4(flags) \
766 (((le16_to_cpu(flags) & \ 751 (((le16_to_cpu(flags) & \
767 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \ 752 PARSING_FLAGS_OVER_ETHERNET_PROTOCOL) >> \
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
index ad0743bf4bde..cbc56f274e0c 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
@@ -617,6 +617,25 @@ static int bnx2x_alloc_rx_data(struct bnx2x *bp,
617 return 0; 617 return 0;
618} 618}
619 619
620static void bnx2x_csum_validate(struct sk_buff *skb, union eth_rx_cqe *cqe,
621 struct bnx2x_fastpath *fp)
622{
623 /* Do nothing if no IP/L4 csum validation was done */
624
625 if (cqe->fast_path_cqe.status_flags &
626 (ETH_FAST_PATH_RX_CQE_IP_XSUM_NO_VALIDATION_FLG |
627 ETH_FAST_PATH_RX_CQE_L4_XSUM_NO_VALIDATION_FLG))
628 return;
629
630 /* If both IP/L4 validation were done, check if an error was found. */
631
632 if (cqe->fast_path_cqe.type_error_flags &
633 (ETH_FAST_PATH_RX_CQE_IP_BAD_XSUM_FLG |
634 ETH_FAST_PATH_RX_CQE_L4_BAD_XSUM_FLG))
635 fp->eth_q_stats.hw_csum_err++;
636 else
637 skb->ip_summed = CHECKSUM_UNNECESSARY;
638}
620 639
621int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget) 640int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
622{ 641{
@@ -806,13 +825,9 @@ reuse_rx:
806 825
807 skb_checksum_none_assert(skb); 826 skb_checksum_none_assert(skb);
808 827
809 if (bp->dev->features & NETIF_F_RXCSUM) { 828 if (bp->dev->features & NETIF_F_RXCSUM)
829 bnx2x_csum_validate(skb, cqe, fp);
810 830
811 if (likely(BNX2X_RX_CSUM_OK(cqe)))
812 skb->ip_summed = CHECKSUM_UNNECESSARY;
813 else
814 fp->eth_q_stats.hw_csum_err++;
815 }
816 831
817 skb_record_rx_queue(skb, fp->rx_queue); 832 skb_record_rx_queue(skb, fp->rx_queue);
818 833
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index edeeb516807a..e47ff8be1d7b 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -14275,7 +14275,8 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
14275 } 14275 }
14276 } 14276 }
14277 14277
14278 if (tg3_flag(tp, 5755_PLUS)) 14278 if (tg3_flag(tp, 5755_PLUS) ||
14279 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14279 tg3_flag_set(tp, SHORT_DMA_BUG); 14280 tg3_flag_set(tp, SHORT_DMA_BUG);
14280 14281
14281 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719) 14282 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index 08efd308d78a..fdb50cec6b51 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -736,6 +736,8 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
736 736
737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb); 737 copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
738 if (copied) { 738 if (copied) {
739 int gso_segs = skb_shinfo(skb)->gso_segs;
740
739 /* record the sent skb in the sent_skb table */ 741 /* record the sent skb in the sent_skb table */
740 BUG_ON(txo->sent_skb_list[start]); 742 BUG_ON(txo->sent_skb_list[start]);
741 txo->sent_skb_list[start] = skb; 743 txo->sent_skb_list[start] = skb;
@@ -753,8 +755,7 @@ static netdev_tx_t be_xmit(struct sk_buff *skb,
753 755
754 be_txq_notify(adapter, txq->id, wrb_cnt); 756 be_txq_notify(adapter, txq->id, wrb_cnt);
755 757
756 be_tx_stats_update(txo, wrb_cnt, copied, 758 be_tx_stats_update(txo, wrb_cnt, copied, gso_segs, stopped);
757 skb_shinfo(skb)->gso_segs, stopped);
758 } else { 759 } else {
759 txq->head = start; 760 txq->head = start;
760 dev_kfree_skb_any(skb); 761 dev_kfree_skb_any(skb);
diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c
index 97f947b3d94a..2933d08b036e 100644
--- a/drivers/net/ethernet/freescale/fec_mpc52xx.c
+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c
@@ -437,7 +437,7 @@ static irqreturn_t mpc52xx_fec_rx_interrupt(int irq, void *dev_id)
437 length = status & BCOM_FEC_RX_BD_LEN_MASK; 437 length = status & BCOM_FEC_RX_BD_LEN_MASK;
438 skb_put(rskb, length - 4); /* length without CRC32 */ 438 skb_put(rskb, length - 4); /* length without CRC32 */
439 rskb->protocol = eth_type_trans(rskb, dev); 439 rskb->protocol = eth_type_trans(rskb, dev);
440 if (!skb_defer_rx_timestamp(skb)) 440 if (!skb_defer_rx_timestamp(rskb))
441 netif_rx(rskb); 441 netif_rx(rskb);
442 442
443 spin_lock(&priv->lock); 443 spin_lock(&priv->lock);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_main.c b/drivers/net/ethernet/intel/e1000/e1000_main.c
index 95731c841044..7483ca0a6282 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_main.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -4080,7 +4080,7 @@ static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
4080 spin_lock_irqsave(&adapter->stats_lock, 4080 spin_lock_irqsave(&adapter->stats_lock,
4081 irq_flags); 4081 irq_flags);
4082 e1000_tbi_adjust_stats(hw, &adapter->stats, 4082 e1000_tbi_adjust_stats(hw, &adapter->stats,
4083 length, skb->data); 4083 length, mapped);
4084 spin_unlock_irqrestore(&adapter->stats_lock, 4084 spin_unlock_irqrestore(&adapter->stats_lock,
4085 irq_flags); 4085 irq_flags);
4086 length--; 4086 length--;
diff --git a/drivers/net/ethernet/intel/e1000e/ethtool.c b/drivers/net/ethernet/intel/e1000e/ethtool.c
index d863075df7a4..905e2147d918 100644
--- a/drivers/net/ethernet/intel/e1000e/ethtool.c
+++ b/drivers/net/ethernet/intel/e1000e/ethtool.c
@@ -258,7 +258,8 @@ static int e1000_set_settings(struct net_device *netdev,
258 * When SoL/IDER sessions are active, autoneg/speed/duplex 258 * When SoL/IDER sessions are active, autoneg/speed/duplex
259 * cannot be changed 259 * cannot be changed
260 */ 260 */
261 if (hw->phy.ops.check_reset_block(hw)) { 261 if (hw->phy.ops.check_reset_block &&
262 hw->phy.ops.check_reset_block(hw)) {
262 e_err("Cannot change link characteristics when SoL/IDER is active.\n"); 263 e_err("Cannot change link characteristics when SoL/IDER is active.\n");
263 return -EINVAL; 264 return -EINVAL;
264 } 265 }
@@ -1615,7 +1616,8 @@ static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
1615 * PHY loopback cannot be performed if SoL/IDER 1616 * PHY loopback cannot be performed if SoL/IDER
1616 * sessions are active 1617 * sessions are active
1617 */ 1618 */
1618 if (hw->phy.ops.check_reset_block(hw)) { 1619 if (hw->phy.ops.check_reset_block &&
1620 hw->phy.ops.check_reset_block(hw)) {
1619 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n"); 1621 e_err("Cannot do PHY loopback test when SoL/IDER is active.\n");
1620 *data = 0; 1622 *data = 0;
1621 goto out; 1623 goto out;
diff --git a/drivers/net/ethernet/intel/e1000e/ich8lan.c b/drivers/net/ethernet/intel/e1000e/ich8lan.c
index bbf70ba367da..238ab2f8a5e7 100644
--- a/drivers/net/ethernet/intel/e1000e/ich8lan.c
+++ b/drivers/net/ethernet/intel/e1000e/ich8lan.c
@@ -165,14 +165,14 @@
165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */ 165#define I217_EEE_100_SUPPORTED (1 << 1) /* 100BaseTx EEE supported */
166 166
167/* Intel Rapid Start Technology Support */ 167/* Intel Rapid Start Technology Support */
168#define I217_PROXY_CTRL PHY_REG(BM_WUC_PAGE, 70) 168#define I217_PROXY_CTRL BM_PHY_REG(BM_WUC_PAGE, 70)
169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080 169#define I217_PROXY_CTRL_AUTO_DISABLE 0x0080
170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28) 170#define I217_SxCTRL PHY_REG(BM_PORT_CTRL_PAGE, 28)
171#define I217_SxCTRL_MASK 0x1000 171#define I217_SxCTRL_ENABLE_LPI_RESET 0x1000
172#define I217_CGFREG PHY_REG(772, 29) 172#define I217_CGFREG PHY_REG(772, 29)
173#define I217_CGFREG_MASK 0x0002 173#define I217_CGFREG_ENABLE_MTA_RESET 0x0002
174#define I217_MEMPWR PHY_REG(772, 26) 174#define I217_MEMPWR PHY_REG(772, 26)
175#define I217_MEMPWR_MASK 0x0010 175#define I217_MEMPWR_DISABLE_SMB_RELEASE 0x0010
176 176
177/* Strapping Option Register - RO */ 177/* Strapping Option Register - RO */
178#define E1000_STRAP 0x0000C 178#define E1000_STRAP 0x0000C
@@ -4089,12 +4089,12 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4089 * power good. 4089 * power good.
4090 */ 4090 */
4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg); 4091 e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
4092 phy_reg |= I217_SxCTRL_MASK; 4092 phy_reg |= I217_SxCTRL_ENABLE_LPI_RESET;
4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg); 4093 e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
4094 4094
4095 /* Disable the SMB release on LCD reset. */ 4095 /* Disable the SMB release on LCD reset. */
4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4096 e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4097 phy_reg &= ~I217_MEMPWR; 4097 phy_reg &= ~I217_MEMPWR_DISABLE_SMB_RELEASE;
4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4098 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4099 } 4099 }
4100 4100
@@ -4103,7 +4103,7 @@ void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
4103 * Support 4103 * Support
4104 */ 4104 */
4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4105 e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4106 phy_reg |= I217_CGFREG_MASK; 4106 phy_reg |= I217_CGFREG_ENABLE_MTA_RESET;
4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4107 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4108 4108
4109release: 4109release:
@@ -4176,7 +4176,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg); 4176 ret_val = e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
4177 if (ret_val) 4177 if (ret_val)
4178 goto release; 4178 goto release;
4179 phy_reg |= I217_MEMPWR_MASK; 4179 phy_reg |= I217_MEMPWR_DISABLE_SMB_RELEASE;
4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg); 4180 e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
4181 4181
4182 /* Disable Proxy */ 4182 /* Disable Proxy */
@@ -4186,7 +4186,7 @@ void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg); 4186 ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
4187 if (ret_val) 4187 if (ret_val)
4188 goto release; 4188 goto release;
4189 phy_reg &= ~I217_CGFREG_MASK; 4189 phy_reg &= ~I217_CGFREG_ENABLE_MTA_RESET;
4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg); 4190 e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
4191release: 4191release:
4192 if (ret_val) 4192 if (ret_val)
diff --git a/drivers/net/ethernet/intel/e1000e/mac.c b/drivers/net/ethernet/intel/e1000e/mac.c
index 026e8b3ab52e..a13439928488 100644
--- a/drivers/net/ethernet/intel/e1000e/mac.c
+++ b/drivers/net/ethernet/intel/e1000e/mac.c
@@ -709,7 +709,7 @@ s32 e1000e_setup_link_generic(struct e1000_hw *hw)
709 * In the case of the phy reset being blocked, we already have a link. 709 * In the case of the phy reset being blocked, we already have a link.
710 * We do not need to set it up again. 710 * We do not need to set it up again.
711 */ 711 */
712 if (hw->phy.ops.check_reset_block(hw)) 712 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
713 return 0; 713 return 0;
714 714
715 /* 715 /*
diff --git a/drivers/net/ethernet/intel/e1000e/netdev.c b/drivers/net/ethernet/intel/e1000e/netdev.c
index a4b0435b00dc..31d37a2b5ba8 100644
--- a/drivers/net/ethernet/intel/e1000e/netdev.c
+++ b/drivers/net/ethernet/intel/e1000e/netdev.c
@@ -6237,7 +6237,7 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
6237 adapter->hw.phy.ms_type = e1000_ms_hw_default; 6237 adapter->hw.phy.ms_type = e1000_ms_hw_default;
6238 } 6238 }
6239 6239
6240 if (hw->phy.ops.check_reset_block(hw)) 6240 if (hw->phy.ops.check_reset_block && hw->phy.ops.check_reset_block(hw))
6241 e_info("PHY reset is blocked due to SOL/IDER session.\n"); 6241 e_info("PHY reset is blocked due to SOL/IDER session.\n");
6242 6242
6243 /* Set initial default active device features */ 6243 /* Set initial default active device features */
@@ -6404,7 +6404,7 @@ err_register:
6404 if (!(adapter->flags & FLAG_HAS_AMT)) 6404 if (!(adapter->flags & FLAG_HAS_AMT))
6405 e1000e_release_hw_control(adapter); 6405 e1000e_release_hw_control(adapter);
6406err_eeprom: 6406err_eeprom:
6407 if (!hw->phy.ops.check_reset_block(hw)) 6407 if (hw->phy.ops.check_reset_block && !hw->phy.ops.check_reset_block(hw))
6408 e1000_phy_hw_reset(&adapter->hw); 6408 e1000_phy_hw_reset(&adapter->hw);
6409err_hw_init: 6409err_hw_init:
6410 kfree(adapter->tx_ring); 6410 kfree(adapter->tx_ring);
diff --git a/drivers/net/ethernet/intel/e1000e/phy.c b/drivers/net/ethernet/intel/e1000e/phy.c
index 0334d013bc3c..b860d4f7ea2a 100644
--- a/drivers/net/ethernet/intel/e1000e/phy.c
+++ b/drivers/net/ethernet/intel/e1000e/phy.c
@@ -2155,9 +2155,11 @@ s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
2155 s32 ret_val; 2155 s32 ret_val;
2156 u32 ctrl; 2156 u32 ctrl;
2157 2157
2158 ret_val = phy->ops.check_reset_block(hw); 2158 if (phy->ops.check_reset_block) {
2159 if (ret_val) 2159 ret_val = phy->ops.check_reset_block(hw);
2160 return 0; 2160 if (ret_val)
2161 return 0;
2162 }
2161 2163
2162 ret_val = phy->ops.acquire(hw); 2164 ret_val = phy->ops.acquire(hw);
2163 if (ret_val) 2165 if (ret_val)
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index bf20457ea23a..17ad6a3c1be1 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -1390,6 +1390,8 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1390 union ixgbe_adv_rx_desc *rx_desc, 1390 union ixgbe_adv_rx_desc *rx_desc,
1391 struct sk_buff *skb) 1391 struct sk_buff *skb)
1392{ 1392{
1393 struct net_device *dev = rx_ring->netdev;
1394
1393 ixgbe_update_rsc_stats(rx_ring, skb); 1395 ixgbe_update_rsc_stats(rx_ring, skb);
1394 1396
1395 ixgbe_rx_hash(rx_ring, rx_desc, skb); 1397 ixgbe_rx_hash(rx_ring, rx_desc, skb);
@@ -1401,14 +1403,15 @@ static void ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring,
1401 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb); 1403 ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector, skb);
1402#endif 1404#endif
1403 1405
1404 if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { 1406 if ((dev->features & NETIF_F_HW_VLAN_RX) &&
1407 ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) {
1405 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); 1408 u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan);
1406 __vlan_hwaccel_put_tag(skb, vid); 1409 __vlan_hwaccel_put_tag(skb, vid);
1407 } 1410 }
1408 1411
1409 skb_record_rx_queue(skb, rx_ring->queue_index); 1412 skb_record_rx_queue(skb, rx_ring->queue_index);
1410 1413
1411 skb->protocol = eth_type_trans(skb, rx_ring->netdev); 1414 skb->protocol = eth_type_trans(skb, dev);
1412} 1415}
1413 1416
1414static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, 1417static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector,
@@ -3607,10 +3610,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
3607 if (hw->mac.type == ixgbe_mac_82598EB) 3610 if (hw->mac.type == ixgbe_mac_82598EB)
3608 netif_set_gso_max_size(adapter->netdev, 32768); 3611 netif_set_gso_max_size(adapter->netdev, 32768);
3609 3612
3610
3611 /* Enable VLAN tag insert/strip */
3612 adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
3613
3614 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true); 3613 hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
3615 3614
3616#ifdef IXGBE_FCOE 3615#ifdef IXGBE_FCOE
@@ -6701,11 +6700,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6701{ 6700{
6702 struct ixgbe_adapter *adapter = netdev_priv(netdev); 6701 struct ixgbe_adapter *adapter = netdev_priv(netdev);
6703 6702
6704#ifdef CONFIG_DCB
6705 if (adapter->flags & IXGBE_FLAG_DCB_ENABLED)
6706 features &= ~NETIF_F_HW_VLAN_RX;
6707#endif
6708
6709 /* return error if RXHASH is being enabled when RSS is not supported */ 6703 /* return error if RXHASH is being enabled when RSS is not supported */
6710 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED)) 6704 if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
6711 features &= ~NETIF_F_RXHASH; 6705 features &= ~NETIF_F_RXHASH;
@@ -6718,7 +6712,6 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev,
6718 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) 6712 if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE))
6719 features &= ~NETIF_F_LRO; 6713 features &= ~NETIF_F_LRO;
6720 6714
6721
6722 return features; 6715 return features;
6723} 6716}
6724 6717
@@ -6766,6 +6759,11 @@ static int ixgbe_set_features(struct net_device *netdev,
6766 need_reset = true; 6759 need_reset = true;
6767 } 6760 }
6768 6761
6762 if (features & NETIF_F_HW_VLAN_RX)
6763 ixgbe_vlan_strip_enable(adapter);
6764 else
6765 ixgbe_vlan_strip_disable(adapter);
6766
6769 if (changed & NETIF_F_RXALL) 6767 if (changed & NETIF_F_RXALL)
6770 need_reset = true; 6768 need_reset = true;
6771 6769
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index 04d901d0ff63..f0f06b2bc28b 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -436,7 +436,9 @@ struct mv643xx_eth_private {
436 /* 436 /*
437 * Hardware-specific parameters. 437 * Hardware-specific parameters.
438 */ 438 */
439#if defined(CONFIG_HAVE_CLK)
439 struct clk *clk; 440 struct clk *clk;
441#endif
440 unsigned int t_clk; 442 unsigned int t_clk;
441}; 443};
442 444
@@ -2895,17 +2897,17 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2895 mp->dev = dev; 2897 mp->dev = dev;
2896 2898
2897 /* 2899 /*
2898 * Get the clk rate, if there is one, otherwise use the default. 2900 * Start with a default rate, and if there is a clock, allow
2901 * it to override the default.
2899 */ 2902 */
2903 mp->t_clk = 133000000;
2904#if defined(CONFIG_HAVE_CLK)
2900 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0")); 2905 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
2901 if (!IS_ERR(mp->clk)) { 2906 if (!IS_ERR(mp->clk)) {
2902 clk_prepare_enable(mp->clk); 2907 clk_prepare_enable(mp->clk);
2903 mp->t_clk = clk_get_rate(mp->clk); 2908 mp->t_clk = clk_get_rate(mp->clk);
2904 } else {
2905 mp->t_clk = 133000000;
2906 printk(KERN_WARNING "Unable to get clock");
2907 } 2909 }
2908 2910#endif
2909 set_params(mp, pd); 2911 set_params(mp, pd);
2910 netif_set_real_num_tx_queues(dev, mp->txq_count); 2912 netif_set_real_num_tx_queues(dev, mp->txq_count);
2911 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2913 netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2995,10 +2997,13 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2995 phy_detach(mp->phy); 2997 phy_detach(mp->phy);
2996 cancel_work_sync(&mp->tx_timeout_task); 2998 cancel_work_sync(&mp->tx_timeout_task);
2997 2999
3000#if defined(CONFIG_HAVE_CLK)
2998 if (!IS_ERR(mp->clk)) { 3001 if (!IS_ERR(mp->clk)) {
2999 clk_disable_unprepare(mp->clk); 3002 clk_disable_unprepare(mp->clk);
3000 clk_put(mp->clk); 3003 clk_put(mp->clk);
3001 } 3004 }
3005#endif
3006
3002 free_netdev(mp->dev); 3007 free_netdev(mp->dev);
3003 3008
3004 platform_set_drvdata(pdev, NULL); 3009 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index cace36f2ab92..28a54451a3e5 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -4381,10 +4381,12 @@ static int sky2_set_features(struct net_device *dev, netdev_features_t features)
4381 struct sky2_port *sky2 = netdev_priv(dev); 4381 struct sky2_port *sky2 = netdev_priv(dev);
4382 netdev_features_t changed = dev->features ^ features; 4382 netdev_features_t changed = dev->features ^ features;
4383 4383
4384 if (changed & NETIF_F_RXCSUM) { 4384 if ((changed & NETIF_F_RXCSUM) &&
4385 bool on = features & NETIF_F_RXCSUM; 4385 !(sky2->hw->flags & SKY2_HW_NEW_LE)) {
4386 sky2_write32(sky2->hw, Q_ADDR(rxqaddr[sky2->port], Q_CSR), 4386 sky2_write32(sky2->hw,
4387 on ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM); 4387 Q_ADDR(rxqaddr[sky2->port], Q_CSR),
4388 (features & NETIF_F_RXCSUM)
4389 ? BMU_ENA_RX_CHKSUM : BMU_DIS_RX_CHKSUM);
4388 } 4390 }
4389 4391
4390 if (changed & NETIF_F_RXHASH) 4392 if (changed & NETIF_F_RXHASH)
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index 1bcead1fa2f6..842c8ce9494e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -617,7 +617,7 @@ static struct mlx4_cmd_info cmd_info[] = {
617 .out_is_imm = false, 617 .out_is_imm = false,
618 .encode_slave_id = false, 618 .encode_slave_id = false,
619 .verify = NULL, 619 .verify = NULL,
620 .wrapper = NULL 620 .wrapper = mlx4_QUERY_FW_wrapper
621 }, 621 },
622 { 622 {
623 .opcode = MLX4_CMD_QUERY_HCA, 623 .opcode = MLX4_CMD_QUERY_HCA,
@@ -635,7 +635,7 @@ static struct mlx4_cmd_info cmd_info[] = {
635 .out_is_imm = false, 635 .out_is_imm = false,
636 .encode_slave_id = false, 636 .encode_slave_id = false,
637 .verify = NULL, 637 .verify = NULL,
638 .wrapper = NULL 638 .wrapper = mlx4_QUERY_DEV_CAP_wrapper
639 }, 639 },
640 { 640 {
641 .opcode = MLX4_CMD_QUERY_FUNC_CAP, 641 .opcode = MLX4_CMD_QUERY_FUNC_CAP,
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_main.c b/drivers/net/ethernet/mellanox/mlx4/en_main.c
index 988b2424e1c6..69ba57270481 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_main.c
@@ -136,13 +136,12 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
136 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr; 136 struct mlx4_en_dev *mdev = (struct mlx4_en_dev *) endev_ptr;
137 struct mlx4_en_priv *priv; 137 struct mlx4_en_priv *priv;
138 138
139 if (!mdev->pndev[port])
140 return;
141
142 priv = netdev_priv(mdev->pndev[port]);
143 switch (event) { 139 switch (event) {
144 case MLX4_DEV_EVENT_PORT_UP: 140 case MLX4_DEV_EVENT_PORT_UP:
145 case MLX4_DEV_EVENT_PORT_DOWN: 141 case MLX4_DEV_EVENT_PORT_DOWN:
142 if (!mdev->pndev[port])
143 return;
144 priv = netdev_priv(mdev->pndev[port]);
146 /* To prevent races, we poll the link state in a separate 145 /* To prevent races, we poll the link state in a separate
147 task rather than changing it here */ 146 task rather than changing it here */
148 priv->link_state = event; 147 priv->link_state = event;
@@ -154,7 +153,10 @@ static void mlx4_en_event(struct mlx4_dev *dev, void *endev_ptr,
154 break; 153 break;
155 154
156 default: 155 default:
157 mlx4_warn(mdev, "Unhandled event: %d\n", event); 156 if (port < 1 || port > dev->caps.num_ports ||
157 !mdev->pndev[port])
158 return;
159 mlx4_warn(mdev, "Unhandled event %d for port %d\n", event, port);
158 } 160 }
159} 161}
160 162
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 3b6f8efbf141..bce98d9c0039 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -426,7 +426,7 @@ static int mlx4_eq_int(struct mlx4_dev *dev, struct mlx4_eq *eq)
426 426
427 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave); 427 mlx4_dbg(dev, "FLR event for slave: %d\n", flr_slave);
428 428
429 if (flr_slave > dev->num_slaves) { 429 if (flr_slave >= dev->num_slaves) {
430 mlx4_warn(dev, 430 mlx4_warn(dev,
431 "Got FLR for unknown function: %d\n", 431 "Got FLR for unknown function: %d\n",
432 flr_slave); 432 flr_slave);
diff --git a/drivers/net/ethernet/mellanox/mlx4/fw.c b/drivers/net/ethernet/mellanox/mlx4/fw.c
index 68f5cd6cb3c7..9c83bb8151ea 100644
--- a/drivers/net/ethernet/mellanox/mlx4/fw.c
+++ b/drivers/net/ethernet/mellanox/mlx4/fw.c
@@ -412,7 +412,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
412 outbox = mailbox->buf; 412 outbox = mailbox->buf;
413 413
414 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP, 414 err = mlx4_cmd_box(dev, 0, mailbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
415 MLX4_CMD_TIME_CLASS_A, !mlx4_is_slave(dev)); 415 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
416 if (err) 416 if (err)
417 goto out; 417 goto out;
418 418
@@ -590,8 +590,7 @@ int mlx4_QUERY_DEV_CAP(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
590 590
591 for (i = 1; i <= dev_cap->num_ports; ++i) { 591 for (i = 1; i <= dev_cap->num_ports; ++i) {
592 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT, 592 err = mlx4_cmd_box(dev, 0, mailbox->dma, i, 0, MLX4_CMD_QUERY_PORT,
593 MLX4_CMD_TIME_CLASS_B, 593 MLX4_CMD_TIME_CLASS_B, MLX4_CMD_NATIVE);
594 !mlx4_is_slave(dev));
595 if (err) 594 if (err)
596 goto out; 595 goto out;
597 596
@@ -669,6 +668,28 @@ out:
669 return err; 668 return err;
670} 669}
671 670
671int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
672 struct mlx4_vhcr *vhcr,
673 struct mlx4_cmd_mailbox *inbox,
674 struct mlx4_cmd_mailbox *outbox,
675 struct mlx4_cmd_info *cmd)
676{
677 int err = 0;
678 u8 field;
679
680 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_DEV_CAP,
681 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
682 if (err)
683 return err;
684
685 /* For guests, report Blueflame disabled */
686 MLX4_GET(field, outbox->buf, QUERY_DEV_CAP_BF_OFFSET);
687 field &= 0x7f;
688 MLX4_PUT(outbox->buf, field, QUERY_DEV_CAP_BF_OFFSET);
689
690 return 0;
691}
692
672int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 693int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
673 struct mlx4_vhcr *vhcr, 694 struct mlx4_vhcr *vhcr,
674 struct mlx4_cmd_mailbox *inbox, 695 struct mlx4_cmd_mailbox *inbox,
@@ -860,6 +881,9 @@ int mlx4_QUERY_FW(struct mlx4_dev *dev)
860 ((fw_ver & 0xffff0000ull) >> 16) | 881 ((fw_ver & 0xffff0000ull) >> 16) |
861 ((fw_ver & 0x0000ffffull) << 16); 882 ((fw_ver & 0x0000ffffull) << 16);
862 883
884 if (mlx4_is_slave(dev))
885 goto out;
886
863 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID); 887 MLX4_GET(lg, outbox, QUERY_FW_PPF_ID);
864 dev->caps.function = lg; 888 dev->caps.function = lg;
865 889
@@ -927,6 +951,27 @@ out:
927 return err; 951 return err;
928} 952}
929 953
954int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
955 struct mlx4_vhcr *vhcr,
956 struct mlx4_cmd_mailbox *inbox,
957 struct mlx4_cmd_mailbox *outbox,
958 struct mlx4_cmd_info *cmd)
959{
960 u8 *outbuf;
961 int err;
962
963 outbuf = outbox->buf;
964 err = mlx4_cmd_box(dev, 0, outbox->dma, 0, 0, MLX4_CMD_QUERY_FW,
965 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
966 if (err)
967 return err;
968
969 /* for slaves, zero out everything except FW version */
970 outbuf[0] = outbuf[1] = 0;
971 memset(&outbuf[8], 0, QUERY_FW_OUT_SIZE - 8);
972 return 0;
973}
974
930static void get_board_id(void *vsd, char *board_id) 975static void get_board_id(void *vsd, char *board_id)
931{ 976{
932 int i; 977 int i;
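
The mlx4 hunks above wire QUERY_FW and QUERY_DEV_CAP into per-slave command wrappers: the PF executes the real firmware command into the outbox and then masks fields a slave must not rely on (FW internals, Blueflame). A minimal sketch of that wrap-then-mask pattern, using placeholder foo_* names and a placeholder offset rather than the driver's real symbols:

#include <linux/types.h>
#include <linux/string.h>

struct foo_mailbox {
	u8 buf[256];
};

/* Placeholder for the native command that fills the mailbox with the
 * device's real answer; in the driver this is the actual firmware call.
 */
static int foo_query_caps_native(struct foo_mailbox *out)
{
	memset(out->buf, 0xff, sizeof(out->buf));	/* pretend "all caps set" */
	return 0;
}

#define FOO_CAP_BF_OFFSET 0x4c	/* placeholder offset of the capability byte */

/* Run the real query on behalf of a guest, then hide one capability bit. */
static int foo_query_caps_wrapper(struct foo_mailbox *out)
{
	int err = foo_query_caps_native(out);

	if (err)
		return err;

	out->buf[FOO_CAP_BF_OFFSET] &= 0x7f;	/* report the feature disabled */
	return 0;
}
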
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 2e024a68fa81..ee6f4fe00837 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -142,12 +142,6 @@ struct mlx4_port_config {
142 struct pci_dev *pdev; 142 struct pci_dev *pdev;
143}; 143};
144 144
145static inline int mlx4_master_get_num_eqs(struct mlx4_dev *dev)
146{
147 return dev->caps.reserved_eqs +
148 MLX4_MFUNC_EQ_NUM * (dev->num_slaves + 1);
149}
150
151int mlx4_check_port_params(struct mlx4_dev *dev, 145int mlx4_check_port_params(struct mlx4_dev *dev,
152 enum mlx4_port_type *port_type) 146 enum mlx4_port_type *port_type)
153{ 147{
@@ -217,6 +211,7 @@ static int mlx4_dev_cap(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap)
217 } 211 }
218 212
219 dev->caps.num_ports = dev_cap->num_ports; 213 dev->caps.num_ports = dev_cap->num_ports;
214 dev->phys_caps.num_phys_eqs = MLX4_MAX_EQ_NUM;
220 for (i = 1; i <= dev->caps.num_ports; ++i) { 215 for (i = 1; i <= dev->caps.num_ports; ++i) {
221 dev->caps.vl_cap[i] = dev_cap->max_vl[i]; 216 dev->caps.vl_cap[i] = dev_cap->max_vl[i];
222 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i]; 217 dev->caps.ib_mtu_cap[i] = dev_cap->ib_mtu[i];
@@ -435,12 +430,17 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
435 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz; 430 mlx4_log_num_mgm_entry_size = hca_param.log_mc_entry_sz;
436 431
437 memset(&dev_cap, 0, sizeof(dev_cap)); 432 memset(&dev_cap, 0, sizeof(dev_cap));
433 dev->caps.max_qp_dest_rdma = 1 << hca_param.log_rd_per_qp;
438 err = mlx4_dev_cap(dev, &dev_cap); 434 err = mlx4_dev_cap(dev, &dev_cap);
439 if (err) { 435 if (err) {
440 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n"); 436 mlx4_err(dev, "QUERY_DEV_CAP command failed, aborting.\n");
441 return err; 437 return err;
442 } 438 }
443 439
440 err = mlx4_QUERY_FW(dev);
441 if (err)
442 mlx4_err(dev, "QUERY_FW command failed: could not get FW version.\n");
443
444 page_size = ~dev->caps.page_size_cap + 1; 444 page_size = ~dev->caps.page_size_cap + 1;
445 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size); 445 mlx4_warn(dev, "HCA minimum page size:%d\n", page_size);
446 if (page_size > PAGE_SIZE) { 446 if (page_size > PAGE_SIZE) {
@@ -485,15 +485,15 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
485 dev->caps.num_mgms = 0; 485 dev->caps.num_mgms = 0;
486 dev->caps.num_amgms = 0; 486 dev->caps.num_amgms = 0;
487 487
488 for (i = 1; i <= dev->caps.num_ports; ++i)
489 dev->caps.port_mask[i] = dev->caps.port_type[i];
490
491 if (dev->caps.num_ports > MLX4_MAX_PORTS) { 488 if (dev->caps.num_ports > MLX4_MAX_PORTS) {
492 mlx4_err(dev, "HCA has %d ports, but we only support %d, " 489 mlx4_err(dev, "HCA has %d ports, but we only support %d, "
493 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS); 490 "aborting.\n", dev->caps.num_ports, MLX4_MAX_PORTS);
494 return -ENODEV; 491 return -ENODEV;
495 } 492 }
496 493
494 for (i = 1; i <= dev->caps.num_ports; ++i)
495 dev->caps.port_mask[i] = dev->caps.port_type[i];
496
497 if (dev->caps.uar_page_size * (dev->caps.num_uars - 497 if (dev->caps.uar_page_size * (dev->caps.num_uars -
498 dev->caps.reserved_uars) > 498 dev->caps.reserved_uars) >
499 pci_resource_len(dev->pdev, 2)) { 499 pci_resource_len(dev->pdev, 2)) {
@@ -504,18 +504,6 @@ static int mlx4_slave_cap(struct mlx4_dev *dev)
504 return -ENODEV; 504 return -ENODEV;
505 } 505 }
506 506
507#if 0
508 mlx4_warn(dev, "sqp_demux:%d\n", dev->caps.sqp_demux);
509 mlx4_warn(dev, "num_uars:%d reserved_uars:%d uar region:0x%x bar2:0x%llx\n",
510 dev->caps.num_uars, dev->caps.reserved_uars,
511 dev->caps.uar_page_size * dev->caps.num_uars,
512 pci_resource_len(dev->pdev, 2));
513 mlx4_warn(dev, "num_eqs:%d reserved_eqs:%d\n", dev->caps.num_eqs,
514 dev->caps.reserved_eqs);
515 mlx4_warn(dev, "num_pds:%d reserved_pds:%d slave_pd_shift:%d pd_base:%d\n",
516 dev->caps.num_pds, dev->caps.reserved_pds,
517 dev->caps.slave_pd_shift, dev->caps.pd_base);
518#endif
519 return 0; 507 return 0;
520} 508}
521 509
@@ -810,9 +798,8 @@ static int mlx4_init_cmpt_table(struct mlx4_dev *dev, u64 cmpt_base,
810 if (err) 798 if (err)
811 goto err_srq; 799 goto err_srq;
812 800
813 num_eqs = (mlx4_is_master(dev)) ? 801 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
814 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 802 dev->caps.num_eqs;
815 dev->caps.num_eqs;
816 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table, 803 err = mlx4_init_icm_table(dev, &priv->eq_table.cmpt_table,
817 cmpt_base + 804 cmpt_base +
818 ((u64) (MLX4_CMPT_TYPE_EQ * 805 ((u64) (MLX4_CMPT_TYPE_EQ *
@@ -874,9 +861,8 @@ static int mlx4_init_icm(struct mlx4_dev *dev, struct mlx4_dev_cap *dev_cap,
874 } 861 }
875 862
876 863
877 num_eqs = (mlx4_is_master(dev)) ? 864 num_eqs = (mlx4_is_master(dev)) ? dev->phys_caps.num_phys_eqs :
878 roundup_pow_of_two(mlx4_master_get_num_eqs(dev)) : 865 dev->caps.num_eqs;
879 dev->caps.num_eqs;
880 err = mlx4_init_icm_table(dev, &priv->eq_table.table, 866 err = mlx4_init_icm_table(dev, &priv->eq_table.table,
881 init_hca->eqc_base, dev_cap->eqc_entry_sz, 867 init_hca->eqc_base, dev_cap->eqc_entry_sz,
882 num_eqs, num_eqs, 0, 0); 868 num_eqs, num_eqs, 0, 0);
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 86b6e5a2fabf..e5d20220762c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1039,6 +1039,11 @@ int mlx4_init_resource_tracker(struct mlx4_dev *dev);
1039void mlx4_free_resource_tracker(struct mlx4_dev *dev, 1039void mlx4_free_resource_tracker(struct mlx4_dev *dev,
1040 enum mlx4_res_tracker_free_type type); 1040 enum mlx4_res_tracker_free_type type);
1041 1041
1042int mlx4_QUERY_FW_wrapper(struct mlx4_dev *dev, int slave,
1043 struct mlx4_vhcr *vhcr,
1044 struct mlx4_cmd_mailbox *inbox,
1045 struct mlx4_cmd_mailbox *outbox,
1046 struct mlx4_cmd_info *cmd);
1042int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave, 1047int mlx4_SET_PORT_wrapper(struct mlx4_dev *dev, int slave,
1043 struct mlx4_vhcr *vhcr, 1048 struct mlx4_vhcr *vhcr,
1044 struct mlx4_cmd_mailbox *inbox, 1049 struct mlx4_cmd_mailbox *inbox,
@@ -1054,6 +1059,11 @@ int mlx4_CLOSE_PORT_wrapper(struct mlx4_dev *dev, int slave,
1054 struct mlx4_cmd_mailbox *inbox, 1059 struct mlx4_cmd_mailbox *inbox,
1055 struct mlx4_cmd_mailbox *outbox, 1060 struct mlx4_cmd_mailbox *outbox,
1056 struct mlx4_cmd_info *cmd); 1061 struct mlx4_cmd_info *cmd);
1062int mlx4_QUERY_DEV_CAP_wrapper(struct mlx4_dev *dev, int slave,
1063 struct mlx4_vhcr *vhcr,
1064 struct mlx4_cmd_mailbox *inbox,
1065 struct mlx4_cmd_mailbox *outbox,
1066 struct mlx4_cmd_info *cmd);
1057int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave, 1067int mlx4_QUERY_PORT_wrapper(struct mlx4_dev *dev, int slave,
1058 struct mlx4_vhcr *vhcr, 1068 struct mlx4_vhcr *vhcr,
1059 struct mlx4_cmd_mailbox *inbox, 1069 struct mlx4_cmd_mailbox *inbox,
diff --git a/drivers/net/ethernet/mellanox/mlx4/port.c b/drivers/net/ethernet/mellanox/mlx4/port.c
index 1fe2c7a8b40c..a8fb52992c64 100644
--- a/drivers/net/ethernet/mellanox/mlx4/port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/port.c
@@ -697,10 +697,10 @@ static int mlx4_common_set_port(struct mlx4_dev *dev, int slave, u32 in_mod,
697 if (slave != dev->caps.function) 697 if (slave != dev->caps.function)
698 memset(inbox->buf, 0, 256); 698 memset(inbox->buf, 0, 256);
699 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) { 699 if (dev->flags & MLX4_FLAG_OLD_PORT_CMDS) {
700 *(u8 *) inbox->buf = !!reset_qkey_viols << 6; 700 *(u8 *) inbox->buf |= !!reset_qkey_viols << 6;
701 ((__be32 *) inbox->buf)[2] = agg_cap_mask; 701 ((__be32 *) inbox->buf)[2] = agg_cap_mask;
702 } else { 702 } else {
703 ((u8 *) inbox->buf)[3] = !!reset_qkey_viols; 703 ((u8 *) inbox->buf)[3] |= !!reset_qkey_viols;
704 ((__be32 *) inbox->buf)[1] = agg_cap_mask; 704 ((__be32 *) inbox->buf)[1] = agg_cap_mask;
705 } 705 }
706 706
diff --git a/drivers/net/ethernet/mellanox/mlx4/profile.c b/drivers/net/ethernet/mellanox/mlx4/profile.c
index 06e5adeb76f7..b83bc928d52a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/profile.c
+++ b/drivers/net/ethernet/mellanox/mlx4/profile.c
@@ -126,7 +126,9 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
126 profile[MLX4_RES_AUXC].num = request->num_qp; 126 profile[MLX4_RES_AUXC].num = request->num_qp;
127 profile[MLX4_RES_SRQ].num = request->num_srq; 127 profile[MLX4_RES_SRQ].num = request->num_srq;
128 profile[MLX4_RES_CQ].num = request->num_cq; 128 profile[MLX4_RES_CQ].num = request->num_cq;
129 profile[MLX4_RES_EQ].num = min_t(unsigned, dev_cap->max_eqs, MAX_MSIX); 129 profile[MLX4_RES_EQ].num = mlx4_is_mfunc(dev) ?
130 dev->phys_caps.num_phys_eqs :
131 min_t(unsigned, dev_cap->max_eqs, MAX_MSIX);
130 profile[MLX4_RES_DMPT].num = request->num_mpt; 132 profile[MLX4_RES_DMPT].num = request->num_mpt;
131 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS; 133 profile[MLX4_RES_CMPT].num = MLX4_NUM_CMPTS;
132 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg); 134 profile[MLX4_RES_MTT].num = request->num_mtt * (1 << log_mtts_per_seg);
@@ -215,9 +217,10 @@ u64 mlx4_make_profile(struct mlx4_dev *dev,
215 init_hca->log_num_cqs = profile[i].log_num; 217 init_hca->log_num_cqs = profile[i].log_num;
216 break; 218 break;
217 case MLX4_RES_EQ: 219 case MLX4_RES_EQ:
218 dev->caps.num_eqs = profile[i].num; 220 dev->caps.num_eqs = roundup_pow_of_two(min_t(unsigned, dev_cap->max_eqs,
221 MAX_MSIX));
219 init_hca->eqc_base = profile[i].start; 222 init_hca->eqc_base = profile[i].start;
220 init_hca->log_num_eqs = profile[i].log_num; 223 init_hca->log_num_eqs = ilog2(dev->caps.num_eqs);
221 break; 224 break;
222 case MLX4_RES_DMPT: 225 case MLX4_RES_DMPT:
223 dev->caps.num_mpts = profile[i].num; 226 dev->caps.num_mpts = profile[i].num;
diff --git a/drivers/net/ethernet/nxp/lpc_eth.c b/drivers/net/ethernet/nxp/lpc_eth.c
index 8d2666fcffd7..083d6715335c 100644
--- a/drivers/net/ethernet/nxp/lpc_eth.c
+++ b/drivers/net/ethernet/nxp/lpc_eth.c
@@ -946,16 +946,16 @@ static void __lpc_handle_xmit(struct net_device *ndev)
946 /* Update stats */ 946 /* Update stats */
947 ndev->stats.tx_packets++; 947 ndev->stats.tx_packets++;
948 ndev->stats.tx_bytes += skb->len; 948 ndev->stats.tx_bytes += skb->len;
949
950 /* Free buffer */
951 dev_kfree_skb_irq(skb);
952 } 949 }
950 dev_kfree_skb_irq(skb);
953 951
954 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base)); 952 txcidx = readl(LPC_ENET_TXCONSUMEINDEX(pldat->net_base));
955 } 953 }
956 954
957 if (netif_queue_stopped(ndev)) 955 if (pldat->num_used_tx_buffs <= ENET_TX_DESC/2) {
958 netif_wake_queue(ndev); 956 if (netif_queue_stopped(ndev))
957 netif_wake_queue(ndev);
958 }
959} 959}
960 960
961static int __lpc_handle_recv(struct net_device *ndev, int budget) 961static int __lpc_handle_recv(struct net_device *ndev, int budget)
@@ -1320,6 +1320,7 @@ static const struct net_device_ops lpc_netdev_ops = {
1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list, 1320 .ndo_set_rx_mode = lpc_eth_set_multicast_list,
1321 .ndo_do_ioctl = lpc_eth_ioctl, 1321 .ndo_do_ioctl = lpc_eth_ioctl,
1322 .ndo_set_mac_address = lpc_set_mac_address, 1322 .ndo_set_mac_address = lpc_set_mac_address,
1323 .ndo_change_mtu = eth_change_mtu,
1323}; 1324};
1324 1325
1325static int lpc_eth_drv_probe(struct platform_device *pdev) 1326static int lpc_eth_drv_probe(struct platform_device *pdev)
diff --git a/drivers/net/ethernet/rdc/r6040.c b/drivers/net/ethernet/rdc/r6040.c
index 4de73643fec6..d1827e887f4e 100644
--- a/drivers/net/ethernet/rdc/r6040.c
+++ b/drivers/net/ethernet/rdc/r6040.c
@@ -1096,20 +1096,20 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1096 if (err) { 1096 if (err) {
1097 dev_err(&pdev->dev, "32-bit PCI DMA addresses" 1097 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1098 "not supported by the card\n"); 1098 "not supported by the card\n");
1099 goto err_out; 1099 goto err_out_disable_dev;
1100 } 1100 }
1101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)); 1101 err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
1102 if (err) { 1102 if (err) {
1103 dev_err(&pdev->dev, "32-bit PCI DMA addresses" 1103 dev_err(&pdev->dev, "32-bit PCI DMA addresses"
1104 "not supported by the card\n"); 1104 "not supported by the card\n");
1105 goto err_out; 1105 goto err_out_disable_dev;
1106 } 1106 }
1107 1107
1108 /* IO Size check */ 1108 /* IO Size check */
1109 if (pci_resource_len(pdev, bar) < io_size) { 1109 if (pci_resource_len(pdev, bar) < io_size) {
1110 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n"); 1110 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
1111 err = -EIO; 1111 err = -EIO;
1112 goto err_out; 1112 goto err_out_disable_dev;
1113 } 1113 }
1114 1114
1115 pci_set_master(pdev); 1115 pci_set_master(pdev);
@@ -1117,7 +1117,7 @@ static int __devinit r6040_init_one(struct pci_dev *pdev,
1117 dev = alloc_etherdev(sizeof(struct r6040_private)); 1117 dev = alloc_etherdev(sizeof(struct r6040_private));
1118 if (!dev) { 1118 if (!dev) {
1119 err = -ENOMEM; 1119 err = -ENOMEM;
1120 goto err_out; 1120 goto err_out_disable_dev;
1121 } 1121 }
1122 SET_NETDEV_DEV(dev, &pdev->dev); 1122 SET_NETDEV_DEV(dev, &pdev->dev);
1123 lp = netdev_priv(dev); 1123 lp = netdev_priv(dev);
@@ -1233,11 +1233,15 @@ err_out_mdio_irq:
1233err_out_mdio: 1233err_out_mdio:
1234 mdiobus_free(lp->mii_bus); 1234 mdiobus_free(lp->mii_bus);
1235err_out_unmap: 1235err_out_unmap:
1236 netif_napi_del(&lp->napi);
1237 pci_set_drvdata(pdev, NULL);
1236 pci_iounmap(pdev, ioaddr); 1238 pci_iounmap(pdev, ioaddr);
1237err_out_free_res: 1239err_out_free_res:
1238 pci_release_regions(pdev); 1240 pci_release_regions(pdev);
1239err_out_free_dev: 1241err_out_free_dev:
1240 free_netdev(dev); 1242 free_netdev(dev);
1243err_out_disable_dev:
1244 pci_disable_device(pdev);
1241err_out: 1245err_out:
1242 return err; 1246 return err;
1243} 1247}
@@ -1251,6 +1255,9 @@ static void __devexit r6040_remove_one(struct pci_dev *pdev)
1251 mdiobus_unregister(lp->mii_bus); 1255 mdiobus_unregister(lp->mii_bus);
1252 kfree(lp->mii_bus->irq); 1256 kfree(lp->mii_bus->irq);
1253 mdiobus_free(lp->mii_bus); 1257 mdiobus_free(lp->mii_bus);
1258 netif_napi_del(&lp->napi);
1259 pci_set_drvdata(pdev, NULL);
1260 pci_iounmap(pdev, lp->base);
1254 pci_release_regions(pdev); 1261 pci_release_regions(pdev);
1255 free_netdev(dev); 1262 free_netdev(dev);
1256 pci_disable_device(pdev); 1263 pci_disable_device(pdev);
diff --git a/drivers/net/ethernet/realtek/8139cp.c b/drivers/net/ethernet/realtek/8139cp.c
index 5eef290997f9..995d0cfc4c06 100644
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -979,6 +979,17 @@ static void cp_init_hw (struct cp_private *cp)
979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0))); 979 cpw32_f (MAC0 + 0, le32_to_cpu (*(__le32 *) (dev->dev_addr + 0)));
980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4))); 980 cpw32_f (MAC0 + 4, le32_to_cpu (*(__le32 *) (dev->dev_addr + 4)));
981 981
982 cpw32_f(HiTxRingAddr, 0);
983 cpw32_f(HiTxRingAddr + 4, 0);
984
985 ring_dma = cp->ring_dma;
986 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
987 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
988
989 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
990 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
991 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
992
982 cp_start_hw(cp); 993 cp_start_hw(cp);
983 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */ 994 cpw8(TxThresh, 0x06); /* XXX convert magic num to a constant */
984 995
@@ -992,17 +1003,6 @@ static void cp_init_hw (struct cp_private *cp)
992 1003
993 cpw8(Config5, cpr8(Config5) & PMEStatus); 1004 cpw8(Config5, cpr8(Config5) & PMEStatus);
994 1005
995 cpw32_f(HiTxRingAddr, 0);
996 cpw32_f(HiTxRingAddr + 4, 0);
997
998 ring_dma = cp->ring_dma;
999 cpw32_f(RxRingAddr, ring_dma & 0xffffffff);
1000 cpw32_f(RxRingAddr + 4, (ring_dma >> 16) >> 16);
1001
1002 ring_dma += sizeof(struct cp_desc) * CP_RX_RING_SIZE;
1003 cpw32_f(TxRingAddr, ring_dma & 0xffffffff);
1004 cpw32_f(TxRingAddr + 4, (ring_dma >> 16) >> 16);
1005
1006 cpw16(MultiIntr, 0); 1006 cpw16(MultiIntr, 0);
1007 1007
1008 cpw8_f(Cfg9346, Cfg9346_Lock); 1008 cpw8_f(Cfg9346, Cfg9346_Lock);
@@ -1636,7 +1636,7 @@ static void eeprom_cmd(void __iomem *ee_addr, int cmd, int cmd_len)
1636 1636
1637static void eeprom_cmd_end(void __iomem *ee_addr) 1637static void eeprom_cmd_end(void __iomem *ee_addr)
1638{ 1638{
1639 writeb (~EE_CS, ee_addr); 1639 writeb(0, ee_addr);
1640 eeprom_delay (); 1640 eeprom_delay ();
1641} 1641}
1642 1642
diff --git a/drivers/net/ethernet/realtek/8139too.c b/drivers/net/ethernet/realtek/8139too.c
index 03df076ed596..1d83565cc6af 100644
--- a/drivers/net/ethernet/realtek/8139too.c
+++ b/drivers/net/ethernet/realtek/8139too.c
@@ -1173,7 +1173,7 @@ static int __devinit read_eeprom (void __iomem *ioaddr, int location, int addr_l
1173 } 1173 }
1174 1174
1175 /* Terminate the EEPROM access. */ 1175 /* Terminate the EEPROM access. */
1176 RTL_W8 (Cfg9346, ~EE_CS); 1176 RTL_W8(Cfg9346, 0);
1177 eeprom_delay (); 1177 eeprom_delay ();
1178 1178
1179 return retval; 1179 return retval;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 00b4f56a671c..7260aa79466a 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -5889,11 +5889,7 @@ static void rtl_slow_event_work(struct rtl8169_private *tp)
5889 if (status & LinkChg) 5889 if (status & LinkChg)
5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true); 5890 __rtl8169_check_link_status(dev, tp, tp->mmio_addr, true);
5891 5891
5892 napi_disable(&tp->napi); 5892 rtl_irq_enable_all(tp);
5893 rtl_irq_disable(tp);
5894
5895 napi_enable(&tp->napi);
5896 napi_schedule(&tp->napi);
5897} 5893}
5898 5894
5899static void rtl_task(struct work_struct *work) 5895static void rtl_task(struct work_struct *work)
@@ -6345,6 +6341,8 @@ static void __devexit rtl_remove_one(struct pci_dev *pdev)
6345 6341
6346 cancel_work_sync(&tp->wk.work); 6342 cancel_work_sync(&tp->wk.work);
6347 6343
6344 netif_napi_del(&tp->napi);
6345
6348 unregister_netdev(dev); 6346 unregister_netdev(dev);
6349 6347
6350 rtl_release_firmware(tp); 6348 rtl_release_firmware(tp);
@@ -6668,6 +6666,7 @@ out:
6668 return rc; 6666 return rc;
6669 6667
6670err_out_msi_4: 6668err_out_msi_4:
6669 netif_napi_del(&tp->napi);
6671 rtl_disable_msi(pdev, tp); 6670 rtl_disable_msi(pdev, tp);
6672 iounmap(ioaddr); 6671 iounmap(ioaddr);
6673err_out_free_res_3: 6672err_out_free_res_3:
diff --git a/drivers/net/ethernet/renesas/sh_eth.c b/drivers/net/ethernet/renesas/sh_eth.c
index be3c22179161..667169b82526 100644
--- a/drivers/net/ethernet/renesas/sh_eth.c
+++ b/drivers/net/ethernet/renesas/sh_eth.c
@@ -1101,8 +1101,12 @@ static int sh_eth_rx(struct net_device *ndev)
1101 1101
1102 /* Restart Rx engine if stopped. */ 1102 /* Restart Rx engine if stopped. */
1103 /* If we don't need to check status, don't. -KDU */ 1103 /* If we don't need to check status, don't. -KDU */
1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) 1104 if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R)) {
1105 /* fix the values for the next receiving */
1106 mdp->cur_rx = mdp->dirty_rx = (sh_eth_read(ndev, RDFAR) -
1107 sh_eth_read(ndev, RDLAR)) >> 4;
1105 sh_eth_write(ndev, EDRRR_R, EDRRR); 1108 sh_eth_write(ndev, EDRRR_R, EDRRR);
1109 }
1106 1110
1107 return 0; 1111 return 0;
1108} 1112}
@@ -1199,8 +1203,6 @@ static void sh_eth_error(struct net_device *ndev, int intr_status)
1199 /* Receive Descriptor Empty int */ 1203 /* Receive Descriptor Empty int */
1200 ndev->stats.rx_over_errors++; 1204 ndev->stats.rx_over_errors++;
1201 1205
1202 if (sh_eth_read(ndev, EDRRR) ^ EDRRR_R)
1203 sh_eth_write(ndev, EDRRR_R, EDRRR);
1204 if (netif_msg_rx_err(mdp)) 1206 if (netif_msg_rx_err(mdp))
1205 dev_err(&ndev->dev, "Receive Descriptor Empty\n"); 1207 dev_err(&ndev->dev, "Receive Descriptor Empty\n");
1206 } 1208 }
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index dab9c6f671ec..1466e5d2af44 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -2390,11 +2390,11 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2390 2390
2391 retval = smsc911x_request_resources(pdev); 2391 retval = smsc911x_request_resources(pdev);
2392 if (retval) 2392 if (retval)
2393 goto out_return_resources; 2393 goto out_request_resources_fail;
2394 2394
2395 retval = smsc911x_enable_resources(pdev); 2395 retval = smsc911x_enable_resources(pdev);
2396 if (retval) 2396 if (retval)
2397 goto out_disable_resources; 2397 goto out_enable_resources_fail;
2398 2398
2399 if (pdata->ioaddr == NULL) { 2399 if (pdata->ioaddr == NULL) {
2400 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid"); 2400 SMSC_WARN(pdata, probe, "Error smsc911x base address invalid");
@@ -2501,8 +2501,9 @@ out_free_irq:
2501 free_irq(dev->irq, dev); 2501 free_irq(dev->irq, dev);
2502out_disable_resources: 2502out_disable_resources:
2503 (void)smsc911x_disable_resources(pdev); 2503 (void)smsc911x_disable_resources(pdev);
2504out_return_resources: 2504out_enable_resources_fail:
2505 smsc911x_free_resources(pdev); 2505 smsc911x_free_resources(pdev);
2506out_request_resources_fail:
2506 platform_set_drvdata(pdev, NULL); 2507 platform_set_drvdata(pdev, NULL);
2507 iounmap(pdata->ioaddr); 2508 iounmap(pdata->ioaddr);
2508 free_netdev(dev); 2509 free_netdev(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig
index 036428348faa..9f448279e12a 100644
--- a/drivers/net/ethernet/stmicro/stmmac/Kconfig
+++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig
@@ -13,7 +13,7 @@ config STMMAC_ETH
13if STMMAC_ETH 13if STMMAC_ETH
14 14
15config STMMAC_PLATFORM 15config STMMAC_PLATFORM
16 tristate "STMMAC platform bus support" 16 bool "STMMAC Platform bus support"
17 depends on STMMAC_ETH 17 depends on STMMAC_ETH
18 default y 18 default y
19 ---help--- 19 ---help---
@@ -26,7 +26,7 @@ config STMMAC_PLATFORM
26 If unsure, say N. 26 If unsure, say N.
27 27
28config STMMAC_PCI 28config STMMAC_PCI
29 tristate "STMMAC support on PCI bus (EXPERIMENTAL)" 29 bool "STMMAC PCI bus support (EXPERIMENTAL)"
30 depends on STMMAC_ETH && PCI && EXPERIMENTAL 30 depends on STMMAC_ETH && PCI && EXPERIMENTAL
31 ---help--- 31 ---help---
32 This is to select the Synopsys DWMAC available on PCI devices, 32 This is to select the Synopsys DWMAC available on PCI devices,
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac.h b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
index 6b5d060ee9de..dc20c56efc9d 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac.h
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac.h
@@ -26,6 +26,7 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/stmmac.h> 27#include <linux/stmmac.h>
28#include <linux/phy.h> 28#include <linux/phy.h>
29#include <linux/pci.h>
29#include "common.h" 30#include "common.h"
30#ifdef CONFIG_STMMAC_TIMER 31#ifdef CONFIG_STMMAC_TIMER
31#include "stmmac_timer.h" 32#include "stmmac_timer.h"
@@ -95,7 +96,6 @@ extern int stmmac_mdio_register(struct net_device *ndev);
95extern void stmmac_set_ethtool_ops(struct net_device *netdev); 96extern void stmmac_set_ethtool_ops(struct net_device *netdev);
96extern const struct stmmac_desc_ops enh_desc_ops; 97extern const struct stmmac_desc_ops enh_desc_ops;
97extern const struct stmmac_desc_ops ndesc_ops; 98extern const struct stmmac_desc_ops ndesc_ops;
98
99int stmmac_freeze(struct net_device *ndev); 99int stmmac_freeze(struct net_device *ndev);
100int stmmac_restore(struct net_device *ndev); 100int stmmac_restore(struct net_device *ndev);
101int stmmac_resume(struct net_device *ndev); 101int stmmac_resume(struct net_device *ndev);
@@ -109,7 +109,7 @@ struct stmmac_priv *stmmac_dvr_probe(struct device *device,
109static inline int stmmac_clk_enable(struct stmmac_priv *priv) 109static inline int stmmac_clk_enable(struct stmmac_priv *priv)
110{ 110{
111 if (!IS_ERR(priv->stmmac_clk)) 111 if (!IS_ERR(priv->stmmac_clk))
112 return clk_enable(priv->stmmac_clk); 112 return clk_prepare_enable(priv->stmmac_clk);
113 113
114 return 0; 114 return 0;
115} 115}
@@ -119,7 +119,7 @@ static inline void stmmac_clk_disable(struct stmmac_priv *priv)
119 if (IS_ERR(priv->stmmac_clk)) 119 if (IS_ERR(priv->stmmac_clk))
120 return; 120 return;
121 121
122 clk_disable(priv->stmmac_clk); 122 clk_disable_unprepare(priv->stmmac_clk);
123} 123}
124static inline int stmmac_clk_get(struct stmmac_priv *priv) 124static inline int stmmac_clk_get(struct stmmac_priv *priv)
125{ 125{
@@ -143,3 +143,60 @@ static inline int stmmac_clk_get(struct stmmac_priv *priv)
143 return 0; 143 return 0;
144} 144}
145#endif /* CONFIG_HAVE_CLK */ 145#endif /* CONFIG_HAVE_CLK */
146
147
148#ifdef CONFIG_STMMAC_PLATFORM
149extern struct platform_driver stmmac_pltfr_driver;
150static inline int stmmac_register_platform(void)
151{
152 int err;
153
154 err = platform_driver_register(&stmmac_pltfr_driver);
155 if (err)
156 pr_err("stmmac: failed to register the platform driver\n");
157
158 return err;
159}
160static inline void stmmac_unregister_platform(void)
161{
 162	platform_driver_unregister(&stmmac_pltfr_driver);
163}
164#else
165static inline int stmmac_register_platform(void)
166{
167 pr_debug("stmmac: do not register the platf driver\n");
168
169 return -EINVAL;
170}
171static inline void stmmac_unregister_platform(void)
172{
173}
174#endif /* CONFIG_STMMAC_PLATFORM */
175
176#ifdef CONFIG_STMMAC_PCI
177extern struct pci_driver stmmac_pci_driver;
178static inline int stmmac_register_pci(void)
179{
180 int err;
181
182 err = pci_register_driver(&stmmac_pci_driver);
183 if (err)
184 pr_err("stmmac: failed to register the PCI driver\n");
185
186 return err;
187}
188static inline void stmmac_unregister_pci(void)
189{
190 pci_unregister_driver(&stmmac_pci_driver);
191}
192#else
193static inline int stmmac_register_pci(void)
194{
195 pr_debug("stmmac: do not register the PCI driver\n");
196
197 return -EINVAL;
198}
199static inline void stmmac_unregister_pci(void)
200{
201}
202#endif /* CONFIG_STMMAC_PCI */
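
The stmmac.h hunk above gives each bus backend (platform, PCI) a register/unregister helper and compiles the disabled backend down to an inline stub returning -EINVAL, so a single module init can try both. A minimal sketch of the same pattern, with hypothetical foo_* names rather than the driver's symbols:

#include <linux/errno.h>
#include <linux/platform_device.h>

#ifdef CONFIG_FOO_PLATFORM
extern struct platform_driver foo_pltfr_driver;

static inline int foo_register_platform(void)
{
	return platform_driver_register(&foo_pltfr_driver);
}

static inline void foo_unregister_platform(void)
{
	platform_driver_unregister(&foo_pltfr_driver);
}
#else
/* Backend compiled out: fail registration so init can fall back to the
 * other bus, and make unregister a no-op.
 */
static inline int foo_register_platform(void)
{
	return -EINVAL;
}

static inline void foo_unregister_platform(void) { }
#endif /* CONFIG_FOO_PLATFORM */

With both helpers shaped this way, a combined init only has to fail when neither backend registered, which is what the stmmac_init() hunk further down does.
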
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 70966330f44e..51b3b68528ee 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -833,8 +833,9 @@ static u32 stmmac_get_synopsys_id(struct stmmac_priv *priv)
833 833
834/** 834/**
835 * stmmac_selec_desc_mode 835 * stmmac_selec_desc_mode
836 * @dev : device pointer 836 * @priv : private structure
837 * Description: select the Enhanced/Alternate or Normal descriptors */ 837 * Description: select the Enhanced/Alternate or Normal descriptors
838 */
838static void stmmac_selec_desc_mode(struct stmmac_priv *priv) 839static void stmmac_selec_desc_mode(struct stmmac_priv *priv)
839{ 840{
840 if (priv->plat->enh_desc) { 841 if (priv->plat->enh_desc) {
@@ -1861,6 +1862,8 @@ static int stmmac_hw_init(struct stmmac_priv *priv)
1861/** 1862/**
1862 * stmmac_dvr_probe 1863 * stmmac_dvr_probe
1863 * @device: device pointer 1864 * @device: device pointer
1865 * @plat_dat: platform data pointer
1866 * @addr: iobase memory address
1864 * Description: this is the main probe function used to 1867 * Description: this is the main probe function used to
1865 * call the alloc_etherdev, allocate the priv structure. 1868 * call the alloc_etherdev, allocate the priv structure.
1866 */ 1869 */
@@ -2090,6 +2093,34 @@ int stmmac_restore(struct net_device *ndev)
2090} 2093}
2091#endif /* CONFIG_PM */ 2094#endif /* CONFIG_PM */
2092 2095
 2096/* Driver can be configured w/ either or both of the PCI and Platform drivers
2097 * depending on the configuration selected.
2098 */
2099static int __init stmmac_init(void)
2100{
2101 int err_plt = 0;
2102 int err_pci = 0;
2103
2104 err_plt = stmmac_register_platform();
2105 err_pci = stmmac_register_pci();
2106
2107 if ((err_pci) && (err_plt)) {
2108 pr_err("stmmac: driver registration failed\n");
2109 return -EINVAL;
2110 }
2111
2112 return 0;
2113}
2114
2115static void __exit stmmac_exit(void)
2116{
2117 stmmac_unregister_platform();
2118 stmmac_unregister_pci();
2119}
2120
2121module_init(stmmac_init);
2122module_exit(stmmac_exit);
2123
2093#ifndef MODULE 2124#ifndef MODULE
2094static int __init stmmac_cmdline_opt(char *str) 2125static int __init stmmac_cmdline_opt(char *str)
2095{ 2126{
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
index 58fab5303e9c..cf826e6b6aa1 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_pci.c
@@ -179,7 +179,7 @@ static DEFINE_PCI_DEVICE_TABLE(stmmac_id_table) = {
179 179
180MODULE_DEVICE_TABLE(pci, stmmac_id_table); 180MODULE_DEVICE_TABLE(pci, stmmac_id_table);
181 181
182static struct pci_driver stmmac_driver = { 182struct pci_driver stmmac_pci_driver = {
183 .name = STMMAC_RESOURCE_NAME, 183 .name = STMMAC_RESOURCE_NAME,
184 .id_table = stmmac_id_table, 184 .id_table = stmmac_id_table,
185 .probe = stmmac_pci_probe, 185 .probe = stmmac_pci_probe,
@@ -190,33 +190,6 @@ static struct pci_driver stmmac_driver = {
190#endif 190#endif
191}; 191};
192 192
193/**
194 * stmmac_init_module - Entry point for the driver
195 * Description: This function is the entry point for the driver.
196 */
197static int __init stmmac_init_module(void)
198{
199 int ret;
200
201 ret = pci_register_driver(&stmmac_driver);
202 if (ret < 0)
203 pr_err("%s: ERROR: driver registration failed\n", __func__);
204
205 return ret;
206}
207
208/**
209 * stmmac_cleanup_module - Cleanup routine for the driver
210 * Description: This function is the cleanup routine for the driver.
211 */
212static void __exit stmmac_cleanup_module(void)
213{
214 pci_unregister_driver(&stmmac_driver);
215}
216
217module_init(stmmac_init_module);
218module_exit(stmmac_cleanup_module);
219
220MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver"); 193MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PCI driver");
221MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>"); 194MODULE_AUTHOR("Rayagond Kokatanur <rayagond.kokatanur@vayavyalabs.com>");
222MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 195MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 3dd8f0803808..680d2b8dfe27 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -255,7 +255,7 @@ static const struct of_device_id stmmac_dt_ids[] = {
255}; 255};
256MODULE_DEVICE_TABLE(of, stmmac_dt_ids); 256MODULE_DEVICE_TABLE(of, stmmac_dt_ids);
257 257
258static struct platform_driver stmmac_driver = { 258struct platform_driver stmmac_pltfr_driver = {
259 .probe = stmmac_pltfr_probe, 259 .probe = stmmac_pltfr_probe,
260 .remove = stmmac_pltfr_remove, 260 .remove = stmmac_pltfr_remove,
261 .driver = { 261 .driver = {
@@ -266,8 +266,6 @@ static struct platform_driver stmmac_driver = {
266 }, 266 },
267}; 267};
268 268
269module_platform_driver(stmmac_driver);
270
271MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); 269MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver");
272MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); 270MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
273MODULE_LICENSE("GPL"); 271MODULE_LICENSE("GPL");
diff --git a/drivers/net/ethernet/sun/niu.c b/drivers/net/ethernet/sun/niu.c
index 703c8cce2a2c..8c726b7004d3 100644
--- a/drivers/net/ethernet/sun/niu.c
+++ b/drivers/net/ethernet/sun/niu.c
@@ -3598,7 +3598,6 @@ static int release_tx_packet(struct niu *np, struct tx_ring_info *rp, int idx)
3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp) 3598static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3599{ 3599{
3600 struct netdev_queue *txq; 3600 struct netdev_queue *txq;
3601 unsigned int tx_bytes;
3602 u16 pkt_cnt, tmp; 3601 u16 pkt_cnt, tmp;
3603 int cons, index; 3602 int cons, index;
3604 u64 cs; 3603 u64 cs;
@@ -3621,18 +3620,12 @@ static void niu_tx_work(struct niu *np, struct tx_ring_info *rp)
3621 netif_printk(np, tx_done, KERN_DEBUG, np->dev, 3620 netif_printk(np, tx_done, KERN_DEBUG, np->dev,
3622 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons); 3621 "%s() pkt_cnt[%u] cons[%d]\n", __func__, pkt_cnt, cons);
3623 3622
3624 tx_bytes = 0; 3623 while (pkt_cnt--)
3625 tmp = pkt_cnt;
3626 while (tmp--) {
3627 tx_bytes += rp->tx_buffs[cons].skb->len;
3628 cons = release_tx_packet(np, rp, cons); 3624 cons = release_tx_packet(np, rp, cons);
3629 }
3630 3625
3631 rp->cons = cons; 3626 rp->cons = cons;
3632 smp_mb(); 3627 smp_mb();
3633 3628
3634 netdev_tx_completed_queue(txq, pkt_cnt, tx_bytes);
3635
3636out: 3629out:
3637 if (unlikely(netif_tx_queue_stopped(txq) && 3630 if (unlikely(netif_tx_queue_stopped(txq) &&
3638 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) { 3631 (niu_tx_avail(rp) > NIU_TX_WAKEUP_THRESH(rp)))) {
@@ -4333,7 +4326,6 @@ static void niu_free_channels(struct niu *np)
4333 struct tx_ring_info *rp = &np->tx_rings[i]; 4326 struct tx_ring_info *rp = &np->tx_rings[i];
4334 4327
4335 niu_free_tx_ring_info(np, rp); 4328 niu_free_tx_ring_info(np, rp);
4336 netdev_tx_reset_queue(netdev_get_tx_queue(np->dev, i));
4337 } 4329 }
4338 kfree(np->tx_rings); 4330 kfree(np->tx_rings);
4339 np->tx_rings = NULL; 4331 np->tx_rings = NULL;
@@ -6739,8 +6731,6 @@ static netdev_tx_t niu_start_xmit(struct sk_buff *skb,
6739 prod = NEXT_TX(rp, prod); 6731 prod = NEXT_TX(rp, prod);
6740 } 6732 }
6741 6733
6742 netdev_tx_sent_queue(txq, skb->len);
6743
6744 if (prod < rp->prod) 6734 if (prod < rp->prod)
6745 rp->wrap_bit ^= TX_RING_KICK_WRAP; 6735 rp->wrap_bit ^= TX_RING_KICK_WRAP;
6746 rp->prod = prod; 6736 rp->prod = prod;
diff --git a/drivers/net/ethernet/tile/Kconfig b/drivers/net/ethernet/tile/Kconfig
index 2d9218f86bca..098b1c42b393 100644
--- a/drivers/net/ethernet/tile/Kconfig
+++ b/drivers/net/ethernet/tile/Kconfig
@@ -7,6 +7,8 @@ config TILE_NET
7 depends on TILE 7 depends on TILE
8 default y 8 default y
9 select CRC32 9 select CRC32
10 select TILE_GXIO_MPIPE if TILEGX
11 select HIGH_RES_TIMERS if TILEGX
10 ---help--- 12 ---help---
11 This is a standard Linux network device driver for the 13 This is a standard Linux network device driver for the
12 on-chip Tilera Gigabit Ethernet and XAUI interfaces. 14 on-chip Tilera Gigabit Ethernet and XAUI interfaces.
diff --git a/drivers/net/ethernet/tile/Makefile b/drivers/net/ethernet/tile/Makefile
index f634f142cab4..0ef9eefd3211 100644
--- a/drivers/net/ethernet/tile/Makefile
+++ b/drivers/net/ethernet/tile/Makefile
@@ -4,7 +4,7 @@
4 4
5obj-$(CONFIG_TILE_NET) += tile_net.o 5obj-$(CONFIG_TILE_NET) += tile_net.o
6ifdef CONFIG_TILEGX 6ifdef CONFIG_TILEGX
7tile_net-objs := tilegx.o mpipe.o iorpc_mpipe.o dma_queue.o 7tile_net-y := tilegx.o
8else 8else
9tile_net-objs := tilepro.o 9tile_net-y := tilepro.o
10endif 10endif
diff --git a/drivers/net/ethernet/tile/tilegx.c b/drivers/net/ethernet/tile/tilegx.c
new file mode 100644
index 000000000000..83b4b388ad49
--- /dev/null
+++ b/drivers/net/ethernet/tile/tilegx.c
@@ -0,0 +1,1898 @@
1/*
2 * Copyright 2012 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/moduleparam.h>
18#include <linux/sched.h>
19#include <linux/kernel.h> /* printk() */
20#include <linux/slab.h> /* kmalloc() */
21#include <linux/errno.h> /* error codes */
22#include <linux/types.h> /* size_t */
23#include <linux/interrupt.h>
24#include <linux/in.h>
25#include <linux/irq.h>
26#include <linux/netdevice.h> /* struct device, and other headers */
27#include <linux/etherdevice.h> /* eth_type_trans */
28#include <linux/skbuff.h>
29#include <linux/ioctl.h>
30#include <linux/cdev.h>
31#include <linux/hugetlb.h>
32#include <linux/in6.h>
33#include <linux/timer.h>
34#include <linux/hrtimer.h>
35#include <linux/ktime.h>
36#include <linux/io.h>
37#include <linux/ctype.h>
38#include <linux/ip.h>
39#include <linux/tcp.h>
40
41#include <asm/checksum.h>
42#include <asm/homecache.h>
43#include <gxio/mpipe.h>
44#include <arch/sim.h>
45
46/* Default transmit lockup timeout period, in jiffies. */
47#define TILE_NET_TIMEOUT (5 * HZ)
48
49/* The maximum number of distinct channels (idesc.channel is 5 bits). */
50#define TILE_NET_CHANNELS 32
51
52/* Maximum number of idescs to handle per "poll". */
53#define TILE_NET_BATCH 128
54
55/* Maximum number of packets to handle per "poll". */
56#define TILE_NET_WEIGHT 64
57
58/* Number of entries in each iqueue. */
59#define IQUEUE_ENTRIES 512
60
61/* Number of entries in each equeue. */
62#define EQUEUE_ENTRIES 2048
63
64/* Total header bytes per equeue slot. Must be big enough for 2 bytes
65 * of NET_IP_ALIGN alignment, plus 14 bytes (?) of L2 header, plus up to
66 * 60 bytes of actual TCP header. We round up to align to cache lines.
67 */
68#define HEADER_BYTES 128
69
70/* Maximum completions per cpu per device (must be a power of two).
71 * ISSUE: What is the right number here? If this is too small, then
72 * egress might block waiting for free space in a completions array.
73 * ISSUE: At the least, allocate these only for initialized echannels.
74 */
75#define TILE_NET_MAX_COMPS 64
76
77#define MAX_FRAGS (MAX_SKB_FRAGS + 1)
78
79/* Size of completions data to allocate.
80 * ISSUE: Probably more than needed since we don't use all the channels.
81 */
82#define COMPS_SIZE (TILE_NET_CHANNELS * sizeof(struct tile_net_comps))
83
84/* Size of NotifRing data to allocate. */
85#define NOTIF_RING_SIZE (IQUEUE_ENTRIES * sizeof(gxio_mpipe_idesc_t))
86
87/* Timeout to wake the per-device TX timer after we stop the queue.
88 * We don't want the timeout too short (adds overhead, and might end
89 * up causing stop/wake/stop/wake cycles) or too long (affects performance).
90 * For the 10 Gb NIC, 30 usec means roughly 30+ 1500-byte packets.
91 */
92#define TX_TIMER_DELAY_USEC 30
93
94/* Timeout to wake the per-cpu egress timer to free completions. */
95#define EGRESS_TIMER_DELAY_USEC 1000
96
97MODULE_AUTHOR("Tilera Corporation");
98MODULE_LICENSE("GPL");
99
100/* A "packet fragment" (a chunk of memory). */
101struct frag {
102 void *buf;
103 size_t length;
104};
105
106/* A single completion. */
107struct tile_net_comp {
108 /* The "complete_count" when the completion will be complete. */
109 s64 when;
110 /* The buffer to be freed when the completion is complete. */
111 struct sk_buff *skb;
112};
113
114/* The completions for a given cpu and echannel. */
115struct tile_net_comps {
116 /* The completions. */
117 struct tile_net_comp comp_queue[TILE_NET_MAX_COMPS];
118 /* The number of completions used. */
119 unsigned long comp_next;
120 /* The number of completions freed. */
121 unsigned long comp_last;
122};
123
124/* The transmit wake timer for a given cpu and echannel. */
125struct tile_net_tx_wake {
126 struct hrtimer timer;
127 struct net_device *dev;
128};
129
130/* Info for a specific cpu. */
131struct tile_net_info {
132 /* The NAPI struct. */
133 struct napi_struct napi;
134 /* Packet queue. */
135 gxio_mpipe_iqueue_t iqueue;
136 /* Our cpu. */
137 int my_cpu;
138 /* True if iqueue is valid. */
139 bool has_iqueue;
140 /* NAPI flags. */
141 bool napi_added;
142 bool napi_enabled;
143 /* Number of small sk_buffs which must still be provided. */
144 unsigned int num_needed_small_buffers;
145 /* Number of large sk_buffs which must still be provided. */
146 unsigned int num_needed_large_buffers;
147 /* A timer for handling egress completions. */
148 struct hrtimer egress_timer;
149 /* True if "egress_timer" is scheduled. */
150 bool egress_timer_scheduled;
151 /* Comps for each egress channel. */
152 struct tile_net_comps *comps_for_echannel[TILE_NET_CHANNELS];
153 /* Transmit wake timer for each egress channel. */
154 struct tile_net_tx_wake tx_wake[TILE_NET_CHANNELS];
155};
156
157/* Info for egress on a particular egress channel. */
158struct tile_net_egress {
159 /* The "equeue". */
160 gxio_mpipe_equeue_t *equeue;
161 /* The headers for TSO. */
162 unsigned char *headers;
163};
164
165/* Info for a specific device. */
166struct tile_net_priv {
167 /* Our network device. */
168 struct net_device *dev;
169 /* The primary link. */
170 gxio_mpipe_link_t link;
171 /* The primary channel, if open, else -1. */
172 int channel;
173 /* The "loopify" egress link, if needed. */
174 gxio_mpipe_link_t loopify_link;
175 /* The "loopify" egress channel, if open, else -1. */
176 int loopify_channel;
177 /* The egress channel (channel or loopify_channel). */
178 int echannel;
179 /* Total stats. */
180 struct net_device_stats stats;
181};
182
183/* Egress info, indexed by "priv->echannel" (lazily created as needed). */
184static struct tile_net_egress egress_for_echannel[TILE_NET_CHANNELS];
185
186/* Devices currently associated with each channel.
187 * NOTE: The array entry can become NULL after ifconfig down, but
188 * we do not free the underlying net_device structures, so it is
189 * safe to use a pointer after reading it from this array.
190 */
191static struct net_device *tile_net_devs_for_channel[TILE_NET_CHANNELS];
192
193/* A mutex for "tile_net_devs_for_channel". */
194static DEFINE_MUTEX(tile_net_devs_for_channel_mutex);
195
196/* The per-cpu info. */
197static DEFINE_PER_CPU(struct tile_net_info, per_cpu_info);
198
199/* The "context" for all devices. */
200static gxio_mpipe_context_t context;
201
202/* Buffer sizes and mpipe enum codes for buffer stacks.
203 * See arch/tile/include/gxio/mpipe.h for the set of possible values.
204 */
205#define BUFFER_SIZE_SMALL_ENUM GXIO_MPIPE_BUFFER_SIZE_128
206#define BUFFER_SIZE_SMALL 128
207#define BUFFER_SIZE_LARGE_ENUM GXIO_MPIPE_BUFFER_SIZE_1664
208#define BUFFER_SIZE_LARGE 1664
209
210/* The small/large "buffer stacks". */
211static int small_buffer_stack = -1;
212static int large_buffer_stack = -1;
213
214/* Amount of memory allocated for each buffer stack. */
215static size_t buffer_stack_size;
216
217/* The actual memory allocated for the buffer stacks. */
218static void *small_buffer_stack_va;
219static void *large_buffer_stack_va;
220
221/* The buckets. */
222static int first_bucket = -1;
223static int num_buckets = 1;
224
225/* The ingress irq. */
226static int ingress_irq = -1;
227
228/* Text value of tile_net.cpus if passed as a module parameter. */
229static char *network_cpus_string;
230
231/* The actual cpus in "network_cpus". */
232static struct cpumask network_cpus_map;
233
234/* If "loopify=LINK" was specified, this is "LINK". */
235static char *loopify_link_name;
236
237/* If "tile_net.custom" was specified, this is non-NULL. */
238static char *custom_str;
239
240/* The "tile_net.cpus" argument specifies the cpus that are dedicated
241 * to handle ingress packets.
242 *
243 * The parameter should be in the form "tile_net.cpus=m-n[,x-y]", where
244 * m, n, x, y are integer numbers that represent the cpus that can be
245 * neither a dedicated cpu nor a dataplane cpu.
246 */
247static bool network_cpus_init(void)
248{
249 char buf[1024];
250 int rc;
251
252 if (network_cpus_string == NULL)
253 return false;
254
255 rc = cpulist_parse_crop(network_cpus_string, &network_cpus_map);
256 if (rc != 0) {
257 pr_warn("tile_net.cpus=%s: malformed cpu list\n",
258 network_cpus_string);
259 return false;
260 }
261
262 /* Remove dedicated cpus. */
263 cpumask_and(&network_cpus_map, &network_cpus_map, cpu_possible_mask);
264
265 if (cpumask_empty(&network_cpus_map)) {
266 pr_warn("Ignoring empty tile_net.cpus='%s'.\n",
267 network_cpus_string);
268 return false;
269 }
270
271 cpulist_scnprintf(buf, sizeof(buf), &network_cpus_map);
272 pr_info("Linux network CPUs: %s\n", buf);
273 return true;
274}
275
276module_param_named(cpus, network_cpus_string, charp, 0444);
277MODULE_PARM_DESC(cpus, "cpulist of cores that handle network interrupts");
278
279/* The "tile_net.loopify=LINK" argument causes the named device to
280 * actually use "loop0" for ingress, and "loop1" for egress. This
281 * allows an app to sit between the actual link and linux, passing
282 * (some) packets along to linux, and forwarding (some) packets sent
283 * out by linux.
284 */
285module_param_named(loopify, loopify_link_name, charp, 0444);
286MODULE_PARM_DESC(loopify, "name the device to use loop0/1 for ingress/egress");
287
288/* The "tile_net.custom" argument causes us to ignore the "conventional"
289 * classifier metadata, in particular, the "l2_offset".
290 */
291module_param_named(custom, custom_str, charp, 0444);
292MODULE_PARM_DESC(custom, "indicates a (heavily) customized classifier");
293
294/* Atomically update a statistics field.
295 * Note that on TILE-Gx, this operation is fire-and-forget on the
296 * issuing core (single-cycle dispatch) and takes only a few cycles
297 * longer than a regular store when the request reaches the home cache.
298 * No expensive bus management overhead is required.
299 */
300static void tile_net_stats_add(unsigned long value, unsigned long *field)
301{
302 BUILD_BUG_ON(sizeof(atomic_long_t) != sizeof(unsigned long));
303 atomic_long_add(value, (atomic_long_t *)field);
304}
305
306/* Allocate and push a buffer. */
307static bool tile_net_provide_buffer(bool small)
308{
309 int stack = small ? small_buffer_stack : large_buffer_stack;
310 const unsigned long buffer_alignment = 128;
311 struct sk_buff *skb;
312 int len;
313
314 len = sizeof(struct sk_buff **) + buffer_alignment;
315 len += (small ? BUFFER_SIZE_SMALL : BUFFER_SIZE_LARGE);
316 skb = dev_alloc_skb(len);
317 if (skb == NULL)
318 return false;
319
320 /* Make room for a back-pointer to 'skb' and guarantee alignment. */
321 skb_reserve(skb, sizeof(struct sk_buff **));
322 skb_reserve(skb, -(long)skb->data & (buffer_alignment - 1));
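/* (Editor's note) Example of the alignment math above, for a hypothetical
 * address: if skb->data % 128 == 72, then -(long)skb->data & 127 == 56,
 * so reserving 56 more bytes leaves skb->data 128-byte aligned.
 */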
323
324 /* Save a back-pointer to 'skb'. */
325 *(struct sk_buff **)(skb->data - sizeof(struct sk_buff **)) = skb;
326
327 /* Make sure "skb" and the back-pointer have been flushed. */
328 wmb();
329
330 gxio_mpipe_push_buffer(&context, stack,
331 (void *)va_to_tile_io_addr(skb->data));
332
333 return true;
334}
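
/* (Editor's note) Resulting buffer layout, sketched for a hypothetical
 * allocation: [skb headroom / padding][8-byte back-pointer to skb]
 * [128-byte-aligned packet data pushed to mPIPE].  The back-pointer always
 * sits in the 8 bytes immediately below the aligned data, which is what
 * mpipe_buf_to_skb() below relies on.
 */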
335
336/* Convert a raw mpipe buffer to its matching skb pointer. */
337static struct sk_buff *mpipe_buf_to_skb(void *va)
338{
339 /* Acquire the associated "skb". */
340 struct sk_buff **skb_ptr = va - sizeof(*skb_ptr);
341 struct sk_buff *skb = *skb_ptr;
342
343 /* Paranoia. */
344 if (skb->data != va) {
345 /* Panic here since there's a reasonable chance
346 * that corrupt buffers means generic memory
347 * corruption, with unpredictable system effects.
348 */
349 panic("Corrupt linux buffer! va=%p, skb=%p, skb->data=%p",
350 va, skb, skb->data);
351 }
352
353 return skb;
354}
355
356static void tile_net_pop_all_buffers(int stack)
357{
358 for (;;) {
359 tile_io_addr_t addr =
360 (tile_io_addr_t)gxio_mpipe_pop_buffer(&context, stack);
361 if (addr == 0)
362 break;
363 dev_kfree_skb_irq(mpipe_buf_to_skb(tile_io_addr_to_va(addr)));
364 }
365}
366
367/* Provide linux buffers to mPIPE. */
368static void tile_net_provide_needed_buffers(void)
369{
370 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
371
372 while (info->num_needed_small_buffers != 0) {
373 if (!tile_net_provide_buffer(true))
374 goto oops;
375 info->num_needed_small_buffers--;
376 }
377
378 while (info->num_needed_large_buffers != 0) {
379 if (!tile_net_provide_buffer(false))
380 goto oops;
381 info->num_needed_large_buffers--;
382 }
383
384 return;
385
386oops:
387 /* Add a description to the page allocation failure dump. */
388 pr_notice("Tile %d still needs some buffers\n", info->my_cpu);
389}
390
391static inline bool filter_packet(struct net_device *dev, void *buf)
392{
393 /* Filter packets received before we're up. */
394 if (dev == NULL || !(dev->flags & IFF_UP))
395 return true;
396
397 /* Filter out packets that aren't for us. */
398 if (!(dev->flags & IFF_PROMISC) &&
399 !is_multicast_ether_addr(buf) &&
400 compare_ether_addr(dev->dev_addr, buf) != 0)
401 return true;
402
403 return false;
404}
405
406static void tile_net_receive_skb(struct net_device *dev, struct sk_buff *skb,
407 gxio_mpipe_idesc_t *idesc, unsigned long len)
408{
409 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
410 struct tile_net_priv *priv = netdev_priv(dev);
411
412 /* Encode the actual packet length. */
413 skb_put(skb, len);
414
415 skb->protocol = eth_type_trans(skb, dev);
416
417 /* Acknowledge "good" hardware checksums. */
418 if (idesc->cs && idesc->csum_seed_val == 0xFFFF)
419 skb->ip_summed = CHECKSUM_UNNECESSARY;
420
421 netif_receive_skb(skb);
422
423 /* Update stats. */
424 tile_net_stats_add(1, &priv->stats.rx_packets);
425 tile_net_stats_add(len, &priv->stats.rx_bytes);
426
427 /* Need a new buffer. */
428 if (idesc->size == BUFFER_SIZE_SMALL_ENUM)
429 info->num_needed_small_buffers++;
430 else
431 info->num_needed_large_buffers++;
432}
433
434/* Handle a packet. Return true if "processed", false if "filtered". */
435static bool tile_net_handle_packet(gxio_mpipe_idesc_t *idesc)
436{
437 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
438 struct net_device *dev = tile_net_devs_for_channel[idesc->channel];
439 uint8_t l2_offset;
440 void *va;
441 void *buf;
442 unsigned long len;
443 bool filter;
444
445 /* Drop packets for which no buffer was available.
446 * NOTE: This happens under heavy load.
447 */
448 if (idesc->be) {
449 struct tile_net_priv *priv = netdev_priv(dev);
450 tile_net_stats_add(1, &priv->stats.rx_dropped);
451 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
452 if (net_ratelimit())
453 pr_info("Dropping packet (insufficient buffers).\n");
454 return false;
455 }
456
457 /* Get the "l2_offset", if allowed. */
458 l2_offset = custom_str ? 0 : gxio_mpipe_idesc_get_l2_offset(idesc);
459
460 /* Get the raw buffer VA (includes "headroom"). */
461 va = tile_io_addr_to_va((unsigned long)(long)idesc->va);
462
463 /* Get the actual packet start/length. */
464 buf = va + l2_offset;
465 len = idesc->l2_size - l2_offset;
466
467 /* Point "va" at the raw buffer. */
468 va -= NET_IP_ALIGN;
469
470 filter = filter_packet(dev, buf);
471 if (filter) {
472 gxio_mpipe_iqueue_drop(&info->iqueue, idesc);
473 } else {
474 struct sk_buff *skb = mpipe_buf_to_skb(va);
475
476 /* Skip headroom, and any custom header. */
477 skb_reserve(skb, NET_IP_ALIGN + l2_offset);
478
479 tile_net_receive_skb(dev, skb, idesc, len);
480 }
481
482 gxio_mpipe_iqueue_consume(&info->iqueue, idesc);
483 return !filter;
484}
485
486/* Handle some packets for the current CPU.
487 *
488 * This function handles up to TILE_NET_BATCH idescs per call.
489 *
490 * ISSUE: Since we do not provide new buffers until this function is
491 * complete, we must initially provide enough buffers for each network
492 * cpu to fill its iqueue and also its batched idescs.
493 *
494 * ISSUE: The "rotting packet" race condition occurs if a packet
495 * arrives after the queue appears to be empty, and before the
496 * hypervisor interrupt is re-enabled.
497 */
498static int tile_net_poll(struct napi_struct *napi, int budget)
499{
500 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
501 unsigned int work = 0;
502 gxio_mpipe_idesc_t *idesc;
503 int i, n;
504
505 /* Process packets. */
506 while ((n = gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc)) > 0) {
507 for (i = 0; i < n; i++) {
508 if (i == TILE_NET_BATCH)
509 goto done;
510 if (tile_net_handle_packet(idesc + i)) {
511 if (++work >= budget)
512 goto done;
513 }
514 }
515 }
516
517 /* There are no packets left. */
518 napi_complete(&info->napi);
519
520 /* Re-enable hypervisor interrupts. */
521 gxio_mpipe_enable_notif_ring_interrupt(&context, info->iqueue.ring);
522
523 /* HACK: Avoid the "rotting packet" problem. */
524 if (gxio_mpipe_iqueue_try_peek(&info->iqueue, &idesc) > 0)
525 napi_schedule(&info->napi);
526
527 /* ISSUE: Handle completions? */
528
529done:
530 tile_net_provide_needed_buffers();
531
532 return work;
533}
534
535/* Handle an ingress interrupt on the current cpu. */
536static irqreturn_t tile_net_handle_ingress_irq(int irq, void *unused)
537{
538 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
539 napi_schedule(&info->napi);
540 return IRQ_HANDLED;
541}
542
543/* Free some completions. This must be called with interrupts blocked. */
544static int tile_net_free_comps(gxio_mpipe_equeue_t *equeue,
545 struct tile_net_comps *comps,
546 int limit, bool force_update)
547{
548 int n = 0;
549 while (comps->comp_last < comps->comp_next) {
550 unsigned int cid = comps->comp_last % TILE_NET_MAX_COMPS;
551 struct tile_net_comp *comp = &comps->comp_queue[cid];
552 if (!gxio_mpipe_equeue_is_complete(equeue, comp->when,
553 force_update || n == 0))
554 break;
555 dev_kfree_skb_irq(comp->skb);
556 comps->comp_last++;
557 if (++n == limit)
558 break;
559 }
560 return n;
561}
562
563/* Add a completion. This must be called with interrupts blocked.
564 * tile_net_equeue_try_reserve() will have ensured a free completion entry.
565 */
566static void add_comp(gxio_mpipe_equeue_t *equeue,
567 struct tile_net_comps *comps,
568 uint64_t when, struct sk_buff *skb)
569{
570 int cid = comps->comp_next % TILE_NET_MAX_COMPS;
571 comps->comp_queue[cid].when = when;
572 comps->comp_queue[cid].skb = skb;
573 comps->comp_next++;
574}
575
576static void tile_net_schedule_tx_wake_timer(struct net_device *dev)
577{
578 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
579 struct tile_net_priv *priv = netdev_priv(dev);
580
581 hrtimer_start(&info->tx_wake[priv->echannel].timer,
582 ktime_set(0, TX_TIMER_DELAY_USEC * 1000UL),
583 HRTIMER_MODE_REL_PINNED);
584}
585
586static enum hrtimer_restart tile_net_handle_tx_wake_timer(struct hrtimer *t)
587{
588 struct tile_net_tx_wake *tx_wake =
589 container_of(t, struct tile_net_tx_wake, timer);
590 netif_wake_subqueue(tx_wake->dev, smp_processor_id());
591 return HRTIMER_NORESTART;
592}
593
594/* Make sure the egress timer is scheduled. */
595static void tile_net_schedule_egress_timer(void)
596{
597 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
598
599 if (!info->egress_timer_scheduled) {
600 hrtimer_start(&info->egress_timer,
601 ktime_set(0, EGRESS_TIMER_DELAY_USEC * 1000UL),
602 HRTIMER_MODE_REL_PINNED);
603 info->egress_timer_scheduled = true;
604 }
605}
606
607/* The "function" for "info->egress_timer".
608 *
609 * This timer will reschedule itself as long as there are any pending
610 * completions expected for this tile.
611 */
612static enum hrtimer_restart tile_net_handle_egress_timer(struct hrtimer *t)
613{
614 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
615 unsigned long irqflags;
616 bool pending = false;
617 int i;
618
619 local_irq_save(irqflags);
620
621 /* The timer is no longer scheduled. */
622 info->egress_timer_scheduled = false;
623
624 /* Free all possible comps for this tile. */
625 for (i = 0; i < TILE_NET_CHANNELS; i++) {
626 struct tile_net_egress *egress = &egress_for_echannel[i];
627 struct tile_net_comps *comps = info->comps_for_echannel[i];
628 if (comps->comp_last >= comps->comp_next)
629 continue;
630 tile_net_free_comps(egress->equeue, comps, -1, true);
631 pending = pending || (comps->comp_last < comps->comp_next);
632 }
633
634 /* Reschedule timer if needed. */
635 if (pending)
636 tile_net_schedule_egress_timer();
637
638 local_irq_restore(irqflags);
639
640 return HRTIMER_NORESTART;
641}
642
643/* Helper function for "tile_net_update()".
644 * "dev" (i.e. arg) is the device being brought up or down,
645 * or NULL if all devices are now down.
646 */
647static void tile_net_update_cpu(void *arg)
648{
649 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
650 struct net_device *dev = arg;
651
652 if (!info->has_iqueue)
653 return;
654
655 if (dev != NULL) {
656 if (!info->napi_added) {
657 netif_napi_add(dev, &info->napi,
658 tile_net_poll, TILE_NET_WEIGHT);
659 info->napi_added = true;
660 }
661 if (!info->napi_enabled) {
662 napi_enable(&info->napi);
663 info->napi_enabled = true;
664 }
665 enable_percpu_irq(ingress_irq, 0);
666 } else {
667 disable_percpu_irq(ingress_irq);
668 if (info->napi_enabled) {
669 napi_disable(&info->napi);
670 info->napi_enabled = false;
671 }
672 /* FIXME: Drain the iqueue. */
673 }
674}
675
676/* Helper function for tile_net_open() and tile_net_stop().
677 * Always called under tile_net_devs_for_channel_mutex.
678 */
679static int tile_net_update(struct net_device *dev)
680{
681 static gxio_mpipe_rules_t rules; /* too big to fit on the stack */
682 bool saw_channel = false;
683 int channel;
684 int rc;
685 int cpu;
686
687 gxio_mpipe_rules_init(&rules, &context);
688
689 for (channel = 0; channel < TILE_NET_CHANNELS; channel++) {
690 if (tile_net_devs_for_channel[channel] == NULL)
691 continue;
692 if (!saw_channel) {
693 saw_channel = true;
694 gxio_mpipe_rules_begin(&rules, first_bucket,
695 num_buckets, NULL);
696 gxio_mpipe_rules_set_headroom(&rules, NET_IP_ALIGN);
697 }
698 gxio_mpipe_rules_add_channel(&rules, channel);
699 }
700
701 /* NOTE: This can fail if there is no classifier.
702 * ISSUE: Can anything else cause it to fail?
703 */
704 rc = gxio_mpipe_rules_commit(&rules);
705 if (rc != 0) {
706 netdev_warn(dev, "gxio_mpipe_rules_commit failed: %d\n", rc);
707 return -EIO;
708 }
709
710 /* Update all cpus, sequentially (to protect "netif_napi_add()"). */
711 for_each_online_cpu(cpu)
712 smp_call_function_single(cpu, tile_net_update_cpu,
713 (saw_channel ? dev : NULL), 1);
714
715 /* HACK: Allow packets to flow in the simulator. */
716 if (saw_channel)
717 sim_enable_mpipe_links(0, -1);
718
719 return 0;
720}
721
722/* Allocate and initialize mpipe buffer stacks, and register them in
723 * the mPIPE TLBs, for both small and large packet sizes.
724 * This routine supports tile_net_init_mpipe(), below.
725 */
726static int init_buffer_stacks(struct net_device *dev, int num_buffers)
727{
728 pte_t hash_pte = pte_set_home((pte_t) { 0 }, PAGE_HOME_HASH);
729 int rc;
730
731 /* Compute stack bytes; we round up to 64KB and then use
732 * alloc_pages() so we get the required 64KB alignment as well.
733 */
734 buffer_stack_size =
735 ALIGN(gxio_mpipe_calc_buffer_stack_bytes(num_buffers),
736 64 * 1024);
737
738 /* Allocate two buffer stack indices. */
739 rc = gxio_mpipe_alloc_buffer_stacks(&context, 2, 0, 0);
740 if (rc < 0) {
741 netdev_err(dev, "gxio_mpipe_alloc_buffer_stacks failed: %d\n",
742 rc);
743 return rc;
744 }
745 small_buffer_stack = rc;
746 large_buffer_stack = rc + 1;
747
748 /* Allocate the small memory stack. */
749 small_buffer_stack_va =
750 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
751 if (small_buffer_stack_va == NULL) {
752 netdev_err(dev,
753 "Could not alloc %zd bytes for buffer stacks\n",
754 buffer_stack_size);
755 return -ENOMEM;
756 }
757 rc = gxio_mpipe_init_buffer_stack(&context, small_buffer_stack,
758 BUFFER_SIZE_SMALL_ENUM,
759 small_buffer_stack_va,
760 buffer_stack_size, 0);
761 if (rc != 0) {
762 netdev_err(dev, "gxio_mpipe_init_buffer_stack: %d\n", rc);
763 return rc;
764 }
765 rc = gxio_mpipe_register_client_memory(&context, small_buffer_stack,
766 hash_pte, 0);
767 if (rc != 0) {
768 netdev_err(dev,
769 "gxio_mpipe_register_buffer_memory failed: %d\n",
770 rc);
771 return rc;
772 }
773
774 /* Allocate the large buffer stack. */
775 large_buffer_stack_va =
776 alloc_pages_exact(buffer_stack_size, GFP_KERNEL);
777 if (large_buffer_stack_va == NULL) {
778 netdev_err(dev,
779 "Could not alloc %zd bytes for buffer stacks\n",
780 buffer_stack_size);
781 return -ENOMEM;
782 }
783 rc = gxio_mpipe_init_buffer_stack(&context, large_buffer_stack,
784 BUFFER_SIZE_LARGE_ENUM,
785 large_buffer_stack_va,
786 buffer_stack_size, 0);
787 if (rc != 0) {
788 netdev_err(dev, "gxio_mpipe_init_buffer_stack failed: %d\n",
789 rc);
790 return rc;
791 }
792 rc = gxio_mpipe_register_client_memory(&context, large_buffer_stack,
793 hash_pte, 0);
794 if (rc != 0) {
795 netdev_err(dev,
796 "gxio_mpipe_register_buffer_memory failed: %d\n",
797 rc);
798 return rc;
799 }
800
801 return 0;
802}
803
804/* Allocate per-cpu resources (memory for completions and idescs).
805 * This routine supports tile_net_init_mpipe(), below.
806 */
807static int alloc_percpu_mpipe_resources(struct net_device *dev,
808 int cpu, int ring)
809{
810 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
811 int order, i, rc;
812 struct page *page;
813 void *addr;
814
815 /* Allocate the "comps". */
816 order = get_order(COMPS_SIZE);
817 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
818 if (page == NULL) {
819 netdev_err(dev, "Failed to alloc %zd bytes comps memory\n",
820 COMPS_SIZE);
821 return -ENOMEM;
822 }
823 addr = pfn_to_kaddr(page_to_pfn(page));
824 memset(addr, 0, COMPS_SIZE);
825 for (i = 0; i < TILE_NET_CHANNELS; i++)
826 info->comps_for_echannel[i] =
827 addr + i * sizeof(struct tile_net_comps);
828
829 /* If this is a network cpu, create an iqueue. */
830 if (cpu_isset(cpu, network_cpus_map)) {
831 order = get_order(NOTIF_RING_SIZE);
832 page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
833 if (page == NULL) {
834 netdev_err(dev,
835 "Failed to alloc %zd bytes iqueue memory\n",
836 NOTIF_RING_SIZE);
837 return -ENOMEM;
838 }
839 addr = pfn_to_kaddr(page_to_pfn(page));
840 rc = gxio_mpipe_iqueue_init(&info->iqueue, &context, ring++,
841 addr, NOTIF_RING_SIZE, 0);
842 if (rc < 0) {
843 netdev_err(dev,
844 "gxio_mpipe_iqueue_init failed: %d\n", rc);
845 return rc;
846 }
847 info->has_iqueue = true;
848 }
849
850 return ring;
851}
852
853/* Initialize NotifGroup and buckets.
854 * This routine supports tile_net_init_mpipe(), below.
855 */
856static int init_notif_group_and_buckets(struct net_device *dev,
857 int ring, int network_cpus_count)
858{
859 int group, rc;
860
861 /* Allocate one NotifGroup. */
862 rc = gxio_mpipe_alloc_notif_groups(&context, 1, 0, 0);
863 if (rc < 0) {
864 netdev_err(dev, "gxio_mpipe_alloc_notif_groups failed: %d\n",
865 rc);
866 return rc;
867 }
868 group = rc;
869
870 /* Initialize global num_buckets value. */
871 if (network_cpus_count > 4)
872 num_buckets = 256;
873 else if (network_cpus_count > 1)
874 num_buckets = 16;
875
876 /* Allocate some buckets, and set global first_bucket value. */
877 rc = gxio_mpipe_alloc_buckets(&context, num_buckets, 0, 0);
878 if (rc < 0) {
879 netdev_err(dev, "gxio_mpipe_alloc_buckets failed: %d\n", rc);
880 return rc;
881 }
882 first_bucket = rc;
883
884 /* Init group and buckets. */
885 rc = gxio_mpipe_init_notif_group_and_buckets(
886 &context, group, ring, network_cpus_count,
887 first_bucket, num_buckets,
888 GXIO_MPIPE_BUCKET_STICKY_FLOW_LOCALITY);
889 if (rc != 0) {
890 netdev_err(
891 dev,
892 "gxio_mpipe_init_notif_group_and_buckets failed: %d\n",
893 rc);
894 return rc;
895 }
896
897 return 0;
898}
899
900/* Create an irq and register it, then activate the irq and request
901 * interrupts on all cores. Note that "ingress_irq" being initialized
902 * is how we know not to call tile_net_init_mpipe() again.
903 * This routine supports tile_net_init_mpipe(), below.
904 */
905static int tile_net_setup_interrupts(struct net_device *dev)
906{
907 int cpu, rc;
908
909 rc = create_irq();
910 if (rc < 0) {
911 netdev_err(dev, "create_irq failed: %d\n", rc);
912 return rc;
913 }
914 ingress_irq = rc;
915 tile_irq_activate(ingress_irq, TILE_IRQ_PERCPU);
916 rc = request_irq(ingress_irq, tile_net_handle_ingress_irq,
917 0, NULL, NULL);
918 if (rc != 0) {
919 netdev_err(dev, "request_irq failed: %d\n", rc);
920 destroy_irq(ingress_irq);
921 ingress_irq = -1;
922 return rc;
923 }
924
925 for_each_online_cpu(cpu) {
926 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
927 if (info->has_iqueue) {
928 gxio_mpipe_request_notif_ring_interrupt(
929 &context, cpu_x(cpu), cpu_y(cpu),
930 1, ingress_irq, info->iqueue.ring);
931 }
932 }
933
934 return 0;
935}
936
937/* Undo any state set up partially by a failed call to tile_net_init_mpipe. */
938static void tile_net_init_mpipe_fail(void)
939{
940 int cpu;
941
942 /* Do cleanups that require the mpipe context first. */
943 if (small_buffer_stack >= 0)
944 tile_net_pop_all_buffers(small_buffer_stack);
945 if (large_buffer_stack >= 0)
946 tile_net_pop_all_buffers(large_buffer_stack);
947
948 /* Destroy mpipe context so the hardware no longer owns any memory. */
949 gxio_mpipe_destroy(&context);
950
951 for_each_online_cpu(cpu) {
952 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
953 free_pages((unsigned long)(info->comps_for_echannel[0]),
954 get_order(COMPS_SIZE));
955 info->comps_for_echannel[0] = NULL;
956 free_pages((unsigned long)(info->iqueue.idescs),
957 get_order(NOTIF_RING_SIZE));
958 info->iqueue.idescs = NULL;
959 }
960
961 if (small_buffer_stack_va)
962 free_pages_exact(small_buffer_stack_va, buffer_stack_size);
963 if (large_buffer_stack_va)
964 free_pages_exact(large_buffer_stack_va, buffer_stack_size);
965
966 small_buffer_stack_va = NULL;
967 large_buffer_stack_va = NULL;
968 large_buffer_stack = -1;
969 small_buffer_stack = -1;
970 first_bucket = -1;
971}
972
973/* The first time any tilegx network device is opened, we initialize
974 * the global mpipe state. If this step fails, we fail to open the
975 * device, but if it succeeds, we never need to do it again, and since
976 * tile_net can't be unloaded, we never undo it.
977 *
978 * Note that some resources in this path (buffer stack indices,
979 * bindings from init_buffer_stack, etc.) are hypervisor resources
980 * that are freed implicitly by gxio_mpipe_destroy().
981 */
982static int tile_net_init_mpipe(struct net_device *dev)
983{
984 int i, num_buffers, rc;
985 int cpu;
986 int first_ring, ring;
987 int network_cpus_count = cpus_weight(network_cpus_map);
988
989 if (!hash_default) {
990 netdev_err(dev, "Networking requires hash_default!\n");
991 return -EIO;
992 }
993
994 rc = gxio_mpipe_init(&context, 0);
995 if (rc != 0) {
996 netdev_err(dev, "gxio_mpipe_init failed: %d\n", rc);
997 return -EIO;
998 }
999
1000 /* Set up the buffer stacks. */
1001 num_buffers =
1002 network_cpus_count * (IQUEUE_ENTRIES + TILE_NET_BATCH);
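/* (Editor's note) For example, with 4 network cpus and hypothetical values
 * IQUEUE_ENTRIES=512 and TILE_NET_BATCH=128, this pushes 4 * (512 + 128) =
 * 2560 small and 2560 large buffers up front, matching the ISSUE note in
 * tile_net_poll() about pre-filling each iqueue plus its batched idescs.
 */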
1003 rc = init_buffer_stacks(dev, num_buffers);
1004 if (rc != 0)
1005 goto fail;
1006
1007 /* Provide initial buffers. */
1008 rc = -ENOMEM;
1009 for (i = 0; i < num_buffers; i++) {
1010 if (!tile_net_provide_buffer(true)) {
1011 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1012 goto fail;
1013 }
1014 }
1015 for (i = 0; i < num_buffers; i++) {
1016 if (!tile_net_provide_buffer(false)) {
1017 netdev_err(dev, "Cannot allocate initial sk_bufs!\n");
1018 goto fail;
1019 }
1020 }
1021
1022 /* Allocate one NotifRing for each network cpu. */
1023 rc = gxio_mpipe_alloc_notif_rings(&context, network_cpus_count, 0, 0);
1024 if (rc < 0) {
1025 netdev_err(dev, "gxio_mpipe_alloc_notif_rings failed %d\n",
1026 rc);
1027 goto fail;
1028 }
1029
1030 /* Init NotifRings per-cpu. */
1031 first_ring = rc;
1032 ring = first_ring;
1033 for_each_online_cpu(cpu) {
1034 rc = alloc_percpu_mpipe_resources(dev, cpu, ring);
1035 if (rc < 0)
1036 goto fail;
1037 ring = rc;
1038 }
1039
1040 /* Initialize NotifGroup and buckets. */
1041 rc = init_notif_group_and_buckets(dev, first_ring, network_cpus_count);
1042 if (rc != 0)
1043 goto fail;
1044
1045 /* Create and enable interrupts. */
1046 rc = tile_net_setup_interrupts(dev);
1047 if (rc != 0)
1048 goto fail;
1049
1050 return 0;
1051
1052fail:
1053 tile_net_init_mpipe_fail();
1054 return rc;
1055}
1056
1057/* Create persistent egress info for a given egress channel.
1058 * Note that this may be shared between, say, "gbe0" and "xgbe0".
1059 * ISSUE: Defer header allocation until TSO is actually needed?
1060 */
1061static int tile_net_init_egress(struct net_device *dev, int echannel)
1062{
1063 struct page *headers_page, *edescs_page, *equeue_page;
1064 gxio_mpipe_edesc_t *edescs;
1065 gxio_mpipe_equeue_t *equeue;
1066 unsigned char *headers;
1067 int headers_order, edescs_order, equeue_order;
1068 size_t edescs_size;
1069 int edma;
1070 int rc = -ENOMEM;
1071
1072 /* Only initialize once. */
1073 if (egress_for_echannel[echannel].equeue != NULL)
1074 return 0;
1075
1076 /* Allocate memory for the "headers". */
1077 headers_order = get_order(EQUEUE_ENTRIES * HEADER_BYTES);
1078 headers_page = alloc_pages(GFP_KERNEL, headers_order);
1079 if (headers_page == NULL) {
1080 netdev_warn(dev,
1081 "Could not alloc %zd bytes for TSO headers.\n",
1082 PAGE_SIZE << headers_order);
1083 goto fail;
1084 }
1085 headers = pfn_to_kaddr(page_to_pfn(headers_page));
1086
1087 /* Allocate memory for the "edescs". */
1088 edescs_size = EQUEUE_ENTRIES * sizeof(*edescs);
1089 edescs_order = get_order(edescs_size);
1090 edescs_page = alloc_pages(GFP_KERNEL, edescs_order);
1091 if (edescs_page == NULL) {
1092 netdev_warn(dev,
1093 "Could not alloc %zd bytes for eDMA ring.\n",
1094 edescs_size);
1095 goto fail_headers;
1096 }
1097 edescs = pfn_to_kaddr(page_to_pfn(edescs_page));
1098
1099 /* Allocate memory for the "equeue". */
1100 equeue_order = get_order(sizeof(*equeue));
1101 equeue_page = alloc_pages(GFP_KERNEL, equeue_order);
1102 if (equeue_page == NULL) {
1103 netdev_warn(dev,
1104 "Could not alloc %zd bytes for equeue info.\n",
1105 PAGE_SIZE << equeue_order);
1106 goto fail_edescs;
1107 }
1108 equeue = pfn_to_kaddr(page_to_pfn(equeue_page));
1109
1110 /* Allocate an edma ring. Note that in practice this can't
1111 * fail, which is good, because we will leak an edma ring if so.
1112 */
1113 rc = gxio_mpipe_alloc_edma_rings(&context, 1, 0, 0);
1114 if (rc < 0) {
1115 netdev_warn(dev, "gxio_mpipe_alloc_edma_rings failed: %d\n",
1116 rc);
1117 goto fail_equeue;
1118 }
1119 edma = rc;
1120
1121 /* Initialize the equeue. */
1122 rc = gxio_mpipe_equeue_init(equeue, &context, edma, echannel,
1123 edescs, edescs_size, 0);
1124 if (rc != 0) {
1125 netdev_err(dev, "gxio_mpipe_equeue_init failed: %d\n", rc);
1126 goto fail_equeue;
1127 }
1128
1129 /* Done. */
1130 egress_for_echannel[echannel].equeue = equeue;
1131 egress_for_echannel[echannel].headers = headers;
1132 return 0;
1133
1134fail_equeue:
1135 __free_pages(equeue_page, equeue_order);
1136
1137fail_edescs:
1138 __free_pages(edescs_page, edescs_order);
1139
1140fail_headers:
1141 __free_pages(headers_page, headers_order);
1142
1143fail:
1144 return rc;
1145}
1146
1147/* Return channel number for a newly-opened link. */
1148static int tile_net_link_open(struct net_device *dev, gxio_mpipe_link_t *link,
1149 const char *link_name)
1150{
1151 int rc = gxio_mpipe_link_open(link, &context, link_name, 0);
1152 if (rc < 0) {
1153 netdev_err(dev, "Failed to open '%s'\n", link_name);
1154 return rc;
1155 }
1156 rc = gxio_mpipe_link_channel(link);
1157 if (rc < 0 || rc >= TILE_NET_CHANNELS) {
1158 netdev_err(dev, "gxio_mpipe_link_channel bad value: %d\n", rc);
1159 gxio_mpipe_link_close(link);
1160 return -EINVAL;
1161 }
1162 return rc;
1163}
1164
1165/* Help the kernel activate the given network interface. */
1166static int tile_net_open(struct net_device *dev)
1167{
1168 struct tile_net_priv *priv = netdev_priv(dev);
1169 int cpu, rc;
1170
1171 mutex_lock(&tile_net_devs_for_channel_mutex);
1172
1173 /* Do one-time initialization the first time any device is opened. */
1174 if (ingress_irq < 0) {
1175 rc = tile_net_init_mpipe(dev);
1176 if (rc != 0)
1177 goto fail;
1178 }
1179
1180 /* Determine if this is the "loopify" device. */
1181 if (unlikely((loopify_link_name != NULL) &&
1182 !strcmp(dev->name, loopify_link_name))) {
1183 rc = tile_net_link_open(dev, &priv->link, "loop0");
1184 if (rc < 0)
1185 goto fail;
1186 priv->channel = rc;
1187 rc = tile_net_link_open(dev, &priv->loopify_link, "loop1");
1188 if (rc < 0)
1189 goto fail;
1190 priv->loopify_channel = rc;
1191 priv->echannel = rc;
1192 } else {
1193 rc = tile_net_link_open(dev, &priv->link, dev->name);
1194 if (rc < 0)
1195 goto fail;
1196 priv->channel = rc;
1197 priv->echannel = rc;
1198 }
1199
1200 /* Initialize egress info (if needed). Once ever, per echannel. */
1201 rc = tile_net_init_egress(dev, priv->echannel);
1202 if (rc != 0)
1203 goto fail;
1204
1205 tile_net_devs_for_channel[priv->channel] = dev;
1206
1207 rc = tile_net_update(dev);
1208 if (rc != 0)
1209 goto fail;
1210
1211 mutex_unlock(&tile_net_devs_for_channel_mutex);
1212
1213 /* Initialize the transmit wake timer for this device for each cpu. */
1214 for_each_online_cpu(cpu) {
1215 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1216 struct tile_net_tx_wake *tx_wake =
1217 &info->tx_wake[priv->echannel];
1218
1219 hrtimer_init(&tx_wake->timer, CLOCK_MONOTONIC,
1220 HRTIMER_MODE_REL);
1221 tx_wake->timer.function = tile_net_handle_tx_wake_timer;
1222 tx_wake->dev = dev;
1223 }
1224
1225 for_each_online_cpu(cpu)
1226 netif_start_subqueue(dev, cpu);
1227 netif_carrier_on(dev);
1228 return 0;
1229
1230fail:
1231 if (priv->loopify_channel >= 0) {
1232 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1233 netdev_warn(dev, "Failed to close loopify link!\n");
1234 priv->loopify_channel = -1;
1235 }
1236 if (priv->channel >= 0) {
1237 if (gxio_mpipe_link_close(&priv->link) != 0)
1238 netdev_warn(dev, "Failed to close link!\n");
1239 tile_net_devs_for_channel[priv->channel] = NULL;
1240 priv->channel = -1;
1241 }
1242 priv->echannel = -1;
1243 mutex_unlock(&tile_net_devs_for_channel_mutex);
1244
1245 /* Don't return raw gxio error codes to generic Linux. */
1246 return (rc > -512) ? rc : -EIO;
1247}
1248
1249/* Help the kernel deactivate the given network interface. */
1250static int tile_net_stop(struct net_device *dev)
1251{
1252 struct tile_net_priv *priv = netdev_priv(dev);
1253 int cpu;
1254
1255 for_each_online_cpu(cpu) {
1256 struct tile_net_info *info = &per_cpu(per_cpu_info, cpu);
1257 struct tile_net_tx_wake *tx_wake =
1258 &info->tx_wake[priv->echannel];
1259
1260 hrtimer_cancel(&tx_wake->timer);
1261 netif_stop_subqueue(dev, cpu);
1262 }
1263
1264 mutex_lock(&tile_net_devs_for_channel_mutex);
1265 tile_net_devs_for_channel[priv->channel] = NULL;
1266 (void)tile_net_update(dev);
1267 if (priv->loopify_channel >= 0) {
1268 if (gxio_mpipe_link_close(&priv->loopify_link) != 0)
1269 netdev_warn(dev, "Failed to close loopify link!\n");
1270 priv->loopify_channel = -1;
1271 }
1272 if (priv->channel >= 0) {
1273 if (gxio_mpipe_link_close(&priv->link) != 0)
1274 netdev_warn(dev, "Failed to close link!\n");
1275 priv->channel = -1;
1276 }
1277 priv->echannel = -1;
1278 mutex_unlock(&tile_net_devs_for_channel_mutex);
1279
1280 return 0;
1281}
1282
1283/* Determine the VA for a fragment. */
1284static inline void *tile_net_frag_buf(skb_frag_t *f)
1285{
1286 unsigned long pfn = page_to_pfn(skb_frag_page(f));
1287 return pfn_to_kaddr(pfn) + f->page_offset;
1288}
1289
1290/* Acquire a completion entry and an egress slot, or if we can't,
1291 * stop the queue and schedule the tx_wake timer.
1292 */
1293static s64 tile_net_equeue_try_reserve(struct net_device *dev,
1294 struct tile_net_comps *comps,
1295 gxio_mpipe_equeue_t *equeue,
1296 int num_edescs)
1297{
1298 /* Try to acquire a completion entry. */
1299 if (comps->comp_next - comps->comp_last < TILE_NET_MAX_COMPS - 1 ||
1300 tile_net_free_comps(equeue, comps, 32, false) != 0) {
1301
1302 /* Try to acquire an egress slot. */
1303 s64 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1304 if (slot >= 0)
1305 return slot;
1306
1307 /* Freeing some completions gives the equeue time to drain. */
1308 tile_net_free_comps(equeue, comps, TILE_NET_MAX_COMPS, false);
1309
1310 slot = gxio_mpipe_equeue_try_reserve(equeue, num_edescs);
1311 if (slot >= 0)
1312 return slot;
1313 }
1314
1315 /* Still nothing; give up and stop the queue for a short while. */
1316 netif_stop_subqueue(dev, smp_processor_id());
1317 tile_net_schedule_tx_wake_timer(dev);
1318 return -1;
1319}
1320
1321/* Determine how many edesc's are needed for TSO.
1322 *
1323 * Sometimes, if "sendfile()" requires copying, we will be called with
1324 * "data" containing the header and payload, with "frags" being empty.
1325 * Sometimes, for example when using NFS over TCP, a single segment can
1326 * span 3 fragments. This requires special care.
1327 */
1328static int tso_count_edescs(struct sk_buff *skb)
1329{
1330 struct skb_shared_info *sh = skb_shinfo(skb);
1331 unsigned int data_len = skb->data_len;
1332 unsigned int p_len = sh->gso_size;
1333 long f_id = -1; /* id of the current fragment */
1334 long f_size = -1; /* size of the current fragment */
1335 long f_used = -1; /* bytes used from the current fragment */
1336 long n; /* size of the current piece of payload */
1337 int num_edescs = 0;
1338 int segment;
1339
1340 for (segment = 0; segment < sh->gso_segs; segment++) {
1341
1342 unsigned int p_used = 0;
1343
1344 /* One edesc for header and for each piece of the payload. */
1345 for (num_edescs++; p_used < p_len; num_edescs++) {
1346
1347 /* Advance as needed. */
1348 while (f_used >= f_size) {
1349 f_id++;
1350 f_size = sh->frags[f_id].size;
1351 f_used = 0;
1352 }
1353
1354 /* Use bytes from the current fragment. */
1355 n = p_len - p_used;
1356 if (n > f_size - f_used)
1357 n = f_size - f_used;
1358 f_used += n;
1359 p_used += n;
1360 }
1361
1362 /* The last segment may be less than gso_size. */
1363 data_len -= p_len;
1364 if (data_len < p_len)
1365 p_len = data_len;
1366 }
1367
1368 return num_edescs;
1369}
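
/* (Editor's note) Worked example with hypothetical numbers: for
 * gso_size = 1400 and a 4200-byte payload spread over frags of
 * 2000 + 2000 + 200 bytes, there are 3 segments.  Segment 1 needs
 * 1 header edesc + 1 payload edesc, while segments 2 and 3 each
 * straddle a fragment boundary and need 1 + 2, for 8 edescs total.
 */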
1370
1371/* Prepare modified copies of the skbuff headers.
1372 * FIXME: add support for IPv6.
1373 */
1374static void tso_headers_prepare(struct sk_buff *skb, unsigned char *headers,
1375 s64 slot)
1376{
1377 struct skb_shared_info *sh = skb_shinfo(skb);
1378 struct iphdr *ih;
1379 struct tcphdr *th;
1380 unsigned int data_len = skb->data_len;
1381 unsigned char *data = skb->data;
1382 unsigned int ih_off, th_off, sh_len, p_len;
1383 unsigned int isum_seed, tsum_seed, id, seq;
1384 long f_id = -1; /* id of the current fragment */
1385 long f_size = -1; /* size of the current fragment */
1386 long f_used = -1; /* bytes used from the current fragment */
1387 long n; /* size of the current piece of payload */
1388 int segment;
1389
1390 /* Locate original headers and compute various lengths. */
1391 ih = ip_hdr(skb);
1392 th = tcp_hdr(skb);
1393 ih_off = skb_network_offset(skb);
1394 th_off = skb_transport_offset(skb);
1395 sh_len = th_off + tcp_hdrlen(skb);
1396 p_len = sh->gso_size;
1397
1398 /* Set up seed values for IP and TCP csum and initialize id and seq. */
1399 isum_seed = ((0xFFFF - ih->check) +
1400 (0xFFFF - ih->tot_len) +
1401 (0xFFFF - ih->id));
1402 tsum_seed = th->check + (0xFFFF ^ htons(skb->len));
1403 id = ntohs(ih->id);
1404 seq = ntohl(th->seq);
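/* (Editor's note) The seed values fold "0xFFFF - old field" terms into the
 * original checksums so that, per segment, only the new tot_len/id (for IP)
 * or the new TCP length (for TCP) needs to be added back; this reads as an
 * RFC 1624-style incremental checksum update.
 */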
1405
1406 /* Prepare all the headers. */
1407 for (segment = 0; segment < sh->gso_segs; segment++) {
1408 unsigned char *buf;
1409 unsigned int p_used = 0;
1410
1411 /* Copy to the header memory for this segment. */
1412 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1413 NET_IP_ALIGN;
1414 memcpy(buf, data, sh_len);
1415
1416 /* Update copied ip header. */
1417 ih = (struct iphdr *)(buf + ih_off);
1418 ih->tot_len = htons(sh_len + p_len - ih_off);
1419 ih->id = htons(id);
1420 ih->check = csum_long(isum_seed + ih->tot_len +
1421 ih->id) ^ 0xffff;
1422
1423 /* Update copied tcp header. */
1424 th = (struct tcphdr *)(buf + th_off);
1425 th->seq = htonl(seq);
1426 th->check = csum_long(tsum_seed + htons(sh_len + p_len));
1427 if (segment != sh->gso_segs - 1) {
1428 th->fin = 0;
1429 th->psh = 0;
1430 }
1431
1432 /* Skip past the header. */
1433 slot++;
1434
1435 /* Skip past the payload. */
1436 while (p_used < p_len) {
1437
1438 /* Advance as needed. */
1439 while (f_used >= f_size) {
1440 f_id++;
1441 f_size = sh->frags[f_id].size;
1442 f_used = 0;
1443 }
1444
1445 /* Use bytes from the current fragment. */
1446 n = p_len - p_used;
1447 if (n > f_size - f_used)
1448 n = f_size - f_used;
1449 f_used += n;
1450 p_used += n;
1451
1452 slot++;
1453 }
1454
1455 id++;
1456 seq += p_len;
1457
1458 /* The last segment may be less than gso_size. */
1459 data_len -= p_len;
1460 if (data_len < p_len)
1461 p_len = data_len;
1462 }
1463
1464 /* Flush the headers so they are ready for hardware DMA. */
1465 wmb();
1466}
1467
1468/* Pass all the data to mpipe for egress. */
1469static void tso_egress(struct net_device *dev, gxio_mpipe_equeue_t *equeue,
1470 struct sk_buff *skb, unsigned char *headers, s64 slot)
1471{
1472 struct tile_net_priv *priv = netdev_priv(dev);
1473 struct skb_shared_info *sh = skb_shinfo(skb);
1474 unsigned int data_len = skb->data_len;
1475 unsigned int p_len = sh->gso_size;
1476 gxio_mpipe_edesc_t edesc_head = { { 0 } };
1477 gxio_mpipe_edesc_t edesc_body = { { 0 } };
1478 long f_id = -1; /* id of the current fragment */
1479 long f_size = -1; /* size of the current fragment */
1480 long f_used = -1; /* bytes used from the current fragment */
1481 long n; /* size of the current piece of payload */
1482 unsigned long tx_packets = 0, tx_bytes = 0;
1483 unsigned int csum_start, sh_len;
1484 int segment;
1485
1486 /* Prepare to egress the headers: set up header edesc. */
1487 csum_start = skb_checksum_start_offset(skb);
1488 sh_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
1489 edesc_head.csum = 1;
1490 edesc_head.csum_start = csum_start;
1491 edesc_head.csum_dest = csum_start + skb->csum_offset;
1492 edesc_head.xfer_size = sh_len;
1493
1494 /* This is only used to specify the TLB. */
1495 edesc_head.stack_idx = large_buffer_stack;
1496 edesc_body.stack_idx = large_buffer_stack;
1497
1498 /* Egress all the edescs. */
1499 for (segment = 0; segment < sh->gso_segs; segment++) {
1500 void *va;
1501 unsigned char *buf;
1502 unsigned int p_used = 0;
1503
1504 /* Egress the header. */
1505 buf = headers + (slot % EQUEUE_ENTRIES) * HEADER_BYTES +
1506 NET_IP_ALIGN;
1507 edesc_head.va = va_to_tile_io_addr(buf);
1508 gxio_mpipe_equeue_put_at(equeue, edesc_head, slot);
1509 slot++;
1510
1511 /* Egress the payload. */
1512 while (p_used < p_len) {
1513
1514 /* Advance as needed. */
1515 while (f_used >= f_size) {
1516 f_id++;
1517 f_size = sh->frags[f_id].size;
1518 f_used = 0;
1519 }
1520
1521 va = tile_net_frag_buf(&sh->frags[f_id]) + f_used;
1522
1523 /* Use bytes from the current fragment. */
1524 n = p_len - p_used;
1525 if (n > f_size - f_used)
1526 n = f_size - f_used;
1527 f_used += n;
1528 p_used += n;
1529
1530 /* Egress a piece of the payload. */
1531 edesc_body.va = va_to_tile_io_addr(va);
1532 edesc_body.xfer_size = n;
1533 edesc_body.bound = !(p_used < p_len);
1534 gxio_mpipe_equeue_put_at(equeue, edesc_body, slot);
1535 slot++;
1536 }
1537
1538 tx_packets++;
1539 tx_bytes += sh_len + p_len;
1540
1541 /* The last segment may be less than gso_size. */
1542 data_len -= p_len;
1543 if (data_len < p_len)
1544 p_len = data_len;
1545 }
1546
1547 /* Update stats. */
1548 tile_net_stats_add(tx_packets, &priv->stats.tx_packets);
1549 tile_net_stats_add(tx_bytes, &priv->stats.tx_bytes);
1550}
1551
1552/* Do "TSO" handling for egress.
1553 *
1554 * Normally drivers set NETIF_F_TSO only to support hardware TSO;
1555 * otherwise the stack uses scatter-gather to implement GSO in software.
1556 * In our testing, enabling GSO support (via NETIF_F_SG) drops network
1557 * performance down to around 7.5 Gbps on the 10G interfaces, although
1558 * it also drops cpu utilization way down, to under 8%. But
1559 * implementing "TSO" in the driver brings performance back up to line
1560 * rate, while dropping cpu usage even further, to less than 4%. In
1561 * practice, profiling of GSO shows that skb_segment() is what causes
1562 * the performance overheads; we benefit in the driver from using
1563 * preallocated memory to duplicate the TCP/IP headers.
1564 */
1565static int tile_net_tx_tso(struct sk_buff *skb, struct net_device *dev)
1566{
1567 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1568 struct tile_net_priv *priv = netdev_priv(dev);
1569 int channel = priv->echannel;
1570 struct tile_net_egress *egress = &egress_for_echannel[channel];
1571 struct tile_net_comps *comps = info->comps_for_echannel[channel];
1572 gxio_mpipe_equeue_t *equeue = egress->equeue;
1573 unsigned long irqflags;
1574 int num_edescs;
1575 s64 slot;
1576
1577 /* Determine how many mpipe edesc's are needed. */
1578 num_edescs = tso_count_edescs(skb);
1579
1580 local_irq_save(irqflags);
1581
1582 /* Try to acquire a completion entry and an egress slot. */
1583 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1584 if (slot < 0) {
1585 local_irq_restore(irqflags);
1586 return NETDEV_TX_BUSY;
1587 }
1588
1589 /* Set up copies of header data properly. */
1590 tso_headers_prepare(skb, egress->headers, slot);
1591
1592 /* Actually pass the data to the network hardware. */
1593 tso_egress(dev, equeue, skb, egress->headers, slot);
1594
1595 /* Add a completion record. */
1596 add_comp(equeue, comps, slot + num_edescs - 1, skb);
1597
1598 local_irq_restore(irqflags);
1599
1600 /* Make sure the egress timer is scheduled. */
1601 tile_net_schedule_egress_timer();
1602
1603 return NETDEV_TX_OK;
1604}
1605
1606/* Analyze the body and frags for a transmit request. */
1607static unsigned int tile_net_tx_frags(struct frag *frags,
1608 struct sk_buff *skb,
1609 void *b_data, unsigned int b_len)
1610{
1611 unsigned int i, n = 0;
1612
1613 struct skb_shared_info *sh = skb_shinfo(skb);
1614
1615 if (b_len != 0) {
1616 frags[n].buf = b_data;
1617 frags[n++].length = b_len;
1618 }
1619
1620 for (i = 0; i < sh->nr_frags; i++) {
1621 skb_frag_t *f = &sh->frags[i];
1622 frags[n].buf = tile_net_frag_buf(f);
1623 frags[n++].length = skb_frag_size(f);
1624 }
1625
1626 return n;
1627}
1628
1629/* Help the kernel transmit a packet. */
1630static int tile_net_tx(struct sk_buff *skb, struct net_device *dev)
1631{
1632 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1633 struct tile_net_priv *priv = netdev_priv(dev);
1634 struct tile_net_egress *egress = &egress_for_echannel[priv->echannel];
1635 gxio_mpipe_equeue_t *equeue = egress->equeue;
1636 struct tile_net_comps *comps =
1637 info->comps_for_echannel[priv->echannel];
1638 unsigned int len = skb->len;
1639 unsigned char *data = skb->data;
1640 unsigned int num_edescs;
1641 struct frag frags[MAX_FRAGS];
1642 gxio_mpipe_edesc_t edescs[MAX_FRAGS];
1643 unsigned long irqflags;
1644 gxio_mpipe_edesc_t edesc = { { 0 } };
1645 unsigned int i;
1646 s64 slot;
1647
1648 if (skb_is_gso(skb))
1649 return tile_net_tx_tso(skb, dev);
1650
1651 num_edescs = tile_net_tx_frags(frags, skb, data, skb_headlen(skb));
1652
1653 /* This is only used to specify the TLB. */
1654 edesc.stack_idx = large_buffer_stack;
1655
1656 /* Prepare the edescs. */
1657 for (i = 0; i < num_edescs; i++) {
1658 edesc.xfer_size = frags[i].length;
1659 edesc.va = va_to_tile_io_addr(frags[i].buf);
1660 edescs[i] = edesc;
1661 }
1662
1663 /* Mark the final edesc. */
1664 edescs[num_edescs - 1].bound = 1;
1665
1666 /* Add checksum info to the initial edesc, if needed. */
1667 if (skb->ip_summed == CHECKSUM_PARTIAL) {
1668 unsigned int csum_start = skb_checksum_start_offset(skb);
1669 edescs[0].csum = 1;
1670 edescs[0].csum_start = csum_start;
1671 edescs[0].csum_dest = csum_start + skb->csum_offset;
1672 }
1673
1674 local_irq_save(irqflags);
1675
1676 /* Try to acquire a completion entry and an egress slot. */
1677 slot = tile_net_equeue_try_reserve(dev, comps, equeue, num_edescs);
1678 if (slot < 0) {
1679 local_irq_restore(irqflags);
1680 return NETDEV_TX_BUSY;
1681 }
1682
1683 for (i = 0; i < num_edescs; i++)
1684 gxio_mpipe_equeue_put_at(equeue, edescs[i], slot++);
1685
1686 /* Add a completion record. */
1687 add_comp(equeue, comps, slot - 1, skb);
1688
1689 /* NOTE: Use ETH_ZLEN for short packets (e.g. 42 < 60). */
1690 tile_net_stats_add(1, &priv->stats.tx_packets);
1691 tile_net_stats_add(max_t(unsigned int, len, ETH_ZLEN),
1692 &priv->stats.tx_bytes);
1693
1694 local_irq_restore(irqflags);
1695
1696 /* Make sure the egress timer is scheduled. */
1697 tile_net_schedule_egress_timer();
1698
1699 return NETDEV_TX_OK;
1700}
1701
1702/* Return subqueue id on this core (one per core). */
1703static u16 tile_net_select_queue(struct net_device *dev, struct sk_buff *skb)
1704{
1705 return smp_processor_id();
1706}
1707
1708/* Deal with a transmit timeout. */
1709static void tile_net_tx_timeout(struct net_device *dev)
1710{
1711 int cpu;
1712
1713 for_each_online_cpu(cpu)
1714 netif_wake_subqueue(dev, cpu);
1715}
1716
1717/* Ioctl commands. */
1718static int tile_net_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1719{
1720 return -EOPNOTSUPP;
1721}
1722
1723/* Get system network statistics for device. */
1724static struct net_device_stats *tile_net_get_stats(struct net_device *dev)
1725{
1726 struct tile_net_priv *priv = netdev_priv(dev);
1727 return &priv->stats;
1728}
1729
1730/* Change the MTU. */
1731static int tile_net_change_mtu(struct net_device *dev, int new_mtu)
1732{
1733 if ((new_mtu < 68) || (new_mtu > 1500))
1734 return -EINVAL;
1735 dev->mtu = new_mtu;
1736 return 0;
1737}
1738
1739/* Change the Ethernet address of the NIC.
1740 *
1741 * The hypervisor driver does not support changing MAC address. However,
1742 * the hardware does not do anything with the MAC address, so the address
1743 * which gets used on outgoing packets, and which is accepted on incoming
1744 * packets, is completely up to us.
1745 *
1746 * Returns 0 on success, negative on failure.
1747 */
1748static int tile_net_set_mac_address(struct net_device *dev, void *p)
1749{
1750 struct sockaddr *addr = p;
1751
1752 if (!is_valid_ether_addr(addr->sa_data))
1753 return -EINVAL;
1754 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1755 return 0;
1756}
1757
1758#ifdef CONFIG_NET_POLL_CONTROLLER
1759/* Polling 'interrupt' - used by things like netconsole to send skbs
1760 * without having to re-enable interrupts. It's not called while
1761 * the interrupt routine is executing.
1762 */
1763static void tile_net_netpoll(struct net_device *dev)
1764{
1765 disable_percpu_irq(ingress_irq);
1766 tile_net_handle_ingress_irq(ingress_irq, NULL);
1767 enable_percpu_irq(ingress_irq, 0);
1768}
1769#endif
1770
1771static const struct net_device_ops tile_net_ops = {
1772 .ndo_open = tile_net_open,
1773 .ndo_stop = tile_net_stop,
1774 .ndo_start_xmit = tile_net_tx,
1775 .ndo_select_queue = tile_net_select_queue,
1776 .ndo_do_ioctl = tile_net_ioctl,
1777 .ndo_get_stats = tile_net_get_stats,
1778 .ndo_change_mtu = tile_net_change_mtu,
1779 .ndo_tx_timeout = tile_net_tx_timeout,
1780 .ndo_set_mac_address = tile_net_set_mac_address,
1781#ifdef CONFIG_NET_POLL_CONTROLLER
1782 .ndo_poll_controller = tile_net_netpoll,
1783#endif
1784};
1785
1786/* The setup function.
1787 *
1788 * This uses ether_setup() to assign various fields in dev, including
1789 * setting IFF_BROADCAST and IFF_MULTICAST, then sets some extra fields.
1790 */
1791static void tile_net_setup(struct net_device *dev)
1792{
1793 ether_setup(dev);
1794 dev->netdev_ops = &tile_net_ops;
1795 dev->watchdog_timeo = TILE_NET_TIMEOUT;
1796 dev->features |= NETIF_F_LLTX;
1797 dev->features |= NETIF_F_HW_CSUM;
1798 dev->features |= NETIF_F_SG;
1799 dev->features |= NETIF_F_TSO;
1800 dev->mtu = 1500;
1801}
1802
1803/* Allocate the device structure, register the device, and obtain the
1804 * MAC address from the hypervisor.
1805 */
1806static void tile_net_dev_init(const char *name, const uint8_t *mac)
1807{
1808 int ret;
1809 int i;
1810 int nz_addr = 0;
1811 struct net_device *dev;
1812 struct tile_net_priv *priv;
1813
1814 /* HACK: Ignore "loop" links. */
1815 if (strncmp(name, "loop", 4) == 0)
1816 return;
1817
1818 /* Allocate the device structure. Normally, "name" is a
1819 * template, instantiated by register_netdev(), but not for us.
1820 */
1821 dev = alloc_netdev_mqs(sizeof(*priv), name, tile_net_setup,
1822 NR_CPUS, 1);
1823 if (!dev) {
1824 pr_err("alloc_netdev_mqs(%s) failed\n", name);
1825 return;
1826 }
1827
1828 /* Initialize "priv". */
1829 priv = netdev_priv(dev);
1830 memset(priv, 0, sizeof(*priv));
1831 priv->dev = dev;
1832 priv->channel = -1;
1833 priv->loopify_channel = -1;
1834 priv->echannel = -1;
1835
1836 /* Get the MAC address and set it in the device struct; this must
1837 * be done before the device is opened. If the MAC is all zeroes,
1838 * we use a random address, since we're probably on the simulator.
1839 */
1840 for (i = 0; i < 6; i++)
1841 nz_addr |= mac[i];
1842
1843 if (nz_addr) {
1844 memcpy(dev->dev_addr, mac, 6);
1845 dev->addr_len = 6;
1846 } else {
1847 random_ether_addr(dev->dev_addr);
1848 }
1849
1850 /* Register the network device. */
1851 ret = register_netdev(dev);
1852 if (ret) {
1853 netdev_err(dev, "register_netdev failed %d\n", ret);
1854 free_netdev(dev);
1855 return;
1856 }
1857}
1858
1859/* Per-cpu module initialization. */
1860static void tile_net_init_module_percpu(void *unused)
1861{
1862 struct tile_net_info *info = &__get_cpu_var(per_cpu_info);
1863 int my_cpu = smp_processor_id();
1864
1865 info->has_iqueue = false;
1866
1867 info->my_cpu = my_cpu;
1868
1869 /* Initialize the egress timer. */
1870 hrtimer_init(&info->egress_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
1871 info->egress_timer.function = tile_net_handle_egress_timer;
1872}
1873
1874/* Module initialization. */
1875static int __init tile_net_init_module(void)
1876{
1877 int i;
1878 char name[GXIO_MPIPE_LINK_NAME_LEN];
1879 uint8_t mac[6];
1880
1881 pr_info("Tilera Network Driver\n");
1882
1883 mutex_init(&tile_net_devs_for_channel_mutex);
1884
1885 /* Initialize each CPU. */
1886 on_each_cpu(tile_net_init_module_percpu, NULL, 1);
1887
1888 /* Find out what devices we have, and initialize them. */
1889 for (i = 0; gxio_mpipe_link_enumerate_mac(i, name, mac) >= 0; i++)
1890 tile_net_dev_init(name, mac);
1891
1892 if (!network_cpus_init())
1893 network_cpus_map = *cpu_online_mask;
1894
1895 return 0;
1896}
1897
1898module_init(tile_net_init_module);
diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
index 4ffcd57b011b..2857ab078aac 100644
--- a/drivers/net/hyperv/hyperv_net.h
+++ b/drivers/net/hyperv/hyperv_net.h
@@ -478,6 +478,7 @@ struct netvsc_device {
 	u32 nvsp_version;
 
 	atomic_t num_outstanding_sends;
+	wait_queue_head_t wait_drain;
 	bool start_remove;
 	bool destroy;
 	/*
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 8b919471472f..0c569831db5a 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -42,6 +42,7 @@ static struct netvsc_device *alloc_net_device(struct hv_device *device)
 	if (!net_device)
 		return NULL;
 
+	init_waitqueue_head(&net_device->wait_drain);
 	net_device->start_remove = false;
 	net_device->destroy = false;
 	net_device->dev = device;
@@ -387,12 +388,8 @@ int netvsc_device_remove(struct hv_device *device)
 	spin_unlock_irqrestore(&device->channel->inbound_lock, flags);
 
 	/* Wait for all send completions */
-	while (atomic_read(&net_device->num_outstanding_sends)) {
-		dev_info(&device->device,
-			 "waiting for %d requests to complete...\n",
-			 atomic_read(&net_device->num_outstanding_sends));
-		udelay(100);
-	}
+	wait_event(net_device->wait_drain,
+		   atomic_read(&net_device->num_outstanding_sends) == 0);
 
 	netvsc_disconnect_vsp(net_device);
 
@@ -486,6 +483,9 @@ static void netvsc_send_completion(struct hv_device *device,
 	num_outstanding_sends =
 		atomic_dec_return(&net_device->num_outstanding_sends);
 
+	if (net_device->destroy && num_outstanding_sends == 0)
+		wake_up(&net_device->wait_drain);
+
 	if (netif_queue_stopped(ndev) && !net_device->start_remove &&
 	    (hv_ringbuf_avail_percent(&device->channel->outbound)
 	     > RING_AVAIL_PERCENT_HIWATER ||
diff --git a/drivers/net/phy/icplus.c b/drivers/net/phy/icplus.c
index 5ac46f5226f3..47f8e8939266 100644
--- a/drivers/net/phy/icplus.c
+++ b/drivers/net/phy/icplus.c
@@ -41,6 +41,8 @@ MODULE_LICENSE("GPL");
 #define IP1001_APS_ON			11	/* IP1001 APS Mode bit */
 #define IP101A_G_APS_ON			2	/* IP101A/G APS Mode bit */
 #define IP101A_G_IRQ_CONF_STATUS	0x11	/* Conf Info IRQ & Status Reg */
+#define IP101A_G_IRQ_PIN_USED		(1<<15) /* INTR pin used */
+#define IP101A_G_IRQ_DEFAULT		IP101A_G_IRQ_PIN_USED
 
 static int ip175c_config_init(struct phy_device *phydev)
 {
@@ -136,6 +138,11 @@ static int ip1001_config_init(struct phy_device *phydev)
 		if (c < 0)
 			return c;
 
+	/* INTR pin used: speed/link/duplex will cause an interrupt */
+	c = phy_write(phydev, IP101A_G_IRQ_CONF_STATUS, IP101A_G_IRQ_DEFAULT);
+	if (c < 0)
+		return c;
+
 	if (phydev->interface == PHY_INTERFACE_MODE_RGMII) {
 		/* Additional delay (2ns) used to adjust RX clock phase
 		 * at RGMII interface */
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 683ef1ce5519..5061608f408c 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -96,7 +96,7 @@ static int of_mdio_bus_match(struct device *dev, void *mdio_bus_np)
 }
 /**
  * of_mdio_find_bus - Given an mii_bus node, find the mii_bus.
- * @mdio_np: Pointer to the mii_bus.
+ * @mdio_bus_np: Pointer to the mii_bus.
  *
  * Returns a pointer to the mii_bus, or NULL if none found.
  *
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 71e2b0523bc2..3ae80eccd0ef 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -35,6 +35,7 @@
 #include <linux/crc32.h>
 #include <linux/usb/usbnet.h>
 #include <linux/slab.h>
+#include <linux/if_vlan.h>
 
 #define DRIVER_VERSION "22-Dec-2011"
 #define DRIVER_NAME "asix"
@@ -321,7 +322,7 @@ static int asix_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 			return 0;
 		}
 
-		if ((size > dev->net->mtu + ETH_HLEN) ||
+		if ((size > dev->net->mtu + ETH_HLEN + VLAN_HLEN) ||
 		    (size + offset > skb->len)) {
 			netdev_err(dev->net, "asix_rx_fixup() Bad RX Length %d\n",
 				   size);
diff --git a/drivers/net/usb/mcs7830.c b/drivers/net/usb/mcs7830.c
index add1064f755d..03c2d8d653df 100644
--- a/drivers/net/usb/mcs7830.c
+++ b/drivers/net/usb/mcs7830.c
@@ -629,11 +629,31 @@ static int mcs7830_rx_fixup(struct usbnet *dev, struct sk_buff *skb)
 	return skb->len > 0;
 }
 
+static void mcs7830_status(struct usbnet *dev, struct urb *urb)
+{
+	u8 *buf = urb->transfer_buffer;
+	bool link;
+
+	if (urb->actual_length < 16)
+		return;
+
+	link = !(buf[1] & 0x20);
+	if (netif_carrier_ok(dev->net) != link) {
+		if (link) {
+			netif_carrier_on(dev->net);
+			usbnet_defer_kevent(dev, EVENT_LINK_RESET);
+		} else
+			netif_carrier_off(dev->net);
+		netdev_dbg(dev->net, "Link Status is: %d\n", link);
+	}
+}
+
 static const struct driver_info moschip_info = {
 	.description	= "MOSCHIP 7830/7832/7730 usb-NET adapter",
 	.bind		= mcs7830_bind,
 	.rx_fixup	= mcs7830_rx_fixup,
-	.flags		= FLAG_ETHER,
+	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
+	.status		= mcs7830_status,
 	.in		= 1,
 	.out		= 2,
 };
@@ -642,7 +662,8 @@ static const struct driver_info sitecom_info = {
 	.description	= "Sitecom LN-30 usb-NET adapter",
 	.bind		= mcs7830_bind,
 	.rx_fixup	= mcs7830_rx_fixup,
-	.flags		= FLAG_ETHER,
+	.flags		= FLAG_ETHER | FLAG_LINK_INTR,
+	.status		= mcs7830_status,
 	.in		= 1,
 	.out		= 2,
 };
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 380dbea6109d..3b206786b5e7 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -547,6 +547,8 @@ static const struct usb_device_id products[] = {
 	{QMI_GOBI_DEVICE(0x16d8, 0x8002)},	/* CMDTech Gobi 2000 Modem device (VU922) */
 	{QMI_GOBI_DEVICE(0x05c6, 0x9205)},	/* Gobi 2000 Modem device */
 	{QMI_GOBI_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+	{QMI_GOBI_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
+	{QMI_GOBI_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{ }					/* END */
 };
 MODULE_DEVICE_TABLE(usb, products);
diff --git a/drivers/net/usb/sierra_net.c b/drivers/net/usb/sierra_net.c
index 3faef5670d1f..d75d1f56becf 100644
--- a/drivers/net/usb/sierra_net.c
+++ b/drivers/net/usb/sierra_net.c
@@ -946,7 +946,7 @@ struct sk_buff *sierra_net_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
946} 946}
947 947
948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 }; 948static const u8 sierra_net_ifnum_list[] = { 7, 10, 11 };
949static const struct sierra_net_info_data sierra_net_info_data_68A3 = { 949static const struct sierra_net_info_data sierra_net_info_data_direct_ip = {
950 .rx_urb_size = 8 * 1024, 950 .rx_urb_size = 8 * 1024,
951 .whitelist = { 951 .whitelist = {
952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list), 952 .infolen = ARRAY_SIZE(sierra_net_ifnum_list),
@@ -954,7 +954,7 @@ static const struct sierra_net_info_data sierra_net_info_data_68A3 = {
954 } 954 }
955}; 955};
956 956
957static const struct driver_info sierra_net_info_68A3 = { 957static const struct driver_info sierra_net_info_direct_ip = {
958 .description = "Sierra Wireless USB-to-WWAN Modem", 958 .description = "Sierra Wireless USB-to-WWAN Modem",
959 .flags = FLAG_WWAN | FLAG_SEND_ZLP, 959 .flags = FLAG_WWAN | FLAG_SEND_ZLP,
960 .bind = sierra_net_bind, 960 .bind = sierra_net_bind,
@@ -962,12 +962,18 @@ static const struct driver_info sierra_net_info_68A3 = {
962 .status = sierra_net_status, 962 .status = sierra_net_status,
963 .rx_fixup = sierra_net_rx_fixup, 963 .rx_fixup = sierra_net_rx_fixup,
964 .tx_fixup = sierra_net_tx_fixup, 964 .tx_fixup = sierra_net_tx_fixup,
965 .data = (unsigned long)&sierra_net_info_data_68A3, 965 .data = (unsigned long)&sierra_net_info_data_direct_ip,
966}; 966};
967 967
968static const struct usb_device_id products[] = { 968static const struct usb_device_id products[] = {
969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */ 969 {USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless USB-to-WWAN modem */
970 .driver_info = (unsigned long) &sierra_net_info_68A3}, 970 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 {USB_DEVICE(0x0F3D, 0x68A3), /* AT&T Direct IP modem */
972 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
973 {USB_DEVICE(0x1199, 0x68AA), /* Sierra Wireless Direct IP LTE modem */
974 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
975 {USB_DEVICE(0x0F3D, 0x68AA), /* AT&T Direct IP LTE modem */
976 .driver_info = (unsigned long) &sierra_net_info_direct_ip},
971 977
972 {}, /* last item */ 978 {}, /* last item */
973}; 979};
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 9ce6995e8d08..f18149ae2588 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -42,7 +42,8 @@ module_param(gso, bool, 0444);
42#define VIRTNET_DRIVER_VERSION "1.0.0" 42#define VIRTNET_DRIVER_VERSION "1.0.0"
43 43
44struct virtnet_stats { 44struct virtnet_stats {
45 struct u64_stats_sync syncp; 45 struct u64_stats_sync tx_syncp;
46 struct u64_stats_sync rx_syncp;
46 u64 tx_bytes; 47 u64 tx_bytes;
47 u64 tx_packets; 48 u64 tx_packets;
48 49
@@ -300,10 +301,10 @@ static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
300 301
301 hdr = skb_vnet_hdr(skb); 302 hdr = skb_vnet_hdr(skb);
302 303
303 u64_stats_update_begin(&stats->syncp); 304 u64_stats_update_begin(&stats->rx_syncp);
304 stats->rx_bytes += skb->len; 305 stats->rx_bytes += skb->len;
305 stats->rx_packets++; 306 stats->rx_packets++;
306 u64_stats_update_end(&stats->syncp); 307 u64_stats_update_end(&stats->rx_syncp);
307 308
308 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) { 309 if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
309 pr_debug("Needs csum!\n"); 310 pr_debug("Needs csum!\n");
@@ -565,10 +566,10 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
565 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) { 566 while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
566 pr_debug("Sent skb %p\n", skb); 567 pr_debug("Sent skb %p\n", skb);
567 568
568 u64_stats_update_begin(&stats->syncp); 569 u64_stats_update_begin(&stats->tx_syncp);
569 stats->tx_bytes += skb->len; 570 stats->tx_bytes += skb->len;
570 stats->tx_packets++; 571 stats->tx_packets++;
571 u64_stats_update_end(&stats->syncp); 572 u64_stats_update_end(&stats->tx_syncp);
572 573
573 tot_sgs += skb_vnet_hdr(skb)->num_sg; 574 tot_sgs += skb_vnet_hdr(skb)->num_sg;
574 dev_kfree_skb_any(skb); 575 dev_kfree_skb_any(skb);
@@ -703,12 +704,16 @@ static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
703 u64 tpackets, tbytes, rpackets, rbytes; 704 u64 tpackets, tbytes, rpackets, rbytes;
704 705
705 do { 706 do {
706 start = u64_stats_fetch_begin(&stats->syncp); 707 start = u64_stats_fetch_begin(&stats->tx_syncp);
707 tpackets = stats->tx_packets; 708 tpackets = stats->tx_packets;
708 tbytes = stats->tx_bytes; 709 tbytes = stats->tx_bytes;
710 } while (u64_stats_fetch_retry(&stats->tx_syncp, start));
711
712 do {
713 start = u64_stats_fetch_begin(&stats->rx_syncp);
709 rpackets = stats->rx_packets; 714 rpackets = stats->rx_packets;
710 rbytes = stats->rx_bytes; 715 rbytes = stats->rx_bytes;
711 } while (u64_stats_fetch_retry(&stats->syncp, start)); 716 } while (u64_stats_fetch_retry(&stats->rx_syncp, start));
712 717
713 tot->rx_packets += rpackets; 718 tot->rx_packets += rpackets;
714 tot->tx_packets += tpackets; 719 tot->tx_packets += tpackets;
@@ -1231,11 +1236,6 @@ static int virtnet_freeze(struct virtio_device *vdev)
1231 vi->config_enable = false; 1236 vi->config_enable = false;
1232 mutex_unlock(&vi->config_lock); 1237 mutex_unlock(&vi->config_lock);
1233 1238
1234 virtqueue_disable_cb(vi->rvq);
1235 virtqueue_disable_cb(vi->svq);
1236 if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
1237 virtqueue_disable_cb(vi->cvq);
1238
1239 netif_device_detach(vi->dev); 1239 netif_device_detach(vi->dev);
1240 cancel_delayed_work_sync(&vi->refill); 1240 cancel_delayed_work_sync(&vi->refill);
1241 1241
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 0ba81a66061f..fbaa30930076 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -2415,6 +2415,22 @@ ath5k_tx_complete_poll_work(struct work_struct *work)
2415* Initialization routines * 2415* Initialization routines *
2416\*************************/ 2416\*************************/
2417 2417
2418static const struct ieee80211_iface_limit if_limits[] = {
2419 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
2420 { .max = 4, .types =
2421#ifdef CONFIG_MAC80211_MESH
2422 BIT(NL80211_IFTYPE_MESH_POINT) |
2423#endif
2424 BIT(NL80211_IFTYPE_AP) },
2425};
2426
2427static const struct ieee80211_iface_combination if_comb = {
2428 .limits = if_limits,
2429 .n_limits = ARRAY_SIZE(if_limits),
2430 .max_interfaces = 2048,
2431 .num_different_channels = 1,
2432};
2433
2418int __devinit 2434int __devinit
2419ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops) 2435ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2420{ 2436{
@@ -2436,6 +2452,9 @@ ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
2436 BIT(NL80211_IFTYPE_ADHOC) | 2452 BIT(NL80211_IFTYPE_ADHOC) |
2437 BIT(NL80211_IFTYPE_MESH_POINT); 2453 BIT(NL80211_IFTYPE_MESH_POINT);
2438 2454
2455 hw->wiphy->iface_combinations = &if_comb;
2456 hw->wiphy->n_iface_combinations = 1;
2457
2439 /* SW support for IBSS_RSN is provided by mac80211 */ 2458 /* SW support for IBSS_RSN is provided by mac80211 */
2440 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 2459 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
2441 2460
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
index ac53d901801d..dfb0441f406c 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.c
@@ -3809,7 +3809,7 @@ static bool is_pmu_set(struct ath_hw *ah, u32 pmu_reg, int pmu_set)
3809 return true; 3809 return true;
3810} 3810}
3811 3811
3812static void ar9003_hw_internal_regulator_apply(struct ath_hw *ah) 3812void ar9003_hw_internal_regulator_apply(struct ath_hw *ah)
3813{ 3813{
3814 int internal_regulator = 3814 int internal_regulator =
3815 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR); 3815 ath9k_hw_ar9300_get_eeprom(ah, EEP_INTERNAL_REGULATOR);
diff --git a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
index 2505ac44f0c1..8396d150ce01 100644
--- a/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/ar9003_eeprom.h
@@ -334,4 +334,7 @@ u8 *ar9003_get_spur_chan_ptr(struct ath_hw *ah, bool is_2ghz);
334 334
335unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah, 335unsigned int ar9003_get_paprd_scale_factor(struct ath_hw *ah,
336 struct ath9k_channel *chan); 336 struct ath9k_channel *chan);
337
338void ar9003_hw_internal_regulator_apply(struct ath_hw *ah);
339
337#endif 340#endif
diff --git a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
index f11d9b2677fd..1bd3a3d22101 100644
--- a/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
+++ b/drivers/net/wireless/ath/ath9k/ar9330_1p1_initvals.h
@@ -1,5 +1,6 @@
1/* 1/*
2 * Copyright (c) 2011 Atheros Communications Inc. 2 * Copyright (c) 2010-2011 Atheros Communications Inc.
3 * Copyright (c) 2011-2012 Qualcomm Atheros Inc.
3 * 4 *
4 * Permission to use, copy, modify, and/or distribute this software for any 5 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above 6 * purpose with or without fee is hereby granted, provided that the above
@@ -18,7 +19,7 @@
18#define INITVALS_9330_1P1_H 19#define INITVALS_9330_1P1_H
19 20
20static const u32 ar9331_1p1_baseband_postamble[][5] = { 21static const u32 ar9331_1p1_baseband_postamble[][5] = {
21 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 22 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
22 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005}, 23 {0x00009810, 0xd00a8005, 0xd00a8005, 0xd00a8005, 0xd00a8005},
23 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e}, 24 {0x00009820, 0x206a002e, 0x206a002e, 0x206a002e, 0x206a002e},
24 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0}, 25 {0x00009824, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0},
@@ -27,10 +28,10 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
27 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c}, 28 {0x00009830, 0x0000059c, 0x0000059c, 0x0000059c, 0x0000059c},
28 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044}, 29 {0x00009c00, 0x00000044, 0x00000044, 0x00000044, 0x00000044},
29 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4}, 30 {0x00009e00, 0x0372161e, 0x0372161e, 0x037216a4, 0x037216a4},
30 {0x00009e04, 0x00182020, 0x00182020, 0x00182020, 0x00182020}, 31 {0x00009e04, 0x00202020, 0x00202020, 0x00202020, 0x00202020},
31 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2}, 32 {0x00009e0c, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2},
32 {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e}, 33 {0x00009e10, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e},
33 {0x00009e14, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e}, 34 {0x00009e14, 0x31365d5e, 0x3136605e, 0x3136605e, 0x31365d5e},
34 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 35 {0x00009e18, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
35 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, 36 {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
36 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, 37 {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
@@ -55,7 +56,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
55 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 56 {0x0000a288, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
56 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 57 {0x0000a28c, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
57 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, 58 {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18},
58 {0x0000a2d0, 0x00071981, 0x00071981, 0x00071981, 0x00071981}, 59 {0x0000a2d0, 0x00071982, 0x00071982, 0x00071982, 0x00071982},
59 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a}, 60 {0x0000a2d8, 0xf999a83a, 0xf999a83a, 0xf999a83a, 0xf999a83a},
60 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000}, 61 {0x0000a358, 0x00000000, 0x00000000, 0x00000000, 0x00000000},
61 {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020}, 62 {0x0000ae04, 0x00802020, 0x00802020, 0x00802020, 0x00802020},
@@ -63,7 +64,7 @@ static const u32 ar9331_1p1_baseband_postamble[][5] = {
63}; 64};
64 65
65static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = { 66static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
66 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 67 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
67 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 68 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
68 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 69 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
69 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 70 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -155,7 +156,7 @@ static const u32 ar9331_modes_lowest_ob_db_tx_gain_1p1[][5] = {
155}; 156};
156 157
157static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = { 158static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
158 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 159 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
159 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 160 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
160 {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52}, 161 {0x0000a2dc, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52, 0xffaa9a52},
161 {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84}, 162 {0x0000a2e0, 0xffb31c84, 0xffb31c84, 0xffb31c84, 0xffb31c84},
@@ -245,7 +246,7 @@ static const u32 ar9331_modes_high_ob_db_tx_gain_1p1[][5] = {
245}; 246};
246 247
247static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = { 248static const u32 ar9331_modes_low_ob_db_tx_gain_1p1[][5] = {
248 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 249 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
249 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 250 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
250 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 251 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
251 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 252 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -377,14 +378,14 @@ static const u32 ar9331_1p1_radio_core[][2] = {
377 {0x000160b4, 0x92480040}, 378 {0x000160b4, 0x92480040},
378 {0x000160c0, 0x006db6db}, 379 {0x000160c0, 0x006db6db},
379 {0x000160c4, 0x0186db60}, 380 {0x000160c4, 0x0186db60},
380 {0x000160c8, 0x6db6db6c}, 381 {0x000160c8, 0x6db4db6c},
381 {0x000160cc, 0x6de6c300}, 382 {0x000160cc, 0x6de6c300},
382 {0x000160d0, 0x14500820}, 383 {0x000160d0, 0x14500820},
383 {0x00016100, 0x04cb0001}, 384 {0x00016100, 0x04cb0001},
384 {0x00016104, 0xfff80015}, 385 {0x00016104, 0xfff80015},
385 {0x00016108, 0x00080010}, 386 {0x00016108, 0x00080010},
386 {0x0001610c, 0x00170000}, 387 {0x0001610c, 0x00170000},
387 {0x00016140, 0x10804000}, 388 {0x00016140, 0x10800000},
388 {0x00016144, 0x01884080}, 389 {0x00016144, 0x01884080},
389 {0x00016148, 0x000080c0}, 390 {0x00016148, 0x000080c0},
390 {0x00016280, 0x01000015}, 391 {0x00016280, 0x01000015},
@@ -417,7 +418,7 @@ static const u32 ar9331_1p1_radio_core[][2] = {
417}; 418};
418 419
419static const u32 ar9331_1p1_soc_postamble[][5] = { 420static const u32 ar9331_1p1_soc_postamble[][5] = {
420 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 421 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
421 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022}, 422 {0x00007010, 0x00000022, 0x00000022, 0x00000022, 0x00000022},
422}; 423};
423 424
@@ -691,7 +692,7 @@ static const u32 ar9331_1p1_baseband_core[][2] = {
691}; 692};
692 693
693static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = { 694static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
694 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 695 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
695 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a}, 696 {0x0000a2d8, 0x7999a83a, 0x7999a83a, 0x7999a83a, 0x7999a83a},
696 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52}, 697 {0x0000a2dc, 0xffff2a52, 0xffff2a52, 0xffff2a52, 0xffff2a52},
697 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84}, 698 {0x0000a2e0, 0xffffcc84, 0xffffcc84, 0xffffcc84, 0xffffcc84},
@@ -783,7 +784,7 @@ static const u32 ar9331_modes_high_power_tx_gain_1p1[][5] = {
783}; 784};
784 785
785static const u32 ar9331_1p1_mac_postamble[][5] = { 786static const u32 ar9331_1p1_mac_postamble[][5] = {
786 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */ 787 /* Addr 5G_HT20 5G_HT40 2G_HT40 2G_HT20 */
787 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160}, 788 {0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160},
788 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c}, 789 {0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c},
789 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38}, 790 {0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38},
@@ -973,26 +974,27 @@ static const u32 ar9331_1p1_mac_core[][2] = {
973 974
974static const u32 ar9331_common_rx_gain_1p1[][2] = { 975static const u32 ar9331_common_rx_gain_1p1[][2] = {
975 /* Addr allmodes */ 976 /* Addr allmodes */
976 {0x0000a000, 0x00010000}, 977 {0x00009e18, 0x05000000},
977 {0x0000a004, 0x00030002}, 978 {0x0000a000, 0x00060005},
978 {0x0000a008, 0x00050004}, 979 {0x0000a004, 0x00810080},
979 {0x0000a00c, 0x00810080}, 980 {0x0000a008, 0x00830082},
980 {0x0000a010, 0x00830082}, 981 {0x0000a00c, 0x00850084},
981 {0x0000a014, 0x01810180}, 982 {0x0000a010, 0x01820181},
982 {0x0000a018, 0x01830182}, 983 {0x0000a014, 0x01840183},
983 {0x0000a01c, 0x01850184}, 984 {0x0000a018, 0x01880185},
984 {0x0000a020, 0x01890188}, 985 {0x0000a01c, 0x018a0189},
985 {0x0000a024, 0x018b018a}, 986 {0x0000a020, 0x02850284},
986 {0x0000a028, 0x018d018c}, 987 {0x0000a024, 0x02890288},
987 {0x0000a02c, 0x01910190}, 988 {0x0000a028, 0x028b028a},
988 {0x0000a030, 0x01930192}, 989 {0x0000a02c, 0x03850384},
989 {0x0000a034, 0x01950194}, 990 {0x0000a030, 0x03890388},
990 {0x0000a038, 0x038a0196}, 991 {0x0000a034, 0x038b038a},
991 {0x0000a03c, 0x038c038b}, 992 {0x0000a038, 0x038d038c},
992 {0x0000a040, 0x0390038d}, 993 {0x0000a03c, 0x03910390},
993 {0x0000a044, 0x03920391}, 994 {0x0000a040, 0x03930392},
994 {0x0000a048, 0x03940393}, 995 {0x0000a044, 0x03950394},
995 {0x0000a04c, 0x03960395}, 996 {0x0000a048, 0x00000396},
997 {0x0000a04c, 0x00000000},
996 {0x0000a050, 0x00000000}, 998 {0x0000a050, 0x00000000},
997 {0x0000a054, 0x00000000}, 999 {0x0000a054, 0x00000000},
998 {0x0000a058, 0x00000000}, 1000 {0x0000a058, 0x00000000},
@@ -1005,15 +1007,15 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1005 {0x0000a074, 0x00000000}, 1007 {0x0000a074, 0x00000000},
1006 {0x0000a078, 0x00000000}, 1008 {0x0000a078, 0x00000000},
1007 {0x0000a07c, 0x00000000}, 1009 {0x0000a07c, 0x00000000},
1008 {0x0000a080, 0x22222229}, 1010 {0x0000a080, 0x28282828},
1009 {0x0000a084, 0x1d1d1d1d}, 1011 {0x0000a084, 0x28282828},
1010 {0x0000a088, 0x1d1d1d1d}, 1012 {0x0000a088, 0x28282828},
1011 {0x0000a08c, 0x1d1d1d1d}, 1013 {0x0000a08c, 0x28282828},
1012 {0x0000a090, 0x171d1d1d}, 1014 {0x0000a090, 0x28282828},
1013 {0x0000a094, 0x11111717}, 1015 {0x0000a094, 0x24242428},
1014 {0x0000a098, 0x00030311}, 1016 {0x0000a098, 0x171e1e1e},
1015 {0x0000a09c, 0x00000000}, 1017 {0x0000a09c, 0x02020b0b},
1016 {0x0000a0a0, 0x00000000}, 1018 {0x0000a0a0, 0x02020202},
1017 {0x0000a0a4, 0x00000000}, 1019 {0x0000a0a4, 0x00000000},
1018 {0x0000a0a8, 0x00000000}, 1020 {0x0000a0a8, 0x00000000},
1019 {0x0000a0ac, 0x00000000}, 1021 {0x0000a0ac, 0x00000000},
@@ -1021,27 +1023,27 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1021 {0x0000a0b4, 0x00000000}, 1023 {0x0000a0b4, 0x00000000},
1022 {0x0000a0b8, 0x00000000}, 1024 {0x0000a0b8, 0x00000000},
1023 {0x0000a0bc, 0x00000000}, 1025 {0x0000a0bc, 0x00000000},
1024 {0x0000a0c0, 0x001f0000}, 1026 {0x0000a0c0, 0x22072208},
1025 {0x0000a0c4, 0x01000101}, 1027 {0x0000a0c4, 0x22052206},
1026 {0x0000a0c8, 0x011e011f}, 1028 {0x0000a0c8, 0x22032204},
1027 {0x0000a0cc, 0x011c011d}, 1029 {0x0000a0cc, 0x22012202},
1028 {0x0000a0d0, 0x02030204}, 1030 {0x0000a0d0, 0x221f2200},
1029 {0x0000a0d4, 0x02010202}, 1031 {0x0000a0d4, 0x221d221e},
1030 {0x0000a0d8, 0x021f0200}, 1032 {0x0000a0d8, 0x33023303},
1031 {0x0000a0dc, 0x0302021e}, 1033 {0x0000a0dc, 0x33003301},
1032 {0x0000a0e0, 0x03000301}, 1034 {0x0000a0e0, 0x331e331f},
1033 {0x0000a0e4, 0x031e031f}, 1035 {0x0000a0e4, 0x4402331d},
1034 {0x0000a0e8, 0x0402031d}, 1036 {0x0000a0e8, 0x44004401},
1035 {0x0000a0ec, 0x04000401}, 1037 {0x0000a0ec, 0x441e441f},
1036 {0x0000a0f0, 0x041e041f}, 1038 {0x0000a0f0, 0x55025503},
1037 {0x0000a0f4, 0x0502041d}, 1039 {0x0000a0f4, 0x55005501},
1038 {0x0000a0f8, 0x05000501}, 1040 {0x0000a0f8, 0x551e551f},
1039 {0x0000a0fc, 0x051e051f}, 1041 {0x0000a0fc, 0x6602551d},
1040 {0x0000a100, 0x06010602}, 1042 {0x0000a100, 0x66006601},
1041 {0x0000a104, 0x061f0600}, 1043 {0x0000a104, 0x661e661f},
1042 {0x0000a108, 0x061d061e}, 1044 {0x0000a108, 0x7703661d},
1043 {0x0000a10c, 0x07020703}, 1045 {0x0000a10c, 0x77017702},
1044 {0x0000a110, 0x07000701}, 1046 {0x0000a110, 0x00007700},
1045 {0x0000a114, 0x00000000}, 1047 {0x0000a114, 0x00000000},
1046 {0x0000a118, 0x00000000}, 1048 {0x0000a118, 0x00000000},
1047 {0x0000a11c, 0x00000000}, 1049 {0x0000a11c, 0x00000000},
@@ -1054,26 +1056,26 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1054 {0x0000a138, 0x00000000}, 1056 {0x0000a138, 0x00000000},
1055 {0x0000a13c, 0x00000000}, 1057 {0x0000a13c, 0x00000000},
1056 {0x0000a140, 0x001f0000}, 1058 {0x0000a140, 0x001f0000},
1057 {0x0000a144, 0x01000101}, 1059 {0x0000a144, 0x111f1100},
1058 {0x0000a148, 0x011e011f}, 1060 {0x0000a148, 0x111d111e},
1059 {0x0000a14c, 0x011c011d}, 1061 {0x0000a14c, 0x111b111c},
1060 {0x0000a150, 0x02030204}, 1062 {0x0000a150, 0x22032204},
1061 {0x0000a154, 0x02010202}, 1063 {0x0000a154, 0x22012202},
1062 {0x0000a158, 0x021f0200}, 1064 {0x0000a158, 0x221f2200},
1063 {0x0000a15c, 0x0302021e}, 1065 {0x0000a15c, 0x221d221e},
1064 {0x0000a160, 0x03000301}, 1066 {0x0000a160, 0x33013302},
1065 {0x0000a164, 0x031e031f}, 1067 {0x0000a164, 0x331f3300},
1066 {0x0000a168, 0x0402031d}, 1068 {0x0000a168, 0x4402331e},
1067 {0x0000a16c, 0x04000401}, 1069 {0x0000a16c, 0x44004401},
1068 {0x0000a170, 0x041e041f}, 1070 {0x0000a170, 0x441e441f},
1069 {0x0000a174, 0x0502041d}, 1071 {0x0000a174, 0x55015502},
1070 {0x0000a178, 0x05000501}, 1072 {0x0000a178, 0x551f5500},
1071 {0x0000a17c, 0x051e051f}, 1073 {0x0000a17c, 0x6602551e},
1072 {0x0000a180, 0x06010602}, 1074 {0x0000a180, 0x66006601},
1073 {0x0000a184, 0x061f0600}, 1075 {0x0000a184, 0x661e661f},
1074 {0x0000a188, 0x061d061e}, 1076 {0x0000a188, 0x7703661d},
1075 {0x0000a18c, 0x07020703}, 1077 {0x0000a18c, 0x77017702},
1076 {0x0000a190, 0x07000701}, 1078 {0x0000a190, 0x00007700},
1077 {0x0000a194, 0x00000000}, 1079 {0x0000a194, 0x00000000},
1078 {0x0000a198, 0x00000000}, 1080 {0x0000a198, 0x00000000},
1079 {0x0000a19c, 0x00000000}, 1081 {0x0000a19c, 0x00000000},
@@ -1100,14 +1102,14 @@ static const u32 ar9331_common_rx_gain_1p1[][2] = {
1100 {0x0000a1f0, 0x00000396}, 1102 {0x0000a1f0, 0x00000396},
1101 {0x0000a1f4, 0x00000396}, 1103 {0x0000a1f4, 0x00000396},
1102 {0x0000a1f8, 0x00000396}, 1104 {0x0000a1f8, 0x00000396},
1103 {0x0000a1fc, 0x00000196}, 1105 {0x0000a1fc, 0x00000296},
1104}; 1106};
1105 1107
1106static const u32 ar9331_common_tx_gain_offset1_1[][1] = { 1108static const u32 ar9331_common_tx_gain_offset1_1[][1] = {
1107 {0}, 1109 {0x00000000},
1108 {3}, 1110 {0x00000003},
1109 {0}, 1111 {0x00000000},
1110 {0}, 1112 {0x00000000},
1111}; 1113};
1112 1114
1113static const u32 ar9331_1p1_chansel_xtal_25M[] = { 1115static const u32 ar9331_1p1_chansel_xtal_25M[] = {
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index abe05ec85d50..7db1890448f2 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -1468,6 +1468,9 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1468 return false; 1468 return false;
1469 1469
1470 ah->chip_fullsleep = false; 1470 ah->chip_fullsleep = false;
1471
1472 if (AR_SREV_9330(ah))
1473 ar9003_hw_internal_regulator_apply(ah);
1471 ath9k_hw_init_pll(ah, chan); 1474 ath9k_hw_init_pll(ah, chan);
1472 ath9k_hw_set_rfmode(ah, chan); 1475 ath9k_hw_set_rfmode(ah, chan);
1473 1476
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index dfa78e8b6470..4de4473776ac 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -239,7 +239,7 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
239{ 239{
240 struct ath_hw *ah = sc->sc_ah; 240 struct ath_hw *ah = sc->sc_ah;
241 struct ath_common *common = ath9k_hw_common(ah); 241 struct ath_common *common = ath9k_hw_common(ah);
242 bool ret; 242 bool ret = true;
243 243
244 ieee80211_stop_queues(sc->hw); 244 ieee80211_stop_queues(sc->hw);
245 245
@@ -250,11 +250,12 @@ static bool ath_prepare_reset(struct ath_softc *sc, bool retry_tx, bool flush)
250 ath9k_debug_samp_bb_mac(sc); 250 ath9k_debug_samp_bb_mac(sc);
251 ath9k_hw_disable_interrupts(ah); 251 ath9k_hw_disable_interrupts(ah);
252 252
253 ret = ath_drain_all_txq(sc, retry_tx);
254
255 if (!ath_stoprecv(sc)) 253 if (!ath_stoprecv(sc))
256 ret = false; 254 ret = false;
257 255
256 if (!ath_drain_all_txq(sc, retry_tx))
257 ret = false;
258
258 if (!flush) { 259 if (!flush) {
259 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA) 260 if (ah->caps.hw_caps & ATH9K_HW_CAP_EDMA)
260 ath_rx_tasklet(sc, 1, true); 261 ath_rx_tasklet(sc, 1, true);
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 23eaa1b26ebe..d59dd01d6cde 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -64,7 +64,8 @@ static void ath_tx_update_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
64static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 64static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
65 struct ath_txq *txq, 65 struct ath_txq *txq,
66 struct ath_atx_tid *tid, 66 struct ath_atx_tid *tid,
67 struct sk_buff *skb); 67 struct sk_buff *skb,
68 bool dequeue);
68 69
69enum { 70enum {
70 MCS_HT20, 71 MCS_HT20,
@@ -811,7 +812,7 @@ static enum ATH_AGGR_STATUS ath_tx_form_aggr(struct ath_softc *sc,
811 fi = get_frame_info(skb); 812 fi = get_frame_info(skb);
812 bf = fi->bf; 813 bf = fi->bf;
813 if (!fi->bf) 814 if (!fi->bf)
814 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 815 bf = ath_tx_setup_buffer(sc, txq, tid, skb, true);
815 816
816 if (!bf) 817 if (!bf)
817 continue; 818 continue;
@@ -1726,7 +1727,7 @@ static void ath_tx_send_ampdu(struct ath_softc *sc, struct ath_atx_tid *tid,
1726 return; 1727 return;
1727 } 1728 }
1728 1729
1729 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1730 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1730 if (!bf) 1731 if (!bf)
1731 return; 1732 return;
1732 1733
@@ -1753,7 +1754,7 @@ static void ath_tx_send_normal(struct ath_softc *sc, struct ath_txq *txq,
1753 1754
1754 bf = fi->bf; 1755 bf = fi->bf;
1755 if (!bf) 1756 if (!bf)
1756 bf = ath_tx_setup_buffer(sc, txq, tid, skb); 1757 bf = ath_tx_setup_buffer(sc, txq, tid, skb, false);
1757 1758
1758 if (!bf) 1759 if (!bf)
1759 return; 1760 return;
@@ -1814,7 +1815,8 @@ u8 ath_txchainmask_reduction(struct ath_softc *sc, u8 chainmask, u32 rate)
1814static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc, 1815static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1815 struct ath_txq *txq, 1816 struct ath_txq *txq,
1816 struct ath_atx_tid *tid, 1817 struct ath_atx_tid *tid,
1817 struct sk_buff *skb) 1818 struct sk_buff *skb,
1819 bool dequeue)
1818{ 1820{
1819 struct ath_common *common = ath9k_hw_common(sc->sc_ah); 1821 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1820 struct ath_frame_info *fi = get_frame_info(skb); 1822 struct ath_frame_info *fi = get_frame_info(skb);
@@ -1863,6 +1865,8 @@ static struct ath_buf *ath_tx_setup_buffer(struct ath_softc *sc,
1863 return bf; 1865 return bf;
1864 1866
1865error: 1867error:
1868 if (dequeue)
1869 __skb_unlink(skb, &tid->buf_q);
1866 dev_kfree_skb_any(skb); 1870 dev_kfree_skb_any(skb);
1867 return NULL; 1871 return NULL;
1868} 1872}
@@ -1893,7 +1897,7 @@ static void ath_tx_start_dma(struct ath_softc *sc, struct sk_buff *skb,
1893 */ 1897 */
1894 ath_tx_send_ampdu(sc, tid, skb, txctl); 1898 ath_tx_send_ampdu(sc, tid, skb, txctl);
1895 } else { 1899 } else {
1896 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb); 1900 bf = ath_tx_setup_buffer(sc, txctl->txq, tid, skb, false);
1897 if (!bf) 1901 if (!bf)
1898 return; 1902 return;
1899 1903
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 67c13af6f206..c06b6cb5c91e 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -877,6 +877,10 @@ struct b43_wl {
877 * from the mac80211 subsystem. */ 877 * from the mac80211 subsystem. */
878 u16 mac80211_initially_registered_queues; 878 u16 mac80211_initially_registered_queues;
879 879
880 /* Set this if we call ieee80211_register_hw() and check if we call
881 * ieee80211_unregister_hw(). */
882 bool hw_registred;
883
880 /* We can only have one operating interface (802.11 core) 884 /* We can only have one operating interface (802.11 core)
881 * at a time. General information about this interface follows. 885 * at a time. General information about this interface follows.
882 */ 886 */
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 5a39b226b2e3..1b988f26bdf1 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -2437,6 +2437,7 @@ start_ieee80211:
2437 err = ieee80211_register_hw(wl->hw); 2437 err = ieee80211_register_hw(wl->hw);
2438 if (err) 2438 if (err)
2439 goto err_one_core_detach; 2439 goto err_one_core_detach;
2440 wl->hw_registred = true;
2440 b43_leds_register(wl->current_dev); 2441 b43_leds_register(wl->current_dev);
2441 goto out; 2442 goto out;
2442 2443
@@ -3766,7 +3767,7 @@ static int b43_switch_band(struct b43_wl *wl, struct ieee80211_channel *chan)
3766 if (prev_status >= B43_STAT_STARTED) { 3767 if (prev_status >= B43_STAT_STARTED) {
3767 err = b43_wireless_core_start(up_dev); 3768 err = b43_wireless_core_start(up_dev);
3768 if (err) { 3769 if (err) {
3769 b43err(wl, "Fatal: Coult not start device for " 3770 b43err(wl, "Fatal: Could not start device for "
3770 "selected %s-GHz band\n", 3771 "selected %s-GHz band\n",
3771 band_to_string(chan->band)); 3772 band_to_string(chan->band));
3772 b43_wireless_core_exit(up_dev); 3773 b43_wireless_core_exit(up_dev);
@@ -5299,6 +5300,7 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
5299 5300
5300 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1; 5301 hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
5301 wl->mac80211_initially_registered_queues = hw->queues; 5302 wl->mac80211_initially_registered_queues = hw->queues;
5303 wl->hw_registred = false;
5302 hw->max_rates = 2; 5304 hw->max_rates = 2;
5303 SET_IEEE80211_DEV(hw, dev->dev); 5305 SET_IEEE80211_DEV(hw, dev->dev);
5304 if (is_valid_ether_addr(sprom->et1mac)) 5306 if (is_valid_ether_addr(sprom->et1mac))
@@ -5370,12 +5372,15 @@ static void b43_bcma_remove(struct bcma_device *core)
5370 * as the ieee80211 unreg will destroy the workqueue. */ 5372 * as the ieee80211 unreg will destroy the workqueue. */
5371 cancel_work_sync(&wldev->restart_work); 5373 cancel_work_sync(&wldev->restart_work);
5372 5374
5373 /* Restore the queues count before unregistering, because firmware detect 5375 B43_WARN_ON(!wl);
5374 * might have modified it. Restoring is important, so the networking 5376 if (wl->current_dev == wldev && wl->hw_registred) {
5375 * stack can properly free resources. */ 5377 /* Restore the queues count before unregistering, because firmware detect
5376 wl->hw->queues = wl->mac80211_initially_registered_queues; 5378 * might have modified it. Restoring is important, so the networking
5377 b43_leds_stop(wldev); 5379 * stack can properly free resources. */
5378 ieee80211_unregister_hw(wl->hw); 5380 wl->hw->queues = wl->mac80211_initially_registered_queues;
5381 b43_leds_stop(wldev);
5382 ieee80211_unregister_hw(wl->hw);
5383 }
5379 5384
5380 b43_one_core_detach(wldev->dev); 5385 b43_one_core_detach(wldev->dev);
5381 5386
@@ -5446,7 +5451,7 @@ static void b43_ssb_remove(struct ssb_device *sdev)
5446 cancel_work_sync(&wldev->restart_work); 5451 cancel_work_sync(&wldev->restart_work);
5447 5452
5448 B43_WARN_ON(!wl); 5453 B43_WARN_ON(!wl);
5449 if (wl->current_dev == wldev) { 5454 if (wl->current_dev == wldev && wl->hw_registred) {
5450 /* Restore the queues count before unregistering, because firmware detect 5455 /* Restore the queues count before unregistering, because firmware detect
5451 * might have modified it. Restoring is important, so the networking 5456 * might have modified it. Restoring is important, so the networking
5452 * stack can properly free resources. */ 5457 * stack can properly free resources. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index cd9c9bc186d9..eae691e2f7dd 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2633,7 +2633,7 @@ static int b43legacy_switch_phymode(struct b43legacy_wl *wl,
2633 if (prev_status >= B43legacy_STAT_STARTED) { 2633 if (prev_status >= B43legacy_STAT_STARTED) {
2634 err = b43legacy_wireless_core_start(up_dev); 2634 err = b43legacy_wireless_core_start(up_dev);
2635 if (err) { 2635 if (err) {
2636 b43legacyerr(wl, "Fatal: Coult not start device for " 2636 b43legacyerr(wl, "Fatal: Could not start device for "
2637 "newly selected %s-PHY mode\n", 2637 "newly selected %s-PHY mode\n",
2638 phymode_to_string(new_mode)); 2638 phymode_to_string(new_mode));
2639 b43legacy_wireless_core_exit(up_dev); 2639 b43legacy_wireless_core_exit(up_dev);
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
index e2480d196276..8e7e6928c936 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/bcmsdh.c
@@ -89,9 +89,9 @@ int brcmf_sdio_intr_register(struct brcmf_sdio_dev *sdiodev)
89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1; 89 data |= 1 << SDIO_FUNC_1 | 1 << SDIO_FUNC_2 | 1;
90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret); 90 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_IENx, data, &ret);
91 91
92 /* redirect, configure ane enable io for interrupt signal */ 92 /* redirect, configure and enable io for interrupt signal */
93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE; 93 data = SDIO_SEPINT_MASK | SDIO_SEPINT_OE;
94 if (sdiodev->irq_flags | IRQF_TRIGGER_HIGH) 94 if (sdiodev->irq_flags & IRQF_TRIGGER_HIGH)
95 data |= SDIO_SEPINT_ACT_HI; 95 data |= SDIO_SEPINT_ACT_HI;
96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret); 96 brcmf_sdio_regwb(sdiodev, SDIO_CCCR_BRCM_SEPINT, data, &ret);
97 97
diff --git a/drivers/net/wireless/brcm80211/brcmfmac/usb.c b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
index c5a34ffe6459..a299d42da8e7 100644
--- a/drivers/net/wireless/brcm80211/brcmfmac/usb.c
+++ b/drivers/net/wireless/brcm80211/brcmfmac/usb.c
@@ -28,6 +28,7 @@
28#include <linux/uaccess.h> 28#include <linux/uaccess.h>
29#include <linux/firmware.h> 29#include <linux/firmware.h>
30#include <linux/usb.h> 30#include <linux/usb.h>
31#include <linux/vmalloc.h>
31#include <net/cfg80211.h> 32#include <net/cfg80211.h>
32 33
33#include <defs.h> 34#include <defs.h>
@@ -1239,7 +1240,7 @@ static int brcmf_usb_get_fw(struct brcmf_usbdev_info *devinfo)
1239 return -EINVAL; 1240 return -EINVAL;
1240 } 1241 }
1241 1242
1242 devinfo->image = kmalloc(fw->size, GFP_ATOMIC); /* plus nvram */ 1243 devinfo->image = vmalloc(fw->size); /* plus nvram */
1243 if (!devinfo->image) 1244 if (!devinfo->image)
1244 return -ENOMEM; 1245 return -ENOMEM;
1245 1246
@@ -1603,7 +1604,7 @@ static struct usb_driver brcmf_usbdrvr = {
1603void brcmf_usb_exit(void) 1604void brcmf_usb_exit(void)
1604{ 1605{
1605 usb_deregister(&brcmf_usbdrvr); 1606 usb_deregister(&brcmf_usbdrvr);
1606 kfree(g_image.data); 1607 vfree(g_image.data);
1607 g_image.data = NULL; 1608 g_image.data = NULL;
1608 g_image.len = 0; 1609 g_image.len = 0;
1609} 1610}
diff --git a/drivers/net/wireless/ipw2x00/ipw2100.c b/drivers/net/wireless/ipw2x00/ipw2100.c
index 9cfae0c08707..95aa8e1683ec 100644
--- a/drivers/net/wireless/ipw2x00/ipw2100.c
+++ b/drivers/net/wireless/ipw2x00/ipw2100.c
@@ -1903,14 +1903,6 @@ static void ipw2100_down(struct ipw2100_priv *priv)
1903 netif_stop_queue(priv->net_dev); 1903 netif_stop_queue(priv->net_dev);
1904} 1904}
1905 1905
1906/* Called by register_netdev() */
1907static int ipw2100_net_init(struct net_device *dev)
1908{
1909 struct ipw2100_priv *priv = libipw_priv(dev);
1910
1911 return ipw2100_up(priv, 1);
1912}
1913
1914static int ipw2100_wdev_init(struct net_device *dev) 1906static int ipw2100_wdev_init(struct net_device *dev)
1915{ 1907{
1916 struct ipw2100_priv *priv = libipw_priv(dev); 1908 struct ipw2100_priv *priv = libipw_priv(dev);
@@ -6087,7 +6079,6 @@ static const struct net_device_ops ipw2100_netdev_ops = {
6087 .ndo_stop = ipw2100_close, 6079 .ndo_stop = ipw2100_close,
6088 .ndo_start_xmit = libipw_xmit, 6080 .ndo_start_xmit = libipw_xmit,
6089 .ndo_change_mtu = libipw_change_mtu, 6081 .ndo_change_mtu = libipw_change_mtu,
6090 .ndo_init = ipw2100_net_init,
6091 .ndo_tx_timeout = ipw2100_tx_timeout, 6082 .ndo_tx_timeout = ipw2100_tx_timeout,
6092 .ndo_set_mac_address = ipw2100_set_address, 6083 .ndo_set_mac_address = ipw2100_set_address,
6093 .ndo_validate_addr = eth_validate_addr, 6084 .ndo_validate_addr = eth_validate_addr,
@@ -6329,6 +6320,10 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6329 printk(KERN_INFO DRV_NAME 6320 printk(KERN_INFO DRV_NAME
6330 ": Detected Intel PRO/Wireless 2100 Network Connection\n"); 6321 ": Detected Intel PRO/Wireless 2100 Network Connection\n");
6331 6322
6323 err = ipw2100_up(priv, 1);
6324 if (err)
6325 goto fail;
6326
6332 err = ipw2100_wdev_init(dev); 6327 err = ipw2100_wdev_init(dev);
6333 if (err) 6328 if (err)
6334 goto fail; 6329 goto fail;
@@ -6338,12 +6333,7 @@ static int ipw2100_pci_init_one(struct pci_dev *pci_dev,
6338 * network device we would call ipw2100_up. This introduced a race 6333 * network device we would call ipw2100_up. This introduced a race
6339 * condition with newer hotplug configurations (network was coming 6334 * condition with newer hotplug configurations (network was coming
6340 * up and making calls before the device was initialized). 6335 * up and making calls before the device was initialized).
6341 * 6336 */
6342 * If we called ipw2100_up before we registered the device, then the
6343 * device name wasn't registered. So, we instead use the net_dev->init
6344 * member to call a function that then just turns and calls ipw2100_up.
6345 * net_dev->init is called after name allocation but before the
6346 * notifier chain is called */
6347 err = register_netdev(dev); 6337 err = register_netdev(dev);
6348 if (err) { 6338 if (err) {
6349 printk(KERN_WARNING DRV_NAME 6339 printk(KERN_WARNING DRV_NAME
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index db6c6e528022..2463c0626438 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -137,11 +137,3 @@ config IWLWIFI_EXPERIMENTAL_MFP
137 even if the microcode doesn't advertise it. 137 even if the microcode doesn't advertise it.
138 138
139 Say Y only if you want to experiment with MFP. 139 Say Y only if you want to experiment with MFP.
140
141config IWLWIFI_UCODE16
142 bool "support uCode 16.0"
143 depends on IWLWIFI
144 help
145 This option enables support for uCode version 16.0.
146
147 Say Y if you want to use 16.0 microcode.
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 406f297a9a56..d615eacbf050 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -18,7 +18,6 @@ iwlwifi-objs += iwl-notif-wait.o
18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o 18iwlwifi-objs += iwl-trans-pcie.o iwl-trans-pcie-rx.o iwl-trans-pcie-tx.o
19 19
20 20
21iwlwifi-$(CONFIG_IWLWIFI_UCODE16) += iwl-phy-db.o
22iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 21iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o 22iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
24iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o 23iwlwifi-$(CONFIG_IWLWIFI_DEVICE_TESTMODE) += iwl-testmode.o
diff --git a/drivers/net/wireless/iwlwifi/iwl-2000.c b/drivers/net/wireless/iwlwifi/iwl-2000.c
index 7f793417c787..8133105ac645 100644
--- a/drivers/net/wireless/iwlwifi/iwl-2000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-2000.c
@@ -79,7 +79,7 @@ static const struct iwl_base_params iwl2000_base_params = {
79 .chain_noise_scale = 1000, 79 .chain_noise_scale = 1000,
80 .wd_timeout = IWL_DEF_WD_TIMEOUT, 80 .wd_timeout = IWL_DEF_WD_TIMEOUT,
81 .max_event_log_size = 512, 81 .max_event_log_size = 512,
82 .shadow_reg_enable = true, 82 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
83 .hd_v2 = true, 83 .hd_v2 = true,
84}; 84};
85 85
@@ -97,7 +97,7 @@ static const struct iwl_base_params iwl2030_base_params = {
97 .chain_noise_scale = 1000, 97 .chain_noise_scale = 1000,
98 .wd_timeout = IWL_LONG_WD_TIMEOUT, 98 .wd_timeout = IWL_LONG_WD_TIMEOUT,
99 .max_event_log_size = 512, 99 .max_event_log_size = 512,
100 .shadow_reg_enable = true, 100 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
101 .hd_v2 = true, 101 .hd_v2 = true,
102}; 102};
103 103
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 381b02cf339c..e5e8ada4aaf6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -35,17 +35,20 @@
35#define IWL6000_UCODE_API_MAX 6 35#define IWL6000_UCODE_API_MAX 6
36#define IWL6050_UCODE_API_MAX 5 36#define IWL6050_UCODE_API_MAX 5
37#define IWL6000G2_UCODE_API_MAX 6 37#define IWL6000G2_UCODE_API_MAX 6
38#define IWL6035_UCODE_API_MAX 6
38 39
39/* Oldest version we won't warn about */ 40/* Oldest version we won't warn about */
40#define IWL6000_UCODE_API_OK 4 41#define IWL6000_UCODE_API_OK 4
41#define IWL6000G2_UCODE_API_OK 5 42#define IWL6000G2_UCODE_API_OK 5
42#define IWL6050_UCODE_API_OK 5 43#define IWL6050_UCODE_API_OK 5
43#define IWL6000G2B_UCODE_API_OK 6 44#define IWL6000G2B_UCODE_API_OK 6
45#define IWL6035_UCODE_API_OK 6
44 46
45/* Lowest firmware API version supported */ 47/* Lowest firmware API version supported */
46#define IWL6000_UCODE_API_MIN 4 48#define IWL6000_UCODE_API_MIN 4
47#define IWL6050_UCODE_API_MIN 4 49#define IWL6050_UCODE_API_MIN 4
48#define IWL6000G2_UCODE_API_MIN 4 50#define IWL6000G2_UCODE_API_MIN 5
51#define IWL6035_UCODE_API_MIN 6
49 52
50/* EEPROM versions */ 53/* EEPROM versions */
51#define EEPROM_6000_TX_POWER_VERSION (4) 54#define EEPROM_6000_TX_POWER_VERSION (4)
@@ -86,7 +89,7 @@ static const struct iwl_base_params iwl6000_base_params = {
86 .chain_noise_scale = 1000, 89 .chain_noise_scale = 1000,
87 .wd_timeout = IWL_DEF_WD_TIMEOUT, 90 .wd_timeout = IWL_DEF_WD_TIMEOUT,
88 .max_event_log_size = 512, 91 .max_event_log_size = 512,
89 .shadow_reg_enable = true, 92 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
90}; 93};
91 94
92static const struct iwl_base_params iwl6050_base_params = { 95static const struct iwl_base_params iwl6050_base_params = {
@@ -102,7 +105,7 @@ static const struct iwl_base_params iwl6050_base_params = {
102 .chain_noise_scale = 1500, 105 .chain_noise_scale = 1500,
103 .wd_timeout = IWL_DEF_WD_TIMEOUT, 106 .wd_timeout = IWL_DEF_WD_TIMEOUT,
104 .max_event_log_size = 1024, 107 .max_event_log_size = 1024,
105 .shadow_reg_enable = true, 108 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
106}; 109};
107 110
108static const struct iwl_base_params iwl6000_g2_base_params = { 111static const struct iwl_base_params iwl6000_g2_base_params = {
@@ -118,7 +121,7 @@ static const struct iwl_base_params iwl6000_g2_base_params = {
118 .chain_noise_scale = 1000, 121 .chain_noise_scale = 1000,
119 .wd_timeout = IWL_LONG_WD_TIMEOUT, 122 .wd_timeout = IWL_LONG_WD_TIMEOUT,
120 .max_event_log_size = 512, 123 .max_event_log_size = 512,
121 .shadow_reg_enable = true, 124 .shadow_reg_enable = false, /* TODO: fix bugs using this feature */
122}; 125};
123 126
124static const struct iwl_ht_params iwl6000_ht_params = { 127static const struct iwl_ht_params iwl6000_ht_params = {
@@ -227,9 +230,25 @@ const struct iwl_cfg iwl6030_2bg_cfg = {
227 IWL_DEVICE_6030, 230 IWL_DEVICE_6030,
228}; 231};
229 232
233#define IWL_DEVICE_6035 \
234 .fw_name_pre = IWL6030_FW_PRE, \
235 .ucode_api_max = IWL6035_UCODE_API_MAX, \
236 .ucode_api_ok = IWL6035_UCODE_API_OK, \
237 .ucode_api_min = IWL6035_UCODE_API_MIN, \
238 .device_family = IWL_DEVICE_FAMILY_6030, \
239 .max_inst_size = IWL60_RTC_INST_SIZE, \
240 .max_data_size = IWL60_RTC_DATA_SIZE, \
241 .eeprom_ver = EEPROM_6030_EEPROM_VERSION, \
242 .eeprom_calib_ver = EEPROM_6030_TX_POWER_VERSION, \
243 .base_params = &iwl6000_g2_base_params, \
244 .bt_params = &iwl6000_bt_params, \
245 .need_temp_offset_calib = true, \
246 .led_mode = IWL_LED_RF_STATE, \
247 .adv_pm = true
248
230const struct iwl_cfg iwl6035_2agn_cfg = { 249const struct iwl_cfg iwl6035_2agn_cfg = {
231 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN", 250 .name = "Intel(R) Centrino(R) Advanced-N 6235 AGN",
232 IWL_DEVICE_6030, 251 IWL_DEVICE_6035,
233 .ht_params = &iwl6000_ht_params, 252 .ht_params = &iwl6000_ht_params,
234}; 253};
235 254
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 51e1a69ffdda..8cebd7c363fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -884,6 +884,7 @@ static void rs_bt_update_lq(struct iwl_priv *priv, struct iwl_rxon_context *ctx,
884 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) || 884 if ((priv->bt_traffic_load != priv->last_bt_traffic_load) ||
885 (priv->bt_full_concurrent != full_concurrent)) { 885 (priv->bt_full_concurrent != full_concurrent)) {
886 priv->bt_full_concurrent = full_concurrent; 886 priv->bt_full_concurrent = full_concurrent;
887 priv->last_bt_traffic_load = priv->bt_traffic_load;
887 888
888 /* Update uCode's rate table. */ 889 /* Update uCode's rate table. */
889 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]); 890 tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
index b31584e87bc7..eb6a8eaf42fc 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
@@ -772,7 +772,7 @@ void iwl_restore_stations(struct iwl_priv *priv, struct iwl_rxon_context *ctx)
772 ~IWL_STA_DRIVER_ACTIVE; 772 ~IWL_STA_DRIVER_ACTIVE;
773 priv->stations[i].used &= 773 priv->stations[i].used &=
774 ~IWL_STA_UCODE_INPROGRESS; 774 ~IWL_STA_UCODE_INPROGRESS;
775 spin_unlock_bh(&priv->sta_lock); 775 continue;
776 } 776 }
777 /* 777 /*
778 * Rate scaling has already been initialized, send 778 * Rate scaling has already been initialized, send
@@ -1267,7 +1267,7 @@ int iwl_remove_dynamic_key(struct iwl_priv *priv,
1267 key_flags |= STA_KEY_MULTICAST_MSK; 1267 key_flags |= STA_KEY_MULTICAST_MSK;
1268 1268
1269 sta_cmd.key.key_flags = key_flags; 1269 sta_cmd.key.key_flags = key_flags;
1270 sta_cmd.key.key_offset = WEP_INVALID_OFFSET; 1270 sta_cmd.key.key_offset = keyconf->hw_key_idx;
1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK; 1271 sta_cmd.sta.modify_mask = STA_MODIFY_KEY_MASK;
1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK; 1272 sta_cmd.mode = STA_CONTROL_MODIFY_MSK;
1273 1273
diff --git a/drivers/net/wireless/iwlwifi/iwl-drv.c b/drivers/net/wireless/iwlwifi/iwl-drv.c
index 3c72bad0ae56..fac67a526a30 100644
--- a/drivers/net/wireless/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/iwlwifi/iwl-drv.c
@@ -657,17 +657,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
657 return -EINVAL; 657 return -EINVAL;
658} 658}
659 659
660static int alloc_pci_desc(struct iwl_drv *drv, 660static int iwl_alloc_ucode(struct iwl_drv *drv,
661 struct iwl_firmware_pieces *pieces, 661 struct iwl_firmware_pieces *pieces,
662 enum iwl_ucode_type type) 662 enum iwl_ucode_type type)
663{ 663{
664 int i; 664 int i;
665 for (i = 0; 665 for (i = 0;
666 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i); 666 i < IWL_UCODE_SECTION_MAX && get_sec_size(pieces, type, i);
667 i++) 667 i++)
668 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]), 668 if (iwl_alloc_fw_desc(drv, &(drv->fw.img[type].sec[i]),
669 get_sec(pieces, type, i))) 669 get_sec(pieces, type, i)))
670 return -1; 670 return -ENOMEM;
671 return 0; 671 return 0;
672} 672}
673 673
@@ -825,8 +825,8 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
825 * 1) unmodified from disk 825 * 1) unmodified from disk
826 * 2) backup cache for save/restore during power-downs */ 826 * 2) backup cache for save/restore during power-downs */
827 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++) 827 for (i = 0; i < IWL_UCODE_TYPE_MAX; i++)
828 if (alloc_pci_desc(drv, &pieces, i)) 828 if (iwl_alloc_ucode(drv, &pieces, i))
829 goto err_pci_alloc; 829 goto out_free_fw;
830 830
831 /* Now that we can no longer fail, copy information */ 831 /* Now that we can no longer fail, copy information */
832 832
@@ -861,13 +861,18 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
861 861
862 /* We have our copies now, allow OS release its copies */ 862 /* We have our copies now, allow OS release its copies */
863 release_firmware(ucode_raw); 863 release_firmware(ucode_raw);
864 complete(&drv->request_firmware_complete);
865 864
866 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw); 865 drv->op_mode = iwl_dvm_ops.start(drv->trans, drv->cfg, &drv->fw);
867 866
868 if (!drv->op_mode) 867 if (!drv->op_mode)
869 goto out_unbind; 868 goto out_unbind;
870 869
870 /*
871 * Complete the firmware request last so that
872 * a driver unbind (stop) doesn't run while we
873 * are doing the start() above.
874 */
875 complete(&drv->request_firmware_complete);
871 return; 876 return;
872 877
873 try_again: 878 try_again:
@@ -877,7 +882,7 @@ static void iwl_ucode_callback(const struct firmware *ucode_raw, void *context)
877 goto out_unbind; 882 goto out_unbind;
878 return; 883 return;
879 884
880 err_pci_alloc: 885 out_free_fw:
881 IWL_ERR(drv, "failed to allocate pci memory\n"); 886 IWL_ERR(drv, "failed to allocate pci memory\n");
882 iwl_dealloc_ucode(drv); 887 iwl_dealloc_ucode(drv);
883 release_firmware(ucode_raw); 888 release_firmware(ucode_raw);
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index 50c58911e718..b8e2b223ac36 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -568,28 +568,28 @@ static int iwl_find_otp_image(struct iwl_trans *trans,
568 * iwl_get_max_txpower_avg - get the highest tx power from all chains. 568 * iwl_get_max_txpower_avg - get the highest tx power from all chains.
569 * find the highest tx power from all chains for the channel 569 * find the highest tx power from all chains for the channel
570 */ 570 */
571static s8 iwl_get_max_txpower_avg(const struct iwl_cfg *cfg, 571static s8 iwl_get_max_txpower_avg(struct iwl_priv *priv,
572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower, 572 struct iwl_eeprom_enhanced_txpwr *enhanced_txpower,
573 int element, s8 *max_txpower_in_half_dbm) 573 int element, s8 *max_txpower_in_half_dbm)
574{ 574{
575 s8 max_txpower_avg = 0; /* (dBm) */ 575 s8 max_txpower_avg = 0; /* (dBm) */
576 576
577 /* Take the highest tx power from any valid chains */ 577 /* Take the highest tx power from any valid chains */
578 if ((cfg->valid_tx_ant & ANT_A) && 578 if ((priv->hw_params.valid_tx_ant & ANT_A) &&
579 (enhanced_txpower[element].chain_a_max > max_txpower_avg)) 579 (enhanced_txpower[element].chain_a_max > max_txpower_avg))
580 max_txpower_avg = enhanced_txpower[element].chain_a_max; 580 max_txpower_avg = enhanced_txpower[element].chain_a_max;
581 if ((cfg->valid_tx_ant & ANT_B) && 581 if ((priv->hw_params.valid_tx_ant & ANT_B) &&
582 (enhanced_txpower[element].chain_b_max > max_txpower_avg)) 582 (enhanced_txpower[element].chain_b_max > max_txpower_avg))
583 max_txpower_avg = enhanced_txpower[element].chain_b_max; 583 max_txpower_avg = enhanced_txpower[element].chain_b_max;
584 if ((cfg->valid_tx_ant & ANT_C) && 584 if ((priv->hw_params.valid_tx_ant & ANT_C) &&
585 (enhanced_txpower[element].chain_c_max > max_txpower_avg)) 585 (enhanced_txpower[element].chain_c_max > max_txpower_avg))
586 max_txpower_avg = enhanced_txpower[element].chain_c_max; 586 max_txpower_avg = enhanced_txpower[element].chain_c_max;
587 if (((cfg->valid_tx_ant == ANT_AB) | 587 if (((priv->hw_params.valid_tx_ant == ANT_AB) |
588 (cfg->valid_tx_ant == ANT_BC) | 588 (priv->hw_params.valid_tx_ant == ANT_BC) |
589 (cfg->valid_tx_ant == ANT_AC)) && 589 (priv->hw_params.valid_tx_ant == ANT_AC)) &&
590 (enhanced_txpower[element].mimo2_max > max_txpower_avg)) 590 (enhanced_txpower[element].mimo2_max > max_txpower_avg))
591 max_txpower_avg = enhanced_txpower[element].mimo2_max; 591 max_txpower_avg = enhanced_txpower[element].mimo2_max;
592 if ((cfg->valid_tx_ant == ANT_ABC) && 592 if ((priv->hw_params.valid_tx_ant == ANT_ABC) &&
593 (enhanced_txpower[element].mimo3_max > max_txpower_avg)) 593 (enhanced_txpower[element].mimo3_max > max_txpower_avg))
594 max_txpower_avg = enhanced_txpower[element].mimo3_max; 594 max_txpower_avg = enhanced_txpower[element].mimo3_max;
595 595
@@ -691,7 +691,7 @@ static void iwl_eeprom_enhanced_txpower(struct iwl_priv *priv)
691 ((txp->delta_20_in_40 & 0xf0) >> 4), 691 ((txp->delta_20_in_40 & 0xf0) >> 4),
692 (txp->delta_20_in_40 & 0x0f)); 692 (txp->delta_20_in_40 & 0x0f));
693 693
694 max_txp_avg = iwl_get_max_txpower_avg(priv->cfg, txp_array, idx, 694 max_txp_avg = iwl_get_max_txpower_avg(priv, txp_array, idx,
695 &max_txp_avg_halfdbm); 695 &max_txp_avg_halfdbm);
696 696
697 /* 697 /*
diff --git a/drivers/net/wireless/iwlwifi/iwl-mac80211.c b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
index ab2f4d7500a4..3ee23134c02b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-mac80211.c
+++ b/drivers/net/wireless/iwlwifi/iwl-mac80211.c
@@ -199,6 +199,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
199 WIPHY_FLAG_DISABLE_BEACON_HINTS | 199 WIPHY_FLAG_DISABLE_BEACON_HINTS |
200 WIPHY_FLAG_IBSS_RSN; 200 WIPHY_FLAG_IBSS_RSN;
201 201
202#ifdef CONFIG_PM_SLEEP
202 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len && 203 if (priv->fw->img[IWL_UCODE_WOWLAN].sec[0].len &&
203 priv->trans->ops->wowlan_suspend && 204 priv->trans->ops->wowlan_suspend &&
204 device_can_wakeup(priv->trans->dev)) { 205 device_can_wakeup(priv->trans->dev)) {
@@ -217,6 +218,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
217 hw->wiphy->wowlan.pattern_max_len = 218 hw->wiphy->wowlan.pattern_max_len =
218 IWLAGN_WOWLAN_MAX_PATTERN_LEN; 219 IWLAGN_WOWLAN_MAX_PATTERN_LEN;
219 } 220 }
221#endif
220 222
221 if (iwlwifi_mod_params.power_save) 223 if (iwlwifi_mod_params.power_save)
222 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 224 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
@@ -249,6 +251,7 @@ int iwlagn_mac_setup_register(struct iwl_priv *priv,
249 ret = ieee80211_register_hw(priv->hw); 251 ret = ieee80211_register_hw(priv->hw);
250 if (ret) { 252 if (ret) {
251 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret); 253 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
254 iwl_leds_exit(priv);
252 return ret; 255 return ret;
253 } 256 }
254 priv->mac80211_registered = 1; 257 priv->mac80211_registered = 1;
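
Two separate fixes land in this file: the WoWLAN wiphy setup is now compiled only when CONFIG_PM_SLEEP is set (the suspend/WoWLAN path is not built otherwise, so advertising it would be meaningless), and the ieee80211_register_hw() error path unwinds the LED setup done just before it by calling iwl_leds_exit(). A sketch of the guard pattern, where fw_supports_wowlan() is a made-up stand-in for the firmware-image and wakeup checks shown in the hunk:

	#ifdef CONFIG_PM_SLEEP
		if (fw_supports_wowlan(priv) &&		/* hypothetical helper */
		    device_can_wakeup(priv->trans->dev)) {
			hw->wiphy->wowlan.flags = WIPHY_WOWLAN_MAGIC_PKT |
						  WIPHY_WOWLAN_DISCONNECT;
		}
	#endif
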
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.c b/drivers/net/wireless/iwlwifi/iwl-phy-db.c
deleted file mode 100644
index f166955340fe..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.c
+++ /dev/null
@@ -1,288 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#include <linux/slab.h>
65#include <linux/string.h>
66
67#include "iwl-debug.h"
68#include "iwl-dev.h"
69
70#include "iwl-phy-db.h"
71
72#define CHANNEL_NUM_SIZE 4 /* num of channels in calib_ch size */
73
74struct iwl_phy_db *iwl_phy_db_init(struct device *dev)
75{
76 struct iwl_phy_db *phy_db = kzalloc(sizeof(struct iwl_phy_db),
77 GFP_KERNEL);
78
79 if (!phy_db)
80 return phy_db;
81
82 phy_db->dev = dev;
83
84 /* TODO: add default values of the phy db. */
85 return phy_db;
86}
87
88/*
89 * get phy db section: returns a pointer to a phy db section specified by
90 * type and channel group id.
91 */
92static struct iwl_phy_db_entry *
93iwl_phy_db_get_section(struct iwl_phy_db *phy_db,
94 enum iwl_phy_db_section_type type,
95 u16 chg_id)
96{
97 if (!phy_db || type < 0 || type >= IWL_PHY_DB_MAX)
98 return NULL;
99
100 switch (type) {
101 case IWL_PHY_DB_CFG:
102 return &phy_db->cfg;
103 case IWL_PHY_DB_CALIB_NCH:
104 return &phy_db->calib_nch;
105 case IWL_PHY_DB_CALIB_CH:
106 return &phy_db->calib_ch;
107 case IWL_PHY_DB_CALIB_CHG_PAPD:
108 if (chg_id < 0 || chg_id >= IWL_NUM_PAPD_CH_GROUPS)
109 return NULL;
110 return &phy_db->calib_ch_group_papd[chg_id];
111 case IWL_PHY_DB_CALIB_CHG_TXP:
112 if (chg_id < 0 || chg_id >= IWL_NUM_TXP_CH_GROUPS)
113 return NULL;
114 return &phy_db->calib_ch_group_txp[chg_id];
115 default:
116 return NULL;
117 }
118 return NULL;
119}
120
121static void iwl_phy_db_free_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type,
123 u16 chg_id)
124{
125 struct iwl_phy_db_entry *entry =
126 iwl_phy_db_get_section(phy_db, type, chg_id);
127 if (!entry)
128 return;
129
130 kfree(entry->data);
131 entry->data = NULL;
132 entry->size = 0;
133}
134
135void iwl_phy_db_free(struct iwl_phy_db *phy_db)
136{
137 int i;
138
139 if (!phy_db)
140 return;
141
142 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CFG, 0);
143 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_NCH, 0);
144 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CH, 0);
145 for (i = 0; i < IWL_NUM_PAPD_CH_GROUPS; i++)
146 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_PAPD, i);
147 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++)
148 iwl_phy_db_free_section(phy_db, IWL_PHY_DB_CALIB_CHG_TXP, i);
149
150 kfree(phy_db);
151}
152
153int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
154 enum iwl_phy_db_section_type type, u8 *data,
155 u16 size, gfp_t alloc_ctx)
156{
157 struct iwl_phy_db_entry *entry;
158 u16 chg_id = 0;
159
160 if (!phy_db)
161 return -EINVAL;
162
163 if (type == IWL_PHY_DB_CALIB_CHG_PAPD ||
164 type == IWL_PHY_DB_CALIB_CHG_TXP)
165 chg_id = le16_to_cpup((__le16 *)data);
166
167 entry = iwl_phy_db_get_section(phy_db, type, chg_id);
168 if (!entry)
169 return -EINVAL;
170
171 kfree(entry->data);
172 entry->data = kmemdup(data, size, alloc_ctx);
173 if (!entry->data) {
174 entry->size = 0;
175 return -ENOMEM;
176 }
177
178 entry->size = size;
179
180 if (type == IWL_PHY_DB_CALIB_CH) {
181 phy_db->channel_num = le32_to_cpup((__le32 *)data);
182 phy_db->channel_size =
183 (size - CHANNEL_NUM_SIZE) / phy_db->channel_num;
184 }
185
186 return 0;
187}
188
189static int is_valid_channel(u16 ch_id)
190{
191 if (ch_id <= 14 ||
192 (36 <= ch_id && ch_id <= 64 && ch_id % 4 == 0) ||
193 (100 <= ch_id && ch_id <= 140 && ch_id % 4 == 0) ||
194 (145 <= ch_id && ch_id <= 165 && ch_id % 4 == 1))
195 return 1;
196 return 0;
197}
198
199static u8 ch_id_to_ch_index(u16 ch_id)
200{
201 if (WARN_ON(!is_valid_channel(ch_id)))
202 return 0xff;
203
204 if (ch_id <= 14)
205 return ch_id - 1;
206 if (ch_id <= 64)
207 return (ch_id + 20) / 4;
208 if (ch_id <= 140)
209 return (ch_id - 12) / 4;
210 return (ch_id - 13) / 4;
211}
212
213
214static u16 channel_id_to_papd(u16 ch_id)
215{
216 if (WARN_ON(!is_valid_channel(ch_id)))
217 return 0xff;
218
219 if (1 <= ch_id && ch_id <= 14)
220 return 0;
221 if (36 <= ch_id && ch_id <= 64)
222 return 1;
223 if (100 <= ch_id && ch_id <= 140)
224 return 2;
225 return 3;
226}
227
228static u16 channel_id_to_txp(struct iwl_phy_db *phy_db, u16 ch_id)
229{
230 struct iwl_phy_db_chg_txp *txp_chg;
231 int i;
232 u8 ch_index = ch_id_to_ch_index(ch_id);
233 if (ch_index == 0xff)
234 return 0xff;
235
236 for (i = 0; i < IWL_NUM_TXP_CH_GROUPS; i++) {
237 txp_chg = (void *)phy_db->calib_ch_group_txp[i].data;
238 if (!txp_chg)
239 return 0xff;
240 /*
241 * Looking for the first channel group that its max channel is
242 * higher then wanted channel.
243 */
244 if (le16_to_cpu(txp_chg->max_channel_idx) >= ch_index)
245 return i;
246 }
247 return 0xff;
248}
249
250int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
251 enum iwl_phy_db_section_type type, u8 **data,
252 u16 *size, u16 ch_id)
253{
254 struct iwl_phy_db_entry *entry;
255 u32 channel_num;
256 u32 channel_size;
257 u16 ch_group_id = 0;
258 u16 index;
259
260 if (!phy_db)
261 return -EINVAL;
262
263 /* find wanted channel group */
264 if (type == IWL_PHY_DB_CALIB_CHG_PAPD)
265 ch_group_id = channel_id_to_papd(ch_id);
266 else if (type == IWL_PHY_DB_CALIB_CHG_TXP)
267 ch_group_id = channel_id_to_txp(phy_db, ch_id);
268
269 entry = iwl_phy_db_get_section(phy_db, type, ch_group_id);
270 if (!entry)
271 return -EINVAL;
272
273 if (type == IWL_PHY_DB_CALIB_CH) {
274 index = ch_id_to_ch_index(ch_id);
275 channel_num = phy_db->channel_num;
276 channel_size = phy_db->channel_size;
277 if (index >= channel_num) {
278 IWL_ERR(phy_db, "Wrong channel number %d", ch_id);
279 return -EINVAL;
280 }
281 *data = entry->data + CHANNEL_NUM_SIZE + index * channel_size;
282 *size = channel_size;
283 } else {
284 *data = entry->data;
285 *size = entry->size;
286 }
287 return 0;
288}
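
For reference, the channel-index mapping in the removed ch_id_to_ch_index() packs the valid 2.4 GHz and 5 GHz channels into one contiguous array: channels 1-14 map to indices 0-13, channel 40 maps to (40 + 20) / 4 = 15, channel 100 to (100 - 12) / 4 = 22, and channel 149 to (149 - 13) / 4 = 34, so a per-channel calibration record of channel_size bytes can then be addressed as data + CHANNEL_NUM_SIZE + index * channel_size, exactly as iwl_phy_db_get_section_data() does above.
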
diff --git a/drivers/net/wireless/iwlwifi/iwl-phy-db.h b/drivers/net/wireless/iwlwifi/iwl-phy-db.h
deleted file mode 100644
index c34c6a9303ab..000000000000
--- a/drivers/net/wireless/iwlwifi/iwl-phy-db.h
+++ /dev/null
@@ -1,129 +0,0 @@
1/******************************************************************************
2 *
3 * This file is provided under a dual BSD/GPLv2 license. When using or
4 * redistributing this file, you may do so under either license.
5 *
6 * GPL LICENSE SUMMARY
7 *
8 * Copyright(c) 2007 - 2012 Intel Corporation. All rights reserved.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of version 2 of the GNU General Public License as
12 * published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
22 * USA
23 *
24 * The full GNU General Public License is included in this distribution
25 * in the file called LICENSE.GPL.
26 *
27 * Contact Information:
28 * Intel Linux Wireless <ilw@linux.intel.com>
29 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
30 *
31 * BSD LICENSE
32 *
33 * Copyright(c) 2005 - 2012 Intel Corporation. All rights reserved.
34 * All rights reserved.
35 *
36 * Redistribution and use in source and binary forms, with or without
37 * modification, are permitted provided that the following conditions
38 * are met:
39 *
40 * * Redistributions of source code must retain the above copyright
41 * notice, this list of conditions and the following disclaimer.
42 * * Redistributions in binary form must reproduce the above copyright
43 * notice, this list of conditions and the following disclaimer in
44 * the documentation and/or other materials provided with the
45 * distribution.
46 * * Neither the name Intel Corporation nor the names of its
47 * contributors may be used to endorse or promote products derived
48 * from this software without specific prior written permission.
49 *
50 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
51 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
52 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
53 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
54 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
55 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
56 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
57 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
58 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
59 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
60 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
61 *
62 *****************************************************************************/
63
64#ifndef __IWL_PHYDB_H__
65#define __IWL_PHYDB_H__
66
67#include <linux/types.h>
68
69#define IWL_NUM_PAPD_CH_GROUPS 4
70#define IWL_NUM_TXP_CH_GROUPS 8
71
72struct iwl_phy_db_entry {
73 u16 size;
74 u8 *data;
75};
76
77struct iwl_shared;
78
79/**
80 * struct iwl_phy_db - stores phy configuration and calibration data.
81 *
82 * @cfg: phy configuration.
83 * @calib_nch: non channel specific calibration data.
84 * @calib_ch: channel specific calibration data.
85 * @calib_ch_group_papd: calibration data related to papd channel group.
86 * @calib_ch_group_txp: calibration data related to tx power chanel group.
87 */
88struct iwl_phy_db {
89 struct iwl_phy_db_entry cfg;
90 struct iwl_phy_db_entry calib_nch;
91 struct iwl_phy_db_entry calib_ch;
92 struct iwl_phy_db_entry calib_ch_group_papd[IWL_NUM_PAPD_CH_GROUPS];
93 struct iwl_phy_db_entry calib_ch_group_txp[IWL_NUM_TXP_CH_GROUPS];
94
95 u32 channel_num;
96 u32 channel_size;
97
98 /* for an access to the logger */
99 struct device *dev;
100};
101
102enum iwl_phy_db_section_type {
103 IWL_PHY_DB_CFG = 1,
104 IWL_PHY_DB_CALIB_NCH,
105 IWL_PHY_DB_CALIB_CH,
106 IWL_PHY_DB_CALIB_CHG_PAPD,
107 IWL_PHY_DB_CALIB_CHG_TXP,
108 IWL_PHY_DB_MAX
109};
110
111/* for parsing of tx power channel group data that comes from the firmware*/
112struct iwl_phy_db_chg_txp {
113 __le32 space;
114 __le16 max_channel_idx;
115} __packed;
116
117struct iwl_phy_db *iwl_phy_db_init(struct device *dev);
118
119void iwl_phy_db_free(struct iwl_phy_db *phy_db);
120
121int iwl_phy_db_set_section(struct iwl_phy_db *phy_db,
122 enum iwl_phy_db_section_type type, u8 *data,
123 u16 size, gfp_t alloc_ctx);
124
125int iwl_phy_db_get_section_data(struct iwl_phy_db *phy_db,
126 enum iwl_phy_db_section_type type, u8 **data,
127 u16 *size, u16 ch_id);
128
129#endif /* __IWL_PHYDB_H__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-prph.h b/drivers/net/wireless/iwlwifi/iwl-prph.h
index 3b1069290fa9..dfd54662e3e6 100644
--- a/drivers/net/wireless/iwlwifi/iwl-prph.h
+++ b/drivers/net/wireless/iwlwifi/iwl-prph.h
@@ -224,6 +224,7 @@
224#define SCD_TXFACT (SCD_BASE + 0x10) 224#define SCD_TXFACT (SCD_BASE + 0x10)
225#define SCD_ACTIVE (SCD_BASE + 0x14) 225#define SCD_ACTIVE (SCD_BASE + 0x14)
226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8) 226#define SCD_QUEUECHAIN_SEL (SCD_BASE + 0xe8)
227#define SCD_CHAINEXT_EN (SCD_BASE + 0x244)
227#define SCD_AGGR_SEL (SCD_BASE + 0x248) 228#define SCD_AGGR_SEL (SCD_BASE + 0x248)
228#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108) 229#define SCD_INTERRUPT_MASK (SCD_BASE + 0x108)
229 230
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
index 6213c05a4b52..e959207c630a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-int.h
@@ -347,7 +347,7 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo, 347void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
348 int sta_id, int tid, int frame_limit, u16 ssn); 348 int sta_id, int tid, int frame_limit, u16 ssn);
349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 349void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
350 int index, enum dma_data_direction dma_dir); 350 enum dma_data_direction dma_dir);
351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index, 351int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
352 struct sk_buff_head *skbs); 352 struct sk_buff_head *skbs);
353int iwl_queue_space(const struct iwl_queue *q); 353int iwl_queue_space(const struct iwl_queue *q);
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
index 21a8a672fbb2..a8750238ee09 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie-tx.c
@@ -204,33 +204,39 @@ static void iwlagn_unmap_tfd(struct iwl_trans *trans, struct iwl_cmd_meta *meta,
204 for (i = 1; i < num_tbs; i++) 204 for (i = 1; i < num_tbs; i++)
205 dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i), 205 dma_unmap_single(trans->dev, iwl_tfd_tb_get_addr(tfd, i),
206 iwl_tfd_tb_get_len(tfd, i), dma_dir); 206 iwl_tfd_tb_get_len(tfd, i), dma_dir);
207
208 tfd->num_tbs = 0;
207} 209}
208 210
209/** 211/**
210 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 212 * iwlagn_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
211 * @trans - transport private data 213 * @trans - transport private data
212 * @txq - tx queue 214 * @txq - tx queue
213 * @index - the index of the TFD to be freed 215 * @dma_dir - the direction of the DMA mapping
214 *@dma_dir - the direction of the DMA mapping
215 * 216 *
216 * Does NOT advance any TFD circular buffer read/write indexes 217 * Does NOT advance any TFD circular buffer read/write indexes
217 * Does NOT free the TFD itself (which is within circular buffer) 218 * Does NOT free the TFD itself (which is within circular buffer)
218 */ 219 */
219void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq, 220void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
220 int index, enum dma_data_direction dma_dir) 221 enum dma_data_direction dma_dir)
221{ 222{
222 struct iwl_tfd *tfd_tmp = txq->tfds; 223 struct iwl_tfd *tfd_tmp = txq->tfds;
223 224
225 /* rd_ptr is bounded by n_bd and idx is bounded by n_window */
226 int rd_ptr = txq->q.read_ptr;
227 int idx = get_cmd_index(&txq->q, rd_ptr);
228
224 lockdep_assert_held(&txq->lock); 229 lockdep_assert_held(&txq->lock);
225 230
226 iwlagn_unmap_tfd(trans, &txq->entries[index].meta, 231 /* We have only q->n_window txq->entries, but we use q->n_bd tfds */
227 &tfd_tmp[index], dma_dir); 232 iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
233 &tfd_tmp[rd_ptr], dma_dir);
228 234
229 /* free SKB */ 235 /* free SKB */
230 if (txq->entries) { 236 if (txq->entries) {
231 struct sk_buff *skb; 237 struct sk_buff *skb;
232 238
233 skb = txq->entries[index].skb; 239 skb = txq->entries[idx].skb;
234 240
235 /* Can be called from irqs-disabled context 241 /* Can be called from irqs-disabled context
236 * If skb is not NULL, it means that the whole queue is being 242 * If skb is not NULL, it means that the whole queue is being
@@ -238,7 +244,7 @@ void iwlagn_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
238 */ 244 */
239 if (skb) { 245 if (skb) {
240 iwl_op_mode_free_skb(trans->op_mode, skb); 246 iwl_op_mode_free_skb(trans->op_mode, skb);
241 txq->entries[index].skb = NULL; 247 txq->entries[idx].skb = NULL;
242 } 248 }
243 } 249 }
244} 250}
@@ -973,7 +979,7 @@ int iwl_tx_queue_reclaim(struct iwl_trans *trans, int txq_id, int index,
973 979
974 iwlagn_txq_inval_byte_cnt_tbl(trans, txq); 980 iwlagn_txq_inval_byte_cnt_tbl(trans, txq);
975 981
976 iwlagn_txq_free_tfd(trans, txq, txq->q.read_ptr, DMA_TO_DEVICE); 982 iwlagn_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
977 freed++; 983 freed++;
978 } 984 }
979 985
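
The iwlagn_txq_free_tfd() change drops the index argument because callers were mixing two different index spaces: the TFD ring has q->n_bd slots, while the driver's bookkeeping array txq->entries[] (meta data and skb) only has q->n_window slots. The function now derives both indices itself from the read pointer. A sketch of the relationship, assuming (as in this driver) that the window size is a power of two so the translation is a simple mask, which is what get_cmd_index() does:

	int rd_ptr = txq->q.read_ptr;			/* 0 .. n_bd - 1, indexes tfds[]     */
	int idx = rd_ptr & (txq->q.n_window - 1);	/* 0 .. n_window - 1, indexes entries[] */

	iwlagn_unmap_tfd(trans, &txq->entries[idx].meta,
			 &tfd_tmp[rd_ptr], dma_dir);
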
diff --git a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
index 2e57161854b9..79c6b91417f9 100644
--- a/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
+++ b/drivers/net/wireless/iwlwifi/iwl-trans-pcie.c
@@ -435,9 +435,7 @@ static void iwl_tx_queue_unmap(struct iwl_trans *trans, int txq_id)
435 435
436 spin_lock_bh(&txq->lock); 436 spin_lock_bh(&txq->lock);
437 while (q->write_ptr != q->read_ptr) { 437 while (q->write_ptr != q->read_ptr) {
438 /* The read_ptr needs to bound by q->n_window */ 438 iwlagn_txq_free_tfd(trans, txq, dma_dir);
439 iwlagn_txq_free_tfd(trans, txq, get_cmd_index(q, q->read_ptr),
440 dma_dir);
441 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd); 439 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd);
442 } 440 }
443 spin_unlock_bh(&txq->lock); 441 spin_unlock_bh(&txq->lock);
@@ -1060,6 +1058,11 @@ static void iwl_tx_start(struct iwl_trans *trans)
1060 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 1058 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
1061 trans_pcie->scd_bc_tbls.dma >> 10); 1059 trans_pcie->scd_bc_tbls.dma >> 10);
1062 1060
1061 /* The chain extension of the SCD doesn't work well. This feature is
1062 * enabled by default by the HW, so we need to disable it manually.
1063 */
1064 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
1065
1063 /* Enable DMA channel */ 1066 /* Enable DMA channel */
1064 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++) 1067 for (chan = 0; chan < FH_TCSR_CHNL_NUM ; chan++)
1065 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan), 1068 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
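
Together with the SCD_CHAINEXT_EN define added to iwl-prph.h above, the transport start-up now turns off the scheduler's chain-extension feature, which the hardware enables by default but which, per the added comment, does not work well. The fix is a single periphery-register write during TX scheduler bring-up:

	#define SCD_CHAINEXT_EN		(SCD_BASE + 0x244)
	...
	/* the feature is on by default, so it has to be cleared explicitly */
	iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
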
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index fb787df01666..a0b7cfd34685 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -1555,6 +1555,7 @@ static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2,
1555 hdr = (struct ieee80211_hdr *) skb->data; 1555 hdr = (struct ieee80211_hdr *) skb->data;
1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2); 1556 mac80211_hwsim_monitor_ack(data2->hw, hdr->addr2);
1557 } 1557 }
1558 txi->flags |= IEEE80211_TX_STAT_ACK;
1558 } 1559 }
1559 ieee80211_tx_status_irqsafe(data2->hw, skb); 1560 ieee80211_tx_status_irqsafe(data2->hw, skb);
1560 return 0; 1561 return 0;
@@ -1721,6 +1722,24 @@ static void hwsim_exit_netlink(void)
1721 "unregister family %i\n", ret); 1722 "unregister family %i\n", ret);
1722} 1723}
1723 1724
1725static const struct ieee80211_iface_limit hwsim_if_limits[] = {
1726 { .max = 1, .types = BIT(NL80211_IFTYPE_ADHOC) },
1727 { .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) |
1728 BIT(NL80211_IFTYPE_P2P_CLIENT) |
1729#ifdef CONFIG_MAC80211_MESH
1730 BIT(NL80211_IFTYPE_MESH_POINT) |
1731#endif
1732 BIT(NL80211_IFTYPE_AP) |
1733 BIT(NL80211_IFTYPE_P2P_GO) },
1734};
1735
1736static const struct ieee80211_iface_combination hwsim_if_comb = {
1737 .limits = hwsim_if_limits,
1738 .n_limits = ARRAY_SIZE(hwsim_if_limits),
1739 .max_interfaces = 2048,
1740 .num_different_channels = 1,
1741};
1742
1724static int __init init_mac80211_hwsim(void) 1743static int __init init_mac80211_hwsim(void)
1725{ 1744{
1726 int i, err = 0; 1745 int i, err = 0;
@@ -1782,6 +1801,9 @@ static int __init init_mac80211_hwsim(void)
1782 hw->wiphy->n_addresses = 2; 1801 hw->wiphy->n_addresses = 2;
1783 hw->wiphy->addresses = data->addresses; 1802 hw->wiphy->addresses = data->addresses;
1784 1803
1804 hw->wiphy->iface_combinations = &hwsim_if_comb;
1805 hw->wiphy->n_iface_combinations = 1;
1806
1785 if (fake_hw_scan) { 1807 if (fake_hw_scan) {
1786 hw->wiphy->max_scan_ssids = 255; 1808 hw->wiphy->max_scan_ssids = 255;
1787 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; 1809 hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
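
The hwsim hunks make the driver advertise explicit interface combinations, so cfg80211/mac80211 have concrete limits to validate multi-interface setups against instead of refusing them. hwsim's limits are deliberately huge (2048 interfaces); a driver for ordinary hardware would publish something much tighter, for example (illustrative values only):

	static const struct ieee80211_iface_limit demo_limits[] = {
		{ .max = 1, .types = BIT(NL80211_IFTYPE_STATION) },
		{ .max = 1, .types = BIT(NL80211_IFTYPE_AP) },
	};

	static const struct ieee80211_iface_combination demo_comb = {
		.limits			= demo_limits,
		.n_limits		= ARRAY_SIZE(demo_limits),
		.max_interfaces		= 2,
		.num_different_channels	= 1,	/* AP and STA share one channel */
	};
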
diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
index 87671446e24b..015fec3371a0 100644
--- a/drivers/net/wireless/mwifiex/cfg80211.c
+++ b/drivers/net/wireless/mwifiex/cfg80211.c
@@ -948,6 +948,19 @@ static int mwifiex_cfg80211_start_ap(struct wiphy *wiphy,
948 bss_cfg->ssid.ssid_len = params->ssid_len; 948 bss_cfg->ssid.ssid_len = params->ssid_len;
949 } 949 }
950 950
951 switch (params->hidden_ssid) {
952 case NL80211_HIDDEN_SSID_NOT_IN_USE:
953 bss_cfg->bcast_ssid_ctl = 1;
954 break;
955 case NL80211_HIDDEN_SSID_ZERO_LEN:
956 bss_cfg->bcast_ssid_ctl = 0;
957 break;
958 case NL80211_HIDDEN_SSID_ZERO_CONTENTS:
959 /* firmware doesn't support this type of hidden SSID */
960 default:
961 return -EINVAL;
962 }
963
951 if (mwifiex_set_secure_params(priv, bss_cfg, params)) { 964 if (mwifiex_set_secure_params(priv, bss_cfg, params)) {
952 kfree(bss_cfg); 965 kfree(bss_cfg);
953 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n"); 966 wiphy_err(wiphy, "Failed to parse secuirty parameters!\n");
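
The new switch maps nl80211's three hidden-SSID modes onto the firmware's single broadcast-SSID flag: NOT_IN_USE keeps the SSID in beacons (bcast_ssid_ctl = 1), ZERO_LEN hides it by beaconing a zero-length SSID (bcast_ssid_ctl = 0), and ZERO_CONTENTS (beacon the correct SSID length but all-zero bytes) has no firmware equivalent here, so it is rejected with -EINVAL. In hostapd terms these correspond roughly to ignore_broadcast_ssid=0, =1 and =2 respectively, though that mapping is stated here only for orientation.
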
diff --git a/drivers/net/wireless/mwifiex/fw.h b/drivers/net/wireless/mwifiex/fw.h
index 9f674bbebe65..561452a5c818 100644
--- a/drivers/net/wireless/mwifiex/fw.h
+++ b/drivers/net/wireless/mwifiex/fw.h
@@ -122,6 +122,7 @@ enum MWIFIEX_802_11_PRIVACY_FILTER {
122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42) 122#define TLV_TYPE_CHANNELBANDLIST (PROPRIETARY_TLV_BASE_ID + 42)
123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44) 123#define TLV_TYPE_UAP_BEACON_PERIOD (PROPRIETARY_TLV_BASE_ID + 44)
124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45) 124#define TLV_TYPE_UAP_DTIM_PERIOD (PROPRIETARY_TLV_BASE_ID + 45)
125#define TLV_TYPE_UAP_BCAST_SSID (PROPRIETARY_TLV_BASE_ID + 48)
125#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51) 126#define TLV_TYPE_UAP_RTS_THRESHOLD (PROPRIETARY_TLV_BASE_ID + 51)
126#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60) 127#define TLV_TYPE_UAP_WPA_PASSPHRASE (PROPRIETARY_TLV_BASE_ID + 60)
127#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64) 128#define TLV_TYPE_UAP_ENCRY_PROTOCOL (PROPRIETARY_TLV_BASE_ID + 64)
@@ -1209,6 +1210,11 @@ struct host_cmd_tlv_ssid {
1209 u8 ssid[0]; 1210 u8 ssid[0];
1210} __packed; 1211} __packed;
1211 1212
1213struct host_cmd_tlv_bcast_ssid {
1214 struct host_cmd_tlv tlv;
1215 u8 bcast_ctl;
1216} __packed;
1217
1212struct host_cmd_tlv_beacon_period { 1218struct host_cmd_tlv_beacon_period {
1213 struct host_cmd_tlv tlv; 1219 struct host_cmd_tlv tlv;
1214 __le16 period; 1220 __le16 period;
diff --git a/drivers/net/wireless/mwifiex/uap_cmd.c b/drivers/net/wireless/mwifiex/uap_cmd.c
index 76dfbc42a732..8173ab66066d 100644
--- a/drivers/net/wireless/mwifiex/uap_cmd.c
+++ b/drivers/net/wireless/mwifiex/uap_cmd.c
@@ -132,6 +132,7 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
132 struct host_cmd_tlv_dtim_period *dtim_period; 132 struct host_cmd_tlv_dtim_period *dtim_period;
133 struct host_cmd_tlv_beacon_period *beacon_period; 133 struct host_cmd_tlv_beacon_period *beacon_period;
134 struct host_cmd_tlv_ssid *ssid; 134 struct host_cmd_tlv_ssid *ssid;
135 struct host_cmd_tlv_bcast_ssid *bcast_ssid;
135 struct host_cmd_tlv_channel_band *chan_band; 136 struct host_cmd_tlv_channel_band *chan_band;
136 struct host_cmd_tlv_frag_threshold *frag_threshold; 137 struct host_cmd_tlv_frag_threshold *frag_threshold;
137 struct host_cmd_tlv_rts_threshold *rts_threshold; 138 struct host_cmd_tlv_rts_threshold *rts_threshold;
@@ -153,6 +154,14 @@ mwifiex_uap_bss_param_prepare(u8 *tlv, void *cmd_buf, u16 *param_size)
153 cmd_size += sizeof(struct host_cmd_tlv) + 154 cmd_size += sizeof(struct host_cmd_tlv) +
154 bss_cfg->ssid.ssid_len; 155 bss_cfg->ssid.ssid_len;
155 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len; 156 tlv += sizeof(struct host_cmd_tlv) + bss_cfg->ssid.ssid_len;
157
158 bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
159 bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
160 bcast_ssid->tlv.len =
161 cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
162 bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
163 cmd_size += sizeof(struct host_cmd_tlv_bcast_ssid);
164 tlv += sizeof(struct host_cmd_tlv_bcast_ssid);
156 } 165 }
157 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) { 166 if (bss_cfg->channel && bss_cfg->channel <= MAX_CHANNEL_BAND_BG) {
158 chan_band = (struct host_cmd_tlv_channel_band *)tlv; 167 chan_band = (struct host_cmd_tlv_channel_band *)tlv;
@@ -416,6 +425,7 @@ int mwifiex_uap_set_channel(struct mwifiex_private *priv, int channel)
416 if (!bss_cfg) 425 if (!bss_cfg)
417 return -ENOMEM; 426 return -ENOMEM;
418 427
428 mwifiex_set_sys_config_invalid_data(bss_cfg);
419 bss_cfg->band_cfg = BAND_CONFIG_MANUAL; 429 bss_cfg->band_cfg = BAND_CONFIG_MANUAL;
420 bss_cfg->channel = channel; 430 bss_cfg->channel = channel;
421 431
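
The uAP command buffer is built as a flat run of TLVs, so exposing the new broadcast-SSID control amounts to appending one more fixed-size TLV right after the SSID TLV and advancing both the running command size and the write cursor. The append step from the hunk, annotated:

	bcast_ssid = (struct host_cmd_tlv_bcast_ssid *)tlv;
	bcast_ssid->tlv.type = cpu_to_le16(TLV_TYPE_UAP_BCAST_SSID);
	bcast_ssid->tlv.len = cpu_to_le16(sizeof(bcast_ssid->bcast_ctl));
	bcast_ssid->bcast_ctl = bss_cfg->bcast_ssid_ctl;
	cmd_size += sizeof(*bcast_ssid);	/* TLV header plus 1-byte payload   */
	tlv += sizeof(*bcast_ssid);		/* cursor now points at the next TLV */
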
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index ca36cccaba31..8f754025b06e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -396,8 +396,7 @@ struct rt2x00_intf {
396 * for hardware which doesn't support hardware 396 * for hardware which doesn't support hardware
397 * sequence counting. 397 * sequence counting.
398 */ 398 */
399 spinlock_t seqlock; 399 atomic_t seqno;
400 u16 seqno;
401}; 400};
402 401
403static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif) 402static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
diff --git a/drivers/net/wireless/rt2x00/rt2x00mac.c b/drivers/net/wireless/rt2x00/rt2x00mac.c
index b49773ef72f2..dd24b2663b5e 100644
--- a/drivers/net/wireless/rt2x00/rt2x00mac.c
+++ b/drivers/net/wireless/rt2x00/rt2x00mac.c
@@ -277,7 +277,6 @@ int rt2x00mac_add_interface(struct ieee80211_hw *hw,
277 else 277 else
278 rt2x00dev->intf_sta_count++; 278 rt2x00dev->intf_sta_count++;
279 279
280 spin_lock_init(&intf->seqlock);
281 mutex_init(&intf->beacon_skb_mutex); 280 mutex_init(&intf->beacon_skb_mutex);
282 intf->beacon = entry; 281 intf->beacon = entry;
283 282
diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
index 4c662eccf53c..2fd830103415 100644
--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
@@ -207,6 +207,7 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 207 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 208 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); 209 struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif);
210 u16 seqno;
210 211
211 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) 212 if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ))
212 return; 213 return;
@@ -238,15 +239,13 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
238 * sequence counting per-frame, since those will override the 239 * sequence counting per-frame, since those will override the
239 * sequence counter given by mac80211. 240 * sequence counter given by mac80211.
240 */ 241 */
241 spin_lock(&intf->seqlock);
242
243 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) 242 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
244 intf->seqno += 0x10; 243 seqno = atomic_add_return(0x10, &intf->seqno);
245 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); 244 else
246 hdr->seq_ctrl |= cpu_to_le16(intf->seqno); 245 seqno = atomic_read(&intf->seqno);
247
248 spin_unlock(&intf->seqlock);
249 246
247 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
248 hdr->seq_ctrl |= cpu_to_le16(seqno);
250} 249}
251 250
252static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, 251static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev,
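
Replacing the seqlock-protected u16 with an atomic_t removes a spinlock from the rt2x00 TX hot path: first fragments atomically bump the counter by 0x10 (the sequence number occupies bits 4-15 of seq_ctrl, so one frame is 0x10) and all other frames just read the current value. Assigning the 32-bit atomic result to a local u16 truncates it, which preserves the old counter's wrap-around behaviour. A reduced sketch, with the first_fragment flag standing in for the ENTRY_TXD_FIRST_FRAGMENT test:

	u16 seqno;

	if (first_fragment)				/* illustrative condition */
		seqno = atomic_add_return(0x10, &intf->seqno);
	else
		seqno = atomic_read(&intf->seqno);

	hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);	/* keep fragment bits */
	hdr->seq_ctrl |= cpu_to_le16(seqno);			/* install sequence   */
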
diff --git a/drivers/net/wireless/rtl818x/rtl8187/leds.c b/drivers/net/wireless/rtl818x/rtl8187/leds.c
index 2e0de2f5f0f9..c2d5b495c179 100644
--- a/drivers/net/wireless/rtl818x/rtl8187/leds.c
+++ b/drivers/net/wireless/rtl818x/rtl8187/leds.c
@@ -117,7 +117,7 @@ static void rtl8187_led_brightness_set(struct led_classdev *led_dev,
117 radio_on = true; 117 radio_on = true;
118 } else if (radio_on) { 118 } else if (radio_on) {
119 radio_on = false; 119 radio_on = false;
120 cancel_delayed_work_sync(&priv->led_on); 120 cancel_delayed_work(&priv->led_on);
121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0); 121 ieee80211_queue_delayed_work(hw, &priv->led_off, 0);
122 } 122 }
123 } else if (radio_on) { 123 } else if (radio_on) {
diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
index 1b851f650e07..e2750a12c6f1 100644
--- a/drivers/net/wireless/ti/wl1251/sdio.c
+++ b/drivers/net/wireless/ti/wl1251/sdio.c
@@ -260,6 +260,7 @@ static int wl1251_sdio_probe(struct sdio_func *func,
260 } 260 }
261 261
262 if (wl->irq) { 262 if (wl->irq) {
263 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
263 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl); 264 ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
264 if (ret < 0) { 265 if (ret < 0) {
265 wl1251_error("request_irq() failed: %d", ret); 266 wl1251_error("request_irq() failed: %d", ret);
@@ -267,7 +268,6 @@ static int wl1251_sdio_probe(struct sdio_func *func,
267 } 268 }
268 269
269 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 270 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
270 disable_irq(wl->irq);
271 271
272 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq; 272 wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
273 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq; 273 wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
diff --git a/drivers/net/wireless/ti/wl1251/spi.c b/drivers/net/wireless/ti/wl1251/spi.c
index 6248c354fc5c..87f6305bda2c 100644
--- a/drivers/net/wireless/ti/wl1251/spi.c
+++ b/drivers/net/wireless/ti/wl1251/spi.c
@@ -281,6 +281,7 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
281 281
282 wl->use_eeprom = pdata->use_eeprom; 282 wl->use_eeprom = pdata->use_eeprom;
283 283
284 irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
284 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl); 285 ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
285 if (ret < 0) { 286 if (ret < 0) {
286 wl1251_error("request_irq() failed: %d", ret); 287 wl1251_error("request_irq() failed: %d", ret);
@@ -289,8 +290,6 @@ static int __devinit wl1251_spi_probe(struct spi_device *spi)
289 290
290 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING); 291 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
291 292
292 disable_irq(wl->irq);
293
294 ret = wl1251_init_ieee80211(wl); 293 ret = wl1251_init_ieee80211(wl);
295 if (ret) 294 if (ret)
296 goto out_irq; 295 goto out_irq;
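
Both wl1251 probe paths (SDIO and SPI) shared the same small race: request_irq() enables the line immediately, and only afterwards was it masked again with disable_irq(), so a spurious interrupt could fire before the driver was ready. Setting IRQ_NOAUTOEN on the descriptor before request_irq() keeps the line masked until the driver later calls enable_irq() explicitly. The pattern, in the order that matters (error label illustrative):

	irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);	/* don't auto-enable on request */
	ret = request_irq(wl->irq, wl1251_irq, 0, DRIVER_NAME, wl);
	if (ret < 0)
		goto out_free;
	irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
	/* ... later, once the hardware is initialised: enable_irq(wl->irq); */
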
diff --git a/drivers/net/wireless/ti/wlcore/acx.c b/drivers/net/wireless/ti/wlcore/acx.c
index 509aa881d790..f3d6fa508269 100644
--- a/drivers/net/wireless/ti/wlcore/acx.c
+++ b/drivers/net/wireless/ti/wlcore/acx.c
@@ -1715,6 +1715,7 @@ out:
1715 1715
1716} 1716}
1717 1717
1718#ifdef CONFIG_PM
1718/* Set the global behaviour of RX filters - On/Off + default action */ 1719/* Set the global behaviour of RX filters - On/Off + default action */
1719int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1720int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
1720 enum rx_filter_action action) 1721 enum rx_filter_action action)
@@ -1794,3 +1795,4 @@ out:
1794 kfree(acx); 1795 kfree(acx);
1795 return ret; 1796 return ret;
1796} 1797}
1798#endif /* CONFIG_PM */
diff --git a/drivers/net/wireless/ti/wlcore/acx.h b/drivers/net/wireless/ti/wlcore/acx.h
index 8106b2ebfe60..e6a74869a5ff 100644
--- a/drivers/net/wireless/ti/wlcore/acx.h
+++ b/drivers/net/wireless/ti/wlcore/acx.h
@@ -1330,9 +1330,11 @@ int wl1271_acx_set_inconnection_sta(struct wl1271 *wl, u8 *addr);
1330int wl1271_acx_fm_coex(struct wl1271 *wl); 1330int wl1271_acx_fm_coex(struct wl1271 *wl);
1331int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl); 1331int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl);
1332int wl12xx_acx_config_hangover(struct wl1271 *wl); 1332int wl12xx_acx_config_hangover(struct wl1271 *wl);
1333
1334#ifdef CONFIG_PM
1333int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable, 1335int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
1334 enum rx_filter_action action); 1336 enum rx_filter_action action);
1335int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable, 1337int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
1336 struct wl12xx_rx_filter *filter); 1338 struct wl12xx_rx_filter *filter);
1337 1339#endif /* CONFIG_PM */
1338#endif /* __WL1271_ACX_H__ */ 1340#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/ti/wlcore/rx.c b/drivers/net/wireless/ti/wlcore/rx.c
index 1f1d9488dfb6..d6a3c6b07827 100644
--- a/drivers/net/wireless/ti/wlcore/rx.c
+++ b/drivers/net/wireless/ti/wlcore/rx.c
@@ -279,6 +279,7 @@ void wl12xx_rx(struct wl1271 *wl, struct wl_fw_status *status)
279 wl12xx_rearm_rx_streaming(wl, active_hlids); 279 wl12xx_rearm_rx_streaming(wl, active_hlids);
280} 280}
281 281
282#ifdef CONFIG_PM
282int wl1271_rx_filter_enable(struct wl1271 *wl, 283int wl1271_rx_filter_enable(struct wl1271 *wl,
283 int index, bool enable, 284 int index, bool enable,
284 struct wl12xx_rx_filter *filter) 285 struct wl12xx_rx_filter *filter)
@@ -314,3 +315,4 @@ void wl1271_rx_filter_clear_all(struct wl1271 *wl)
314 wl1271_rx_filter_enable(wl, i, 0, NULL); 315 wl1271_rx_filter_enable(wl, i, 0, NULL);
315 } 316 }
316} 317}
318#endif /* CONFIG_PM */
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 2596401308a8..f4a6fcaeffb1 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -325,8 +325,7 @@ unsigned int xen_netbk_count_skb_slots(struct xenvif *vif, struct sk_buff *skb)
325 unsigned int count; 325 unsigned int count;
326 int i, copy_off; 326 int i, copy_off;
327 327
328 count = DIV_ROUND_UP( 328 count = DIV_ROUND_UP(skb_headlen(skb), PAGE_SIZE);
329 offset_in_page(skb->data)+skb_headlen(skb), PAGE_SIZE);
330 329
331 copy_off = skb_headlen(skb) % PAGE_SIZE; 330 copy_off = skb_headlen(skb) % PAGE_SIZE;
332 331
diff --git a/drivers/nfc/pn544_hci.c b/drivers/nfc/pn544_hci.c
index 46f4a9f9f5e4..281f18c2fb82 100644
--- a/drivers/nfc/pn544_hci.c
+++ b/drivers/nfc/pn544_hci.c
@@ -232,7 +232,7 @@ static int pn544_hci_i2c_write(struct i2c_client *client, u8 *buf, int len)
232 232
233static int check_crc(u8 *buf, int buflen) 233static int check_crc(u8 *buf, int buflen)
234{ 234{
235 u8 len; 235 int len;
236 u16 crc; 236 u16 crc;
237 237
238 len = buf[0] + 1; 238 len = buf[0] + 1;
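
The pn544 fix is a classic narrow-type overflow: the length comes straight off the wire as len = buf[0] + 1, and with len declared u8 a first byte of 0xff wraps the sum 255 + 1 back to 0, so the subsequent CRC is computed over a bogus length. Widening len to int keeps the intended value of 256 and lets the earlier buflen comparison do its job.
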
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 447e83472c01..77cb54a65cde 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1744,6 +1744,11 @@ int pci_prepare_to_sleep(struct pci_dev *dev)
1744 if (target_state == PCI_POWER_ERROR) 1744 if (target_state == PCI_POWER_ERROR)
1745 return -EIO; 1745 return -EIO;
1746 1746
1747 /* Some devices mustn't be in D3 during system sleep */
1748 if (target_state == PCI_D3hot &&
1749 (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP))
1750 return 0;
1751
1747 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev)); 1752 pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
1748 1753
1749 error = pci_set_power_state(dev, target_state); 1754 error = pci_set_power_state(dev, target_state);
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 2a7521677541..194b243a2817 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -2929,6 +2929,32 @@ static void __devinit disable_igfx_irq(struct pci_dev *dev)
2929DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq); 2929DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0102, disable_igfx_irq);
2930DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq); 2930DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x010a, disable_igfx_irq);
2931 2931
2932/*
2933 * The Intel 6 Series/C200 Series chipset's EHCI controllers on many
2934 * ASUS motherboards will cause memory corruption or a system crash
2935 * if they are in D3 while the system is put into S3 sleep.
2936 */
2937static void __devinit asus_ehci_no_d3(struct pci_dev *dev)
2938{
2939 const char *sys_info;
2940 static const char good_Asus_board[] = "P8Z68-V";
2941
2942 if (dev->dev_flags & PCI_DEV_FLAGS_NO_D3_DURING_SLEEP)
2943 return;
2944 if (dev->subsystem_vendor != PCI_VENDOR_ID_ASUSTEK)
2945 return;
2946 sys_info = dmi_get_system_info(DMI_BOARD_NAME);
2947 if (sys_info && memcmp(sys_info, good_Asus_board,
2948 sizeof(good_Asus_board) - 1) == 0)
2949 return;
2950
2951 dev_info(&dev->dev, "broken D3 during system sleep on ASUS\n");
2952 dev->dev_flags |= PCI_DEV_FLAGS_NO_D3_DURING_SLEEP;
2953 device_set_wakeup_capable(&dev->dev, false);
2954}
2955DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c26, asus_ehci_no_d3);
2956DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x1c2d, asus_ehci_no_d3);
2957
2932static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f, 2958static void pci_do_fixups(struct pci_dev *dev, struct pci_fixup *f,
2933 struct pci_fixup *end) 2959 struct pci_fixup *end)
2934{ 2960{
diff --git a/drivers/pinctrl/core.c b/drivers/pinctrl/core.c
index c3b331b74fa0..0cc053af70bd 100644
--- a/drivers/pinctrl/core.c
+++ b/drivers/pinctrl/core.c
@@ -61,7 +61,7 @@ static LIST_HEAD(pinctrl_maps);
61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \ 61 list_for_each_entry(_maps_node_, &pinctrl_maps, node) \
62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \ 62 for (_i_ = 0, _map_ = &_maps_node_->maps[_i_]; \
63 _i_ < _maps_node_->num_maps; \ 63 _i_ < _maps_node_->num_maps; \
64 i++, _map_ = &_maps_node_->maps[_i_]) 64 _i_++, _map_ = &_maps_node_->maps[_i_])
65 65
66/** 66/**
67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support 67 * pinctrl_provide_dummies() - indicate if pinctrl provides dummy state support
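
The one-character fix in the iteration macro matters more than it looks: the loop variable is the caller-supplied _i_, but the increment used a bare i, so the macro only compiled when the caller happened to have an i in scope, and then incremented the wrong variable. Keeping every name inside a macro either a parameter or clearly reserved avoids this. A reduced illustration (macros invented for the example):

	#define for_each_item(_i_, _n_) \
		for ((_i_) = 0; (_i_) < (_n_); i++)	/* BUG: increments 'i', not '_i_' */

	#define for_each_item_fixed(_i_, _n_) \
		for ((_i_) = 0; (_i_) < (_n_); (_i_)++)	/* parameter used consistently */
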
diff --git a/drivers/pinctrl/pinctrl-imx.c b/drivers/pinctrl/pinctrl-imx.c
index f6e7c670906c..dd6d93aa5334 100644
--- a/drivers/pinctrl/pinctrl-imx.c
+++ b/drivers/pinctrl/pinctrl-imx.c
@@ -27,16 +27,16 @@
27#include "core.h" 27#include "core.h"
28#include "pinctrl-imx.h" 28#include "pinctrl-imx.h"
29 29
30#define IMX_PMX_DUMP(info, p, m, c, n) \ 30#define IMX_PMX_DUMP(info, p, m, c, n) \
31{ \ 31{ \
32 int i, j; \ 32 int i, j; \
33 printk("Format: Pin Mux Config\n"); \ 33 printk(KERN_DEBUG "Format: Pin Mux Config\n"); \
34 for (i = 0; i < n; i++) { \ 34 for (i = 0; i < n; i++) { \
35 j = p[i]; \ 35 j = p[i]; \
36 printk("%s %d 0x%lx\n", \ 36 printk(KERN_DEBUG "%s %d 0x%lx\n", \
37 info->pins[j].name, \ 37 info->pins[j].name, \
38 m[i], c[i]); \ 38 m[i], c[i]); \
39 } \ 39 } \
40} 40}
41 41
42/* The bits in CONFIG cell defined in binding doc*/ 42/* The bits in CONFIG cell defined in binding doc*/
@@ -173,8 +173,10 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
173 173
174 /* create mux map */ 174 /* create mux map */
175 parent = of_get_parent(np); 175 parent = of_get_parent(np);
176 if (!parent) 176 if (!parent) {
177 kfree(new_map);
177 return -EINVAL; 178 return -EINVAL;
179 }
178 new_map[0].type = PIN_MAP_TYPE_MUX_GROUP; 180 new_map[0].type = PIN_MAP_TYPE_MUX_GROUP;
179 new_map[0].data.mux.function = parent->name; 181 new_map[0].data.mux.function = parent->name;
180 new_map[0].data.mux.group = np->name; 182 new_map[0].data.mux.group = np->name;
@@ -193,7 +195,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
193 } 195 }
194 196
195 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n", 197 dev_dbg(pctldev->dev, "maps: function %s group %s num %d\n",
196 new_map->data.mux.function, new_map->data.mux.group, map_num); 198 (*map)->data.mux.function, (*map)->data.mux.group, map_num);
197 199
198 return 0; 200 return 0;
199} 201}
@@ -201,10 +203,7 @@ static int imx_dt_node_to_map(struct pinctrl_dev *pctldev,
201static void imx_dt_free_map(struct pinctrl_dev *pctldev, 203static void imx_dt_free_map(struct pinctrl_dev *pctldev,
202 struct pinctrl_map *map, unsigned num_maps) 204 struct pinctrl_map *map, unsigned num_maps)
203{ 205{
204 int i; 206 kfree(map);
205
206 for (i = 0; i < num_maps; i++)
207 kfree(map);
208} 207}
209 208
210static struct pinctrl_ops imx_pctrl_ops = { 209static struct pinctrl_ops imx_pctrl_ops = {
@@ -475,9 +474,8 @@ static int __devinit imx_pinctrl_parse_groups(struct device_node *np,
475 grp->configs[j] = config & ~IMX_PAD_SION; 474 grp->configs[j] = config & ~IMX_PAD_SION;
476 } 475 }
477 476
478#ifdef DEBUG
479 IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins); 477 IMX_PMX_DUMP(info, grp->pins, grp->mux_mode, grp->configs, grp->npins);
480#endif 478
481 return 0; 479 return 0;
482} 480}
483 481
diff --git a/drivers/pinctrl/pinctrl-mxs.c b/drivers/pinctrl/pinctrl-mxs.c
index 556e45a213eb..4ba4636b6a4a 100644
--- a/drivers/pinctrl/pinctrl-mxs.c
+++ b/drivers/pinctrl/pinctrl-mxs.c
@@ -107,8 +107,10 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
107 107
108 /* Compose group name */ 108 /* Compose group name */
109 group = kzalloc(length, GFP_KERNEL); 109 group = kzalloc(length, GFP_KERNEL);
110 if (!group) 110 if (!group) {
111 return -ENOMEM; 111 ret = -ENOMEM;
112 goto free;
113 }
112 snprintf(group, length, "%s.%d", np->name, reg); 114 snprintf(group, length, "%s.%d", np->name, reg);
113 new_map[i].data.mux.group = group; 115 new_map[i].data.mux.group = group;
114 i++; 116 i++;
@@ -118,7 +120,7 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
118 pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL); 120 pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
119 if (!pconfig) { 121 if (!pconfig) {
120 ret = -ENOMEM; 122 ret = -ENOMEM;
121 goto free; 123 goto free_group;
122 } 124 }
123 125
124 new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP; 126 new_map[i].type = PIN_MAP_TYPE_CONFIGS_GROUP;
@@ -133,6 +135,9 @@ static int mxs_dt_node_to_map(struct pinctrl_dev *pctldev,
133 135
134 return 0; 136 return 0;
135 137
138free_group:
139 if (!purecfg)
140 kfree(group);
136free: 141free:
137 kfree(new_map); 142 kfree(new_map);
138 return ret; 143 return ret;
@@ -511,6 +516,7 @@ int __devinit mxs_pinctrl_probe(struct platform_device *pdev,
511 return 0; 516 return 0;
512 517
513err: 518err:
519 platform_set_drvdata(pdev, NULL);
514 iounmap(d->base); 520 iounmap(d->base);
515 return ret; 521 return ret;
516} 522}
@@ -520,6 +526,7 @@ int __devexit mxs_pinctrl_remove(struct platform_device *pdev)
520{ 526{
521 struct mxs_pinctrl_data *d = platform_get_drvdata(pdev); 527 struct mxs_pinctrl_data *d = platform_get_drvdata(pdev);
522 528
529 platform_set_drvdata(pdev, NULL);
523 pinctrl_unregister(d->pctl); 530 pinctrl_unregister(d->pctl);
524 iounmap(d->base); 531 iounmap(d->base);
525 532
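
The mxs changes tidy the error unwinding in the map builder: if duplicating the config fails after a group name was already allocated, that name must be freed too, so a second label (free_group) is stacked above the existing free label, releasing resources in reverse order of allocation. The probe/remove hunks additionally clear the platform drvdata so a failed probe does not leave a stale pointer behind. The stacked-label shape, reduced to a skeleton with the names from the hunk and the unrelated code elided:

	group = kzalloc(length, GFP_KERNEL);
	if (!group) {
		ret = -ENOMEM;
		goto free;		/* nothing newer to undo yet */
	}
	pconfig = kmemdup(&config, sizeof(config), GFP_KERNEL);
	if (!pconfig) {
		ret = -ENOMEM;
		goto free_group;	/* undo the group allocation first */
	}
	return 0;

free_group:
	if (!purecfg)
		kfree(group);
free:
	kfree(new_map);
	return ret;
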
diff --git a/drivers/pinctrl/pinctrl-nomadik.c b/drivers/pinctrl/pinctrl-nomadik.c
index b8e01c3eaa95..3e7e47d6b385 100644
--- a/drivers/pinctrl/pinctrl-nomadik.c
+++ b/drivers/pinctrl/pinctrl-nomadik.c
@@ -24,6 +24,7 @@
24#include <linux/irq.h> 24#include <linux/irq.h>
25#include <linux/irqdomain.h> 25#include <linux/irqdomain.h>
26#include <linux/slab.h> 26#include <linux/slab.h>
27#include <linux/of_device.h>
27#include <linux/pinctrl/pinctrl.h> 28#include <linux/pinctrl/pinctrl.h>
28#include <linux/pinctrl/pinmux.h> 29#include <linux/pinctrl/pinmux.h>
29#include <linux/pinctrl/pinconf.h> 30#include <linux/pinctrl/pinconf.h>
@@ -672,7 +673,7 @@ static void __nmk_gpio_set_wake(struct nmk_gpio_chip *nmk_chip,
672 * wakeup is anyhow controlled by the RIMSC and FIMSC registers. 673 * wakeup is anyhow controlled by the RIMSC and FIMSC registers.
673 */ 674 */
674 if (nmk_chip->sleepmode && on) { 675 if (nmk_chip->sleepmode && on) {
675 __nmk_gpio_set_slpm(nmk_chip, gpio % nmk_chip->chip.base, 676 __nmk_gpio_set_slpm(nmk_chip, gpio % NMK_GPIO_PER_CHIP,
676 NMK_GPIO_SLPM_WAKEUP_ENABLE); 677 NMK_GPIO_SLPM_WAKEUP_ENABLE);
677 } 678 }
678 679
@@ -1245,6 +1246,7 @@ static int __devinit nmk_gpio_probe(struct platform_device *dev)
1245 ret = PTR_ERR(clk); 1246 ret = PTR_ERR(clk);
1246 goto out_unmap; 1247 goto out_unmap;
1247 } 1248 }
1249 clk_prepare(clk);
1248 1250
1249 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL); 1251 nmk_chip = kzalloc(sizeof(*nmk_chip), GFP_KERNEL);
1250 if (!nmk_chip) { 1252 if (!nmk_chip) {
@@ -1436,7 +1438,27 @@ static int nmk_pmx_enable(struct pinctrl_dev *pctldev, unsigned function,
1436 1438
1437 dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins); 1439 dev_dbg(npct->dev, "enable group %s, %u pins\n", g->name, g->npins);
1438 1440
1439 /* Handle this special glitch on altfunction C */ 1441 /*
1442 * If we're setting altfunc C by setting both AFSLA and AFSLB to 1,
1443 * we may pass through an undesired state. In this case we take
1444 * some extra care.
1445 *
1446 * Safe sequence used to switch IOs between GPIO and Alternate-C mode:
1447 * - Save SLPM registers (since we have a shadow register in the
1448 * nmk_chip we're using that as backup)
1449 * - Set SLPM=0 for the IOs you want to switch and others to 1
1450 * - Configure the GPIO registers for the IOs that are being switched
1451 * - Set IOFORCE=1
1452 * - Modify the AFLSA/B registers for the IOs that are being switched
1453 * - Set IOFORCE=0
1454 * - Restore SLPM registers
1455 * - Any spurious wake up event during switch sequence to be ignored
1456 * and cleared
1457 *
1458 * We REALLY need to save ALL slpm registers, because the external
1459 * IOFORCE will switch *all* ports to their sleepmode setting to as
1460 * to avoid glitches. (Not just one port!)
1461 */
1440 glitch = (g->altsetting == NMK_GPIO_ALT_C); 1462 glitch = (g->altsetting == NMK_GPIO_ALT_C);
1441 1463
1442 if (glitch) { 1464 if (glitch) {
@@ -1688,18 +1710,34 @@ static struct pinctrl_desc nmk_pinctrl_desc = {
1688 .owner = THIS_MODULE, 1710 .owner = THIS_MODULE,
1689}; 1711};
1690 1712
1713static const struct of_device_id nmk_pinctrl_match[] = {
1714 {
1715 .compatible = "stericsson,nmk_pinctrl",
1716 .data = (void *)PINCTRL_NMK_DB8500,
1717 },
1718 {},
1719};
1720
1691static int __devinit nmk_pinctrl_probe(struct platform_device *pdev) 1721static int __devinit nmk_pinctrl_probe(struct platform_device *pdev)
1692{ 1722{
1693 const struct platform_device_id *platid = platform_get_device_id(pdev); 1723 const struct platform_device_id *platid = platform_get_device_id(pdev);
1724 struct device_node *np = pdev->dev.of_node;
1694 struct nmk_pinctrl *npct; 1725 struct nmk_pinctrl *npct;
1726 unsigned int version = 0;
1695 int i; 1727 int i;
1696 1728
1697 npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL); 1729 npct = devm_kzalloc(&pdev->dev, sizeof(*npct), GFP_KERNEL);
1698 if (!npct) 1730 if (!npct)
1699 return -ENOMEM; 1731 return -ENOMEM;
1700 1732
1733 if (platid)
1734 version = platid->driver_data;
1735 else if (np)
1736 version = (unsigned int)
1737 of_match_device(nmk_pinctrl_match, &pdev->dev)->data;
1738
1701 /* Poke in other ASIC variants here */ 1739 /* Poke in other ASIC variants here */
1702 if (platid->driver_data == PINCTRL_NMK_DB8500) 1740 if (version == PINCTRL_NMK_DB8500)
1703 nmk_pinctrl_db8500_init(&npct->soc); 1741 nmk_pinctrl_db8500_init(&npct->soc);
1704 1742
1705 /* 1743 /*
@@ -1758,6 +1796,7 @@ static struct platform_driver nmk_pinctrl_driver = {
1758 .driver = { 1796 .driver = {
1759 .owner = THIS_MODULE, 1797 .owner = THIS_MODULE,
1760 .name = "pinctrl-nomadik", 1798 .name = "pinctrl-nomadik",
1799 .of_match_table = nmk_pinctrl_match,
1761 }, 1800 },
1762 .probe = nmk_pinctrl_probe, 1801 .probe = nmk_pinctrl_probe,
1763 .id_table = nmk_pinctrl_id, 1802 .id_table = nmk_pinctrl_id,
diff --git a/drivers/pinctrl/pinctrl-sirf.c b/drivers/pinctrl/pinctrl-sirf.c
index ba15b1a29e52..e9f8e7d11001 100644
--- a/drivers/pinctrl/pinctrl-sirf.c
+++ b/drivers/pinctrl/pinctrl-sirf.c
@@ -1184,7 +1184,7 @@ out_no_gpio_remap:
1184 return ret; 1184 return ret;
1185} 1185}
1186 1186
1187static const struct of_device_id pinmux_ids[] = { 1187static const struct of_device_id pinmux_ids[] __devinitconst = {
1188 { .compatible = "sirf,prima2-gpio-pinmux" }, 1188 { .compatible = "sirf,prima2-gpio-pinmux" },
1189 {} 1189 {}
1190}; 1190};
diff --git a/drivers/pinctrl/spear/pinctrl-spear.c b/drivers/pinctrl/spear/pinctrl-spear.c
index 5ae50aadf885..b3f6b2873fdd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.c
+++ b/drivers/pinctrl/spear/pinctrl-spear.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr pinmux 2 * Driver for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * Inspired from: 7 * Inspired from:
8 * - U300 Pinctl drivers 8 * - U300 Pinctl drivers
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 9155783bb47f..d950eb78d939 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -2,7 +2,7 @@
2 * Driver header file for the ST Microelectronics SPEAr pinmux 2 * Driver header file for the ST Microelectronics SPEAr pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
index fff168be7f00..d6cca8c81b92 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1310 pinmux 2 * Driver for the ST Microelectronics SPEAr1310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -2192,7 +2192,7 @@ static void __exit spear1310_pinctrl_exit(void)
2192} 2192}
2193module_exit(spear1310_pinctrl_exit); 2193module_exit(spear1310_pinctrl_exit);
2194 2194
2195MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 2195MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
2196MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver"); 2196MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
2197MODULE_LICENSE("GPL v2"); 2197MODULE_LICENSE("GPL v2");
2198MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match); 2198MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
index a8ab2a6f51bf..a0eb057e55bd 100644
--- a/drivers/pinctrl/spear/pinctrl-spear1340.c
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr1340 pinmux 2 * Driver for the ST Microelectronics SPEAr1340 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -1983,7 +1983,7 @@ static void __exit spear1340_pinctrl_exit(void)
1983} 1983}
1984module_exit(spear1340_pinctrl_exit); 1984module_exit(spear1340_pinctrl_exit);
1985 1985
1986MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 1986MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
1987MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver"); 1987MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
1988MODULE_LICENSE("GPL v2"); 1988MODULE_LICENSE("GPL v2");
1989MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match); 1989MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear300.c b/drivers/pinctrl/spear/pinctrl-spear300.c
index 9c82a35e4e78..4dfc2849b172 100644
--- a/drivers/pinctrl/spear/pinctrl-spear300.c
+++ b/drivers/pinctrl/spear/pinctrl-spear300.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr300 pinmux 2 * Driver for the ST Microelectronics SPEAr300 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -702,7 +702,7 @@ static void __exit spear300_pinctrl_exit(void)
702} 702}
703module_exit(spear300_pinctrl_exit); 703module_exit(spear300_pinctrl_exit);
704 704
705MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 705MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
706MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver"); 706MODULE_DESCRIPTION("ST Microelectronics SPEAr300 pinctrl driver");
707MODULE_LICENSE("GPL v2"); 707MODULE_LICENSE("GPL v2");
708MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match); 708MODULE_DEVICE_TABLE(of, spear300_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear310.c b/drivers/pinctrl/spear/pinctrl-spear310.c
index 1a9707605125..96883693fb7e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear310.c
+++ b/drivers/pinctrl/spear/pinctrl-spear310.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr310 pinmux 2 * Driver for the ST Microelectronics SPEAr310 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -425,7 +425,7 @@ static void __exit spear310_pinctrl_exit(void)
425} 425}
426module_exit(spear310_pinctrl_exit); 426module_exit(spear310_pinctrl_exit);
427 427
428MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 428MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
429MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver"); 429MODULE_DESCRIPTION("ST Microelectronics SPEAr310 pinctrl driver");
430MODULE_LICENSE("GPL v2"); 430MODULE_LICENSE("GPL v2");
431MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match); 431MODULE_DEVICE_TABLE(of, SPEAr310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear320.c b/drivers/pinctrl/spear/pinctrl-spear320.c
index de726e6c283a..020b1e0bdb3e 100644
--- a/drivers/pinctrl/spear/pinctrl-spear320.c
+++ b/drivers/pinctrl/spear/pinctrl-spear320.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr320 pinmux 2 * Driver for the ST Microelectronics SPEAr320 pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
@@ -3462,7 +3462,7 @@ static void __exit spear320_pinctrl_exit(void)
3462} 3462}
3463module_exit(spear320_pinctrl_exit); 3463module_exit(spear320_pinctrl_exit);
3464 3464
3465MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 3465MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
3466MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver"); 3466MODULE_DESCRIPTION("ST Microelectronics SPEAr320 pinctrl driver");
3467MODULE_LICENSE("GPL v2"); 3467MODULE_LICENSE("GPL v2");
3468MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match); 3468MODULE_DEVICE_TABLE(of, spear320_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 91c883bc46a6..0242378f7cb8 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -2,7 +2,7 @@
2 * Driver for the ST Microelectronics SPEAr3xx pinmux 2 * Driver for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.h b/drivers/pinctrl/spear/pinctrl-spear3xx.h
index 5d5fdd8df7b8..31f44347f17c 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.h
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.h
@@ -2,7 +2,7 @@
2 * Header file for the ST Microelectronics SPEAr3xx pinmux 2 * Header file for the ST Microelectronics SPEAr3xx pinmux
3 * 3 *
4 * Copyright (C) 2012 ST Microelectronics 4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com> 5 * Viresh Kumar <viresh.linux@gmail.com>
6 * 6 *
7 * This file is licensed under the terms of the GNU General Public 7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any 8 * License version 2. This program is licensed "as is" without any
diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c
index c1a3fd8e1243..ce875dc365e5 100644
--- a/drivers/platform/x86/acer-wmi.c
+++ b/drivers/platform/x86/acer-wmi.c
@@ -523,6 +523,30 @@ static const struct dmi_system_id video_vendor_dmi_table[] = {
523 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"), 523 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4750"),
524 }, 524 },
525 }, 525 },
526 {
527 .callback = video_set_backlight_video_vendor,
528 .ident = "Acer Extensa 5235",
529 .matches = {
530 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
531 DMI_MATCH(DMI_PRODUCT_NAME, "Extensa 5235"),
532 },
533 },
534 {
535 .callback = video_set_backlight_video_vendor,
536 .ident = "Acer TravelMate 5760",
537 .matches = {
538 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
539 DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
540 },
541 },
542 {
543 .callback = video_set_backlight_video_vendor,
544 .ident = "Acer Aspire 5750",
545 .matches = {
546 DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
547 DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5750"),
548 },
549 },
526 {} 550 {}
527}; 551};
528 552
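The three new entries above follow the standard DMI quirk pattern: a table of struct dmi_system_id records, each naming a board vendor and product string plus a callback that switches the backlight over to the vendor driver. Below is a minimal, self-contained sketch of that pattern; the example_* identifiers are placeholders and not part of acer-wmi itself.

#include <linux/dmi.h>
#include <linux/kernel.h>

static int example_set_quirk(const struct dmi_system_id *d)
{
        pr_info("applying quirk for %s\n", d->ident);
        return 1;               /* non-zero stops dmi_check_system() early */
}

static const struct dmi_system_id example_quirk_table[] = {
        {
                .callback = example_set_quirk,
                .ident = "Acer TravelMate 5760",
                .matches = {
                        DMI_MATCH(DMI_BOARD_VENDOR, "Acer"),
                        DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 5760"),
                },
        },
        { }                     /* terminating empty entry is required */
};

static void example_apply_quirks(void)
{
        /* runs every .callback whose .matches all match the firmware DMI data */
        dmi_check_system(example_quirk_table);
}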
diff --git a/drivers/platform/x86/acerhdf.c b/drivers/platform/x86/acerhdf.c
index 639db4d0aa76..2fd9d36acd15 100644
--- a/drivers/platform/x86/acerhdf.c
+++ b/drivers/platform/x86/acerhdf.c
@@ -5,7 +5,7 @@
5 * 5 *
6 * (C) 2009 - Peter Feuerer peter (a) piie.net 6 * (C) 2009 - Peter Feuerer peter (a) piie.net
7 * http://piie.net 7 * http://piie.net
8 * 2009 Borislav Petkov <petkovbb@gmail.com> 8 * 2009 Borislav Petkov bp (a) alien8.de
9 * 9 *
10 * Inspired by and many thanks to: 10 * Inspired by and many thanks to:
11 * o acerfand - Rachel Greenham 11 * o acerfand - Rachel Greenham
diff --git a/drivers/platform/x86/apple-gmux.c b/drivers/platform/x86/apple-gmux.c
index 8a582bdfdc76..694a15a56230 100644
--- a/drivers/platform/x86/apple-gmux.c
+++ b/drivers/platform/x86/apple-gmux.c
@@ -87,6 +87,9 @@ static int gmux_update_status(struct backlight_device *bd)
87 struct apple_gmux_data *gmux_data = bl_get_data(bd); 87 struct apple_gmux_data *gmux_data = bl_get_data(bd);
88 u32 brightness = bd->props.brightness; 88 u32 brightness = bd->props.brightness;
89 89
90 if (bd->props.state & BL_CORE_SUSPENDED)
91 return 0;
92
90 /* 93 /*
91 * Older gmux versions require writing out lower bytes first then 94 * Older gmux versions require writing out lower bytes first then
92 * setting the upper byte to 0 to flush the values. Newer versions 95 * setting the upper byte to 0 to flush the values. Newer versions
@@ -102,6 +105,7 @@ static int gmux_update_status(struct backlight_device *bd)
102} 105}
103 106
104static const struct backlight_ops gmux_bl_ops = { 107static const struct backlight_ops gmux_bl_ops = {
108 .options = BL_CORE_SUSPENDRESUME,
105 .get_brightness = gmux_get_brightness, 109 .get_brightness = gmux_get_brightness,
106 .update_status = gmux_update_status, 110 .update_status = gmux_update_status,
107}; 111};
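The gmux change leans on the backlight core's built-in suspend handling: setting BL_CORE_SUSPENDRESUME in backlight_ops.options makes the core flag the device BL_CORE_SUSPENDED across suspend, and update_status then skips hardware writes in that state. A minimal sketch of the same pattern, with placeholder example_* names and the hardware write left as a comment:

#include <linux/backlight.h>

static int example_bl_update_status(struct backlight_device *bd)
{
        /* the core sets BL_CORE_SUSPENDED when BL_CORE_SUSPENDRESUME is used */
        if (bd->props.state & BL_CORE_SUSPENDED)
                return 0;

        /* write bd->props.brightness to the hardware here */
        return 0;
}

static int example_bl_get_brightness(struct backlight_device *bd)
{
        return bd->props.brightness;    /* or read it back from hardware */
}

static const struct backlight_ops example_bl_ops = {
        .options        = BL_CORE_SUSPENDRESUME,
        .get_brightness = example_bl_get_brightness,
        .update_status  = example_bl_update_status,
};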
diff --git a/drivers/platform/x86/dell-laptop.c b/drivers/platform/x86/dell-laptop.c
index e6c08ee8d46c..5f78aac9b163 100644
--- a/drivers/platform/x86/dell-laptop.c
+++ b/drivers/platform/x86/dell-laptop.c
@@ -21,7 +21,6 @@
21#include <linux/err.h> 21#include <linux/err.h>
22#include <linux/dmi.h> 22#include <linux/dmi.h>
23#include <linux/io.h> 23#include <linux/io.h>
24#include <linux/rfkill.h>
25#include <linux/power_supply.h> 24#include <linux/power_supply.h>
26#include <linux/acpi.h> 25#include <linux/acpi.h>
27#include <linux/mm.h> 26#include <linux/mm.h>
@@ -90,11 +89,8 @@ static struct platform_driver platform_driver = {
90 89
91static struct platform_device *platform_device; 90static struct platform_device *platform_device;
92static struct backlight_device *dell_backlight_device; 91static struct backlight_device *dell_backlight_device;
93static struct rfkill *wifi_rfkill;
94static struct rfkill *bluetooth_rfkill;
95static struct rfkill *wwan_rfkill;
96 92
97static const struct dmi_system_id __initdata dell_device_table[] = { 93static const struct dmi_system_id dell_device_table[] __initconst = {
98 { 94 {
99 .ident = "Dell laptop", 95 .ident = "Dell laptop",
100 .matches = { 96 .matches = {
@@ -119,96 +115,94 @@ static const struct dmi_system_id __initdata dell_device_table[] = {
119}; 115};
120MODULE_DEVICE_TABLE(dmi, dell_device_table); 116MODULE_DEVICE_TABLE(dmi, dell_device_table);
121 117
122static struct dmi_system_id __devinitdata dell_blacklist[] = { 118static struct dmi_system_id __devinitdata dell_quirks[] = {
123 /* Supported by compal-laptop */
124 {
125 .ident = "Dell Mini 9",
126 .matches = {
127 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
128 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 910"),
129 },
130 },
131 { 119 {
132 .ident = "Dell Mini 10", 120 .callback = dmi_matched,
121 .ident = "Dell Vostro V130",
133 .matches = { 122 .matches = {
134 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 123 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
135 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1010"), 124 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"),
136 }, 125 },
126 .driver_data = &quirk_dell_vostro_v130,
137 }, 127 },
138 { 128 {
139 .ident = "Dell Mini 10v", 129 .callback = dmi_matched,
130 .ident = "Dell Vostro V131",
140 .matches = { 131 .matches = {
141 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 132 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
142 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1011"), 133 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"),
143 }, 134 },
135 .driver_data = &quirk_dell_vostro_v130,
144 }, 136 },
145 { 137 {
146 .ident = "Dell Mini 1012", 138 .callback = dmi_matched,
139 .ident = "Dell Vostro 3350",
147 .matches = { 140 .matches = {
148 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 141 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
149 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1012"), 142 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3350"),
150 }, 143 },
144 .driver_data = &quirk_dell_vostro_v130,
151 }, 145 },
152 { 146 {
153 .ident = "Dell Inspiron 11z", 147 .callback = dmi_matched,
148 .ident = "Dell Vostro 3555",
154 .matches = { 149 .matches = {
155 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 150 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
156 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1110"), 151 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"),
157 }, 152 },
153 .driver_data = &quirk_dell_vostro_v130,
158 }, 154 },
159 { 155 {
160 .ident = "Dell Mini 12", 156 .callback = dmi_matched,
157 .ident = "Dell Inspiron N311z",
161 .matches = { 158 .matches = {
162 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 159 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
163 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1210"), 160 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"),
164 }, 161 },
162 .driver_data = &quirk_dell_vostro_v130,
165 }, 163 },
166 {}
167};
168
169static struct dmi_system_id __devinitdata dell_quirks[] = {
170 { 164 {
171 .callback = dmi_matched, 165 .callback = dmi_matched,
172 .ident = "Dell Vostro V130", 166 .ident = "Dell Inspiron M5110",
173 .matches = { 167 .matches = {
174 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 168 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
175 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V130"), 169 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"),
176 }, 170 },
177 .driver_data = &quirk_dell_vostro_v130, 171 .driver_data = &quirk_dell_vostro_v130,
178 }, 172 },
179 { 173 {
180 .callback = dmi_matched, 174 .callback = dmi_matched,
181 .ident = "Dell Vostro V131", 175 .ident = "Dell Vostro 3360",
182 .matches = { 176 .matches = {
183 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 177 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
184 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro V131"), 178 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
185 }, 179 },
186 .driver_data = &quirk_dell_vostro_v130, 180 .driver_data = &quirk_dell_vostro_v130,
187 }, 181 },
188 { 182 {
189 .callback = dmi_matched, 183 .callback = dmi_matched,
190 .ident = "Dell Vostro 3555", 184 .ident = "Dell Vostro 3460",
191 .matches = { 185 .matches = {
192 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 186 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
193 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3555"), 187 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3460"),
194 }, 188 },
195 .driver_data = &quirk_dell_vostro_v130, 189 .driver_data = &quirk_dell_vostro_v130,
196 }, 190 },
197 { 191 {
198 .callback = dmi_matched, 192 .callback = dmi_matched,
199 .ident = "Dell Inspiron N311z", 193 .ident = "Dell Vostro 3560",
200 .matches = { 194 .matches = {
201 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 195 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
202 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron N311z"), 196 DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 3560"),
203 }, 197 },
204 .driver_data = &quirk_dell_vostro_v130, 198 .driver_data = &quirk_dell_vostro_v130,
205 }, 199 },
206 { 200 {
207 .callback = dmi_matched, 201 .callback = dmi_matched,
208 .ident = "Dell Inspiron M5110", 202 .ident = "Dell Vostro 3450",
209 .matches = { 203 .matches = {
210 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), 204 DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
211 DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron M5110"), 205 DMI_MATCH(DMI_PRODUCT_NAME, "Dell System Vostro 3450"),
212 }, 206 },
213 .driver_data = &quirk_dell_vostro_v130, 207 .driver_data = &quirk_dell_vostro_v130,
214 }, 208 },
@@ -305,94 +299,6 @@ dell_send_request(struct calling_interface_buffer *buffer, int class,
305 return buffer; 299 return buffer;
306} 300}
307 301
308/* Derived from information in DellWirelessCtl.cpp:
309 Class 17, select 11 is radio control. It returns an array of 32-bit values.
310
311 Input byte 0 = 0: Wireless information
312
313 result[0]: return code
314 result[1]:
315 Bit 0: Hardware switch supported
316 Bit 1: Wifi locator supported
317 Bit 2: Wifi is supported
318 Bit 3: Bluetooth is supported
319 Bit 4: WWAN is supported
320 Bit 5: Wireless keyboard supported
321 Bits 6-7: Reserved
322 Bit 8: Wifi is installed
323 Bit 9: Bluetooth is installed
324 Bit 10: WWAN is installed
325 Bits 11-15: Reserved
326 Bit 16: Hardware switch is on
327 Bit 17: Wifi is blocked
328 Bit 18: Bluetooth is blocked
329 Bit 19: WWAN is blocked
330 Bits 20-31: Reserved
331 result[2]: NVRAM size in bytes
332 result[3]: NVRAM format version number
333
334 Input byte 0 = 2: Wireless switch configuration
335 result[0]: return code
336 result[1]:
337 Bit 0: Wifi controlled by switch
338 Bit 1: Bluetooth controlled by switch
339 Bit 2: WWAN controlled by switch
340 Bits 3-6: Reserved
341 Bit 7: Wireless switch config locked
342 Bit 8: Wifi locator enabled
343 Bits 9-14: Reserved
344 Bit 15: Wifi locator setting locked
345 Bits 16-31: Reserved
346*/
347
348static int dell_rfkill_set(void *data, bool blocked)
349{
350 int disable = blocked ? 1 : 0;
351 unsigned long radio = (unsigned long)data;
352 int hwswitch_bit = (unsigned long)data - 1;
353 int ret = 0;
354
355 get_buffer();
356 dell_send_request(buffer, 17, 11);
357
358 /* If the hardware switch controls this radio, and the hardware
359 switch is disabled, don't allow changing the software state */
360 if ((hwswitch_state & BIT(hwswitch_bit)) &&
361 !(buffer->output[1] & BIT(16))) {
362 ret = -EINVAL;
363 goto out;
364 }
365
366 buffer->input[0] = (1 | (radio<<8) | (disable << 16));
367 dell_send_request(buffer, 17, 11);
368
369out:
370 release_buffer();
371 return ret;
372}
373
374static void dell_rfkill_query(struct rfkill *rfkill, void *data)
375{
376 int status;
377 int bit = (unsigned long)data + 16;
378 int hwswitch_bit = (unsigned long)data - 1;
379
380 get_buffer();
381 dell_send_request(buffer, 17, 11);
382 status = buffer->output[1];
383 release_buffer();
384
385 rfkill_set_sw_state(rfkill, !!(status & BIT(bit)));
386
387 if (hwswitch_state & (BIT(hwswitch_bit)))
388 rfkill_set_hw_state(rfkill, !(status & BIT(16)));
389}
390
391static const struct rfkill_ops dell_rfkill_ops = {
392 .set_block = dell_rfkill_set,
393 .query = dell_rfkill_query,
394};
395
396static struct dentry *dell_laptop_dir; 302static struct dentry *dell_laptop_dir;
397 303
398static int dell_debugfs_show(struct seq_file *s, void *data) 304static int dell_debugfs_show(struct seq_file *s, void *data)
@@ -462,108 +368,6 @@ static const struct file_operations dell_debugfs_fops = {
462 .release = single_release, 368 .release = single_release,
463}; 369};
464 370
465static void dell_update_rfkill(struct work_struct *ignored)
466{
467 if (wifi_rfkill)
468 dell_rfkill_query(wifi_rfkill, (void *)1);
469 if (bluetooth_rfkill)
470 dell_rfkill_query(bluetooth_rfkill, (void *)2);
471 if (wwan_rfkill)
472 dell_rfkill_query(wwan_rfkill, (void *)3);
473}
474static DECLARE_DELAYED_WORK(dell_rfkill_work, dell_update_rfkill);
475
476
477static int __init dell_setup_rfkill(void)
478{
479 int status;
480 int ret;
481
482 if (dmi_check_system(dell_blacklist)) {
483 pr_info("Blacklisted hardware detected - not enabling rfkill\n");
484 return 0;
485 }
486
487 get_buffer();
488 dell_send_request(buffer, 17, 11);
489 status = buffer->output[1];
490 buffer->input[0] = 0x2;
491 dell_send_request(buffer, 17, 11);
492 hwswitch_state = buffer->output[1];
493 release_buffer();
494
495 if ((status & (1<<2|1<<8)) == (1<<2|1<<8)) {
496 wifi_rfkill = rfkill_alloc("dell-wifi", &platform_device->dev,
497 RFKILL_TYPE_WLAN,
498 &dell_rfkill_ops, (void *) 1);
499 if (!wifi_rfkill) {
500 ret = -ENOMEM;
501 goto err_wifi;
502 }
503 ret = rfkill_register(wifi_rfkill);
504 if (ret)
505 goto err_wifi;
506 }
507
508 if ((status & (1<<3|1<<9)) == (1<<3|1<<9)) {
509 bluetooth_rfkill = rfkill_alloc("dell-bluetooth",
510 &platform_device->dev,
511 RFKILL_TYPE_BLUETOOTH,
512 &dell_rfkill_ops, (void *) 2);
513 if (!bluetooth_rfkill) {
514 ret = -ENOMEM;
515 goto err_bluetooth;
516 }
517 ret = rfkill_register(bluetooth_rfkill);
518 if (ret)
519 goto err_bluetooth;
520 }
521
522 if ((status & (1<<4|1<<10)) == (1<<4|1<<10)) {
523 wwan_rfkill = rfkill_alloc("dell-wwan",
524 &platform_device->dev,
525 RFKILL_TYPE_WWAN,
526 &dell_rfkill_ops, (void *) 3);
527 if (!wwan_rfkill) {
528 ret = -ENOMEM;
529 goto err_wwan;
530 }
531 ret = rfkill_register(wwan_rfkill);
532 if (ret)
533 goto err_wwan;
534 }
535
536 return 0;
537err_wwan:
538 rfkill_destroy(wwan_rfkill);
539 if (bluetooth_rfkill)
540 rfkill_unregister(bluetooth_rfkill);
541err_bluetooth:
542 rfkill_destroy(bluetooth_rfkill);
543 if (wifi_rfkill)
544 rfkill_unregister(wifi_rfkill);
545err_wifi:
546 rfkill_destroy(wifi_rfkill);
547
548 return ret;
549}
550
551static void dell_cleanup_rfkill(void)
552{
553 if (wifi_rfkill) {
554 rfkill_unregister(wifi_rfkill);
555 rfkill_destroy(wifi_rfkill);
556 }
557 if (bluetooth_rfkill) {
558 rfkill_unregister(bluetooth_rfkill);
559 rfkill_destroy(bluetooth_rfkill);
560 }
561 if (wwan_rfkill) {
562 rfkill_unregister(wwan_rfkill);
563 rfkill_destroy(wwan_rfkill);
564 }
565}
566
567static int dell_send_intensity(struct backlight_device *bd) 371static int dell_send_intensity(struct backlight_device *bd)
568{ 372{
569 int ret = 0; 373 int ret = 0;
@@ -655,30 +459,6 @@ static void touchpad_led_exit(void)
655 led_classdev_unregister(&touchpad_led); 459 led_classdev_unregister(&touchpad_led);
656} 460}
657 461
658static bool dell_laptop_i8042_filter(unsigned char data, unsigned char str,
659 struct serio *port)
660{
661 static bool extended;
662
663 if (str & 0x20)
664 return false;
665
666 if (unlikely(data == 0xe0)) {
667 extended = true;
668 return false;
669 } else if (unlikely(extended)) {
670 switch (data) {
671 case 0x8:
672 schedule_delayed_work(&dell_rfkill_work,
673 round_jiffies_relative(HZ));
674 break;
675 }
676 extended = false;
677 }
678
679 return false;
680}
681
682static int __init dell_init(void) 462static int __init dell_init(void)
683{ 463{
684 int max_intensity = 0; 464 int max_intensity = 0;
@@ -720,26 +500,10 @@ static int __init dell_init(void)
720 goto fail_buffer; 500 goto fail_buffer;
721 buffer = page_address(bufferpage); 501 buffer = page_address(bufferpage);
722 502
723 ret = dell_setup_rfkill();
724
725 if (ret) {
726 pr_warn("Unable to setup rfkill\n");
727 goto fail_rfkill;
728 }
729
730 ret = i8042_install_filter(dell_laptop_i8042_filter);
731 if (ret) {
732 pr_warn("Unable to install key filter\n");
733 goto fail_filter;
734 }
735
736 if (quirks && quirks->touchpad_led) 503 if (quirks && quirks->touchpad_led)
737 touchpad_led_init(&platform_device->dev); 504 touchpad_led_init(&platform_device->dev);
738 505
739 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL); 506 dell_laptop_dir = debugfs_create_dir("dell_laptop", NULL);
740 if (dell_laptop_dir != NULL)
741 debugfs_create_file("rfkill", 0444, dell_laptop_dir, NULL,
742 &dell_debugfs_fops);
743 507
744#ifdef CONFIG_ACPI 508#ifdef CONFIG_ACPI
745 /* In the event of an ACPI backlight being available, don't 509 /* In the event of an ACPI backlight being available, don't
@@ -782,11 +546,6 @@ static int __init dell_init(void)
782 return 0; 546 return 0;
783 547
784fail_backlight: 548fail_backlight:
785 i8042_remove_filter(dell_laptop_i8042_filter);
786 cancel_delayed_work_sync(&dell_rfkill_work);
787fail_filter:
788 dell_cleanup_rfkill();
789fail_rfkill:
790 free_page((unsigned long)bufferpage); 549 free_page((unsigned long)bufferpage);
791fail_buffer: 550fail_buffer:
792 platform_device_del(platform_device); 551 platform_device_del(platform_device);
@@ -804,10 +563,7 @@ static void __exit dell_exit(void)
804 debugfs_remove_recursive(dell_laptop_dir); 563 debugfs_remove_recursive(dell_laptop_dir);
805 if (quirks && quirks->touchpad_led) 564 if (quirks && quirks->touchpad_led)
806 touchpad_led_exit(); 565 touchpad_led_exit();
807 i8042_remove_filter(dell_laptop_i8042_filter);
808 cancel_delayed_work_sync(&dell_rfkill_work);
809 backlight_device_unregister(dell_backlight_device); 566 backlight_device_unregister(dell_backlight_device);
810 dell_cleanup_rfkill();
811 if (platform_device) { 567 if (platform_device) {
812 platform_device_unregister(platform_device); 568 platform_device_unregister(platform_device);
813 platform_driver_unregister(&platform_driver); 569 platform_driver_unregister(&platform_driver);
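Most of what is removed here is the driver-local rfkill plumbing, including the i8042 keyboard filter that scheduled deferred work whenever the wireless hotkey scancode was seen. The sketch below shows that filter-plus-delayed-work pattern in isolation; it mirrors the removed code rather than anything still present in dell-laptop, and the example_* names and scancodes are illustrative only.

#include <linux/kernel.h>
#include <linux/i8042.h>
#include <linux/serio.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

static void example_update_state(struct work_struct *work)
{
        /* query firmware state here, outside interrupt context */
}
static DECLARE_DELAYED_WORK(example_work, example_update_state);

static bool example_i8042_filter(unsigned char data, unsigned char str,
                                 struct serio *port)
{
        static bool extended;

        if (str & 0x20)                 /* ignore the AUX (mouse) stream */
                return false;

        if (unlikely(data == 0xe0)) {
                extended = true;
        } else if (unlikely(extended)) {
                if (data == 0x8)        /* hypothetical wireless hotkey code */
                        schedule_delayed_work(&example_work,
                                              round_jiffies_relative(HZ));
                extended = false;
        }

        return false;                   /* never swallow the keystroke */
}

/* registration: i8042_install_filter(example_i8042_filter);
 * teardown:     i8042_remove_filter(example_i8042_filter);
 *               cancel_delayed_work_sync(&example_work); */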
diff --git a/drivers/platform/x86/fujitsu-tablet.c b/drivers/platform/x86/fujitsu-tablet.c
index 580d80a73c3a..da267eae8ba8 100644
--- a/drivers/platform/x86/fujitsu-tablet.c
+++ b/drivers/platform/x86/fujitsu-tablet.c
@@ -16,6 +16,8 @@
16 * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA. 16 * 59 Temple Place Suite 330, Boston, MA 02111-1307, USA.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/module.h> 22#include <linux/module.h>
21#include <linux/init.h> 23#include <linux/init.h>
@@ -34,7 +36,8 @@
34#define ACPI_FUJITSU_CLASS "fujitsu" 36#define ACPI_FUJITSU_CLASS "fujitsu"
35 37
36#define INVERT_TABLET_MODE_BIT 0x01 38#define INVERT_TABLET_MODE_BIT 0x01
37#define FORCE_TABLET_MODE_IF_UNDOCK 0x02 39#define INVERT_DOCK_STATE_BIT 0x02
40#define FORCE_TABLET_MODE_IF_UNDOCK 0x04
38 41
39#define KEYMAP_LEN 16 42#define KEYMAP_LEN 16
40 43
@@ -161,6 +164,8 @@ static void fujitsu_send_state(void)
161 state = fujitsu_read_register(0xdd); 164 state = fujitsu_read_register(0xdd);
162 165
163 dock = state & 0x02; 166 dock = state & 0x02;
167 if (fujitsu.config.quirks & INVERT_DOCK_STATE_BIT)
168 dock = !dock;
164 169
165 if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) { 170 if ((fujitsu.config.quirks & FORCE_TABLET_MODE_IF_UNDOCK) && (!dock)) {
166 tablet_mode = 1; 171 tablet_mode = 1;
@@ -221,9 +226,6 @@ static int __devinit input_fujitsu_setup(struct device *parent,
221 input_set_capability(idev, EV_SW, SW_DOCK); 226 input_set_capability(idev, EV_SW, SW_DOCK);
222 input_set_capability(idev, EV_SW, SW_TABLET_MODE); 227 input_set_capability(idev, EV_SW, SW_TABLET_MODE);
223 228
224 input_set_capability(idev, EV_SW, SW_DOCK);
225 input_set_capability(idev, EV_SW, SW_TABLET_MODE);
226
227 error = input_register_device(idev); 229 error = input_register_device(idev);
228 if (error) { 230 if (error) {
229 input_free_device(idev); 231 input_free_device(idev);
@@ -275,25 +277,31 @@ static irqreturn_t fujitsu_interrupt(int irq, void *dev_id)
275 return IRQ_HANDLED; 277 return IRQ_HANDLED;
276} 278}
277 279
278static int __devinit fujitsu_dmi_default(const struct dmi_system_id *dmi) 280static void __devinit fujitsu_dmi_common(const struct dmi_system_id *dmi)
279{ 281{
280 printk(KERN_INFO MODULENAME ": %s\n", dmi->ident); 282 pr_info("%s\n", dmi->ident);
281 memcpy(fujitsu.config.keymap, dmi->driver_data, 283 memcpy(fujitsu.config.keymap, dmi->driver_data,
282 sizeof(fujitsu.config.keymap)); 284 sizeof(fujitsu.config.keymap));
285}
286
287static int __devinit fujitsu_dmi_lifebook(const struct dmi_system_id *dmi)
288{
289 fujitsu_dmi_common(dmi);
290 fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT;
283 return 1; 291 return 1;
284} 292}
285 293
286static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi) 294static int __devinit fujitsu_dmi_stylistic(const struct dmi_system_id *dmi)
287{ 295{
288 fujitsu_dmi_default(dmi); 296 fujitsu_dmi_common(dmi);
289 fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK; 297 fujitsu.config.quirks |= FORCE_TABLET_MODE_IF_UNDOCK;
290 fujitsu.config.quirks |= INVERT_TABLET_MODE_BIT; 298 fujitsu.config.quirks |= INVERT_DOCK_STATE_BIT;
291 return 1; 299 return 1;
292} 300}
293 301
294static struct dmi_system_id dmi_ids[] __initconst = { 302static struct dmi_system_id dmi_ids[] __initconst = {
295 { 303 {
296 .callback = fujitsu_dmi_default, 304 .callback = fujitsu_dmi_lifebook,
297 .ident = "Fujitsu Siemens P/T Series", 305 .ident = "Fujitsu Siemens P/T Series",
298 .matches = { 306 .matches = {
299 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 307 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -302,7 +310,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
302 .driver_data = keymap_Lifebook_Tseries 310 .driver_data = keymap_Lifebook_Tseries
303 }, 311 },
304 { 312 {
305 .callback = fujitsu_dmi_default, 313 .callback = fujitsu_dmi_lifebook,
306 .ident = "Fujitsu Lifebook T Series", 314 .ident = "Fujitsu Lifebook T Series",
307 .matches = { 315 .matches = {
308 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 316 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -320,7 +328,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
320 .driver_data = keymap_Stylistic_Tseries 328 .driver_data = keymap_Stylistic_Tseries
321 }, 329 },
322 { 330 {
323 .callback = fujitsu_dmi_default, 331 .callback = fujitsu_dmi_lifebook,
324 .ident = "Fujitsu LifeBook U810", 332 .ident = "Fujitsu LifeBook U810",
325 .matches = { 333 .matches = {
326 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), 334 DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"),
@@ -347,7 +355,7 @@ static struct dmi_system_id dmi_ids[] __initconst = {
347 .driver_data = keymap_Stylistic_ST5xxx 355 .driver_data = keymap_Stylistic_ST5xxx
348 }, 356 },
349 { 357 {
350 .callback = fujitsu_dmi_default, 358 .callback = fujitsu_dmi_lifebook,
351 .ident = "Unknown (using defaults)", 359 .ident = "Unknown (using defaults)",
352 .matches = { 360 .matches = {
353 DMI_MATCH(DMI_SYS_VENDOR, ""), 361 DMI_MATCH(DMI_SYS_VENDOR, ""),
@@ -473,6 +481,6 @@ module_exit(fujitsu_module_exit);
473MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>"); 481MODULE_AUTHOR("Robert Gerlach <khnz@gmx.de>");
474MODULE_DESCRIPTION("Fujitsu tablet pc extras driver"); 482MODULE_DESCRIPTION("Fujitsu tablet pc extras driver");
475MODULE_LICENSE("GPL"); 483MODULE_LICENSE("GPL");
476MODULE_VERSION("2.4"); 484MODULE_VERSION("2.5");
477 485
478MODULE_DEVICE_TABLE(acpi, fujitsu_ids); 486MODULE_DEVICE_TABLE(acpi, fujitsu_ids);
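Alongside the dock-state quirk, the driver switches its logging to the pr_fmt() convention: defining pr_fmt before any include makes every pr_info()/pr_err() call carry the module name prefix automatically. A minimal sketch, with an illustrative message and module body:

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede the includes */

#include <linux/kernel.h>
#include <linux/module.h>

static int __init example_init(void)
{
        /* prints "<modname>: loaded, quirks=0x04" without repeating the
         * module name by hand at every call site */
        pr_info("loaded, quirks=0x%02x\n", 0x04);
        return 0;
}
module_init(example_init);

MODULE_LICENSE("GPL");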
diff --git a/drivers/platform/x86/hdaps.c b/drivers/platform/x86/hdaps.c
index 7387f97a2941..24a3ae065f1b 100644
--- a/drivers/platform/x86/hdaps.c
+++ b/drivers/platform/x86/hdaps.c
@@ -2,7 +2,7 @@
2 * hdaps.c - driver for IBM's Hard Drive Active Protection System 2 * hdaps.c - driver for IBM's Hard Drive Active Protection System
3 * 3 *
4 * Copyright (C) 2005 Robert Love <rml@novell.com> 4 * Copyright (C) 2005 Robert Love <rml@novell.com>
5 * Copyright (C) 2005 Jesper Juhl <jesper.juhl@gmail.com> 5 * Copyright (C) 2005 Jesper Juhl <jj@chaosbits.net>
6 * 6 *
7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads 7 * The HardDisk Active Protection System (hdaps) is present in IBM ThinkPads
8 * starting with the R40, T41, and X40. It provides a basic two-axis 8 * starting with the R40, T41, and X40. It provides a basic two-axis
diff --git a/drivers/platform/x86/hp-wmi.c b/drivers/platform/x86/hp-wmi.c
index e2faa3cbb792..387183a2d6dd 100644
--- a/drivers/platform/x86/hp-wmi.c
+++ b/drivers/platform/x86/hp-wmi.c
@@ -634,6 +634,8 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
634 RFKILL_TYPE_WLAN, 634 RFKILL_TYPE_WLAN,
635 &hp_wmi_rfkill_ops, 635 &hp_wmi_rfkill_ops,
636 (void *) HPWMI_WIFI); 636 (void *) HPWMI_WIFI);
637 if (!wifi_rfkill)
638 return -ENOMEM;
637 rfkill_init_sw_state(wifi_rfkill, 639 rfkill_init_sw_state(wifi_rfkill,
638 hp_wmi_get_sw_state(HPWMI_WIFI)); 640 hp_wmi_get_sw_state(HPWMI_WIFI));
639 rfkill_set_hw_state(wifi_rfkill, 641 rfkill_set_hw_state(wifi_rfkill,
@@ -648,6 +650,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
648 RFKILL_TYPE_BLUETOOTH, 650 RFKILL_TYPE_BLUETOOTH,
649 &hp_wmi_rfkill_ops, 651 &hp_wmi_rfkill_ops,
650 (void *) HPWMI_BLUETOOTH); 652 (void *) HPWMI_BLUETOOTH);
653 if (!bluetooth_rfkill) {
654 err = -ENOMEM;
655 goto register_wifi_error;
656 }
651 rfkill_init_sw_state(bluetooth_rfkill, 657 rfkill_init_sw_state(bluetooth_rfkill,
652 hp_wmi_get_sw_state(HPWMI_BLUETOOTH)); 658 hp_wmi_get_sw_state(HPWMI_BLUETOOTH));
653 rfkill_set_hw_state(bluetooth_rfkill, 659 rfkill_set_hw_state(bluetooth_rfkill,
@@ -662,6 +668,10 @@ static int __devinit hp_wmi_rfkill_setup(struct platform_device *device)
662 RFKILL_TYPE_WWAN, 668 RFKILL_TYPE_WWAN,
663 &hp_wmi_rfkill_ops, 669 &hp_wmi_rfkill_ops,
664 (void *) HPWMI_WWAN); 670 (void *) HPWMI_WWAN);
671 if (!wwan_rfkill) {
672 err = -ENOMEM;
673 goto register_bluetooth_error;
674 }
665 rfkill_init_sw_state(wwan_rfkill, 675 rfkill_init_sw_state(wwan_rfkill,
666 hp_wmi_get_sw_state(HPWMI_WWAN)); 676 hp_wmi_get_sw_state(HPWMI_WWAN));
667 rfkill_set_hw_state(wwan_rfkill, 677 rfkill_set_hw_state(wwan_rfkill,
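The three hunks above add the missing NULL checks after rfkill_alloc(), unwinding anything already registered before returning -ENOMEM. A condensed sketch of that allocate/register/unwind sequence, using placeholder example_* names and only two radios:

#include <linux/rfkill.h>
#include <linux/device.h>
#include <linux/errno.h>

static struct rfkill *example_wifi_rfkill;
static struct rfkill *example_bt_rfkill;

static int example_rfkill_setup(struct device *dev,
                                const struct rfkill_ops *ops)
{
        int err;

        example_wifi_rfkill = rfkill_alloc("example-wifi", dev,
                                           RFKILL_TYPE_WLAN, ops, NULL);
        if (!example_wifi_rfkill)
                return -ENOMEM;         /* rfkill_alloc() can fail */
        err = rfkill_register(example_wifi_rfkill);
        if (err)
                goto err_wifi;

        example_bt_rfkill = rfkill_alloc("example-bluetooth", dev,
                                         RFKILL_TYPE_BLUETOOTH, ops, NULL);
        if (!example_bt_rfkill) {
                err = -ENOMEM;
                goto err_wifi_registered;
        }
        err = rfkill_register(example_bt_rfkill);
        if (err)
                goto err_bt;

        return 0;

err_bt:                                 /* labels fall through on purpose */
        rfkill_destroy(example_bt_rfkill);
err_wifi_registered:
        rfkill_unregister(example_wifi_rfkill);
err_wifi:
        rfkill_destroy(example_wifi_rfkill);
        return err;
}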
diff --git a/drivers/platform/x86/ideapad-laptop.c b/drivers/platform/x86/ideapad-laptop.c
index ac902f7a9baa..4f20f8dd3d7c 100644
--- a/drivers/platform/x86/ideapad-laptop.c
+++ b/drivers/platform/x86/ideapad-laptop.c
@@ -194,7 +194,6 @@ static int write_ec_cmd(acpi_handle handle, int cmd, unsigned long data)
194/* 194/*
195 * debugfs 195 * debugfs
196 */ 196 */
197#define DEBUGFS_EVENT_LEN (4096)
198static int debugfs_status_show(struct seq_file *s, void *data) 197static int debugfs_status_show(struct seq_file *s, void *data)
199{ 198{
200 unsigned long value; 199 unsigned long value;
@@ -315,7 +314,7 @@ static int __devinit ideapad_debugfs_init(struct ideapad_private *priv)
315 node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL, 314 node = debugfs_create_file("status", S_IRUGO, priv->debug, NULL,
316 &debugfs_status_fops); 315 &debugfs_status_fops);
317 if (!node) { 316 if (!node) {
318 pr_err("failed to create event in debugfs"); 317 pr_err("failed to create status in debugfs");
319 goto errout; 318 goto errout;
320 } 319 }
321 320
@@ -785,6 +784,10 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
785 case 9: 784 case 9:
786 ideapad_sync_rfk_state(priv); 785 ideapad_sync_rfk_state(priv);
787 break; 786 break;
787 case 13:
788 case 6:
789 ideapad_input_report(priv, vpc_bit);
790 break;
788 case 4: 791 case 4:
789 ideapad_backlight_notify_brightness(priv); 792 ideapad_backlight_notify_brightness(priv);
790 break; 793 break;
@@ -795,7 +798,7 @@ static void ideapad_acpi_notify(struct acpi_device *adevice, u32 event)
795 ideapad_backlight_notify_power(priv); 798 ideapad_backlight_notify_power(priv);
796 break; 799 break;
797 default: 800 default:
798 ideapad_input_report(priv, vpc_bit); 801 pr_info("Unknown event: %lu\n", vpc_bit);
799 } 802 }
800 } 803 }
801 } 804 }
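The debugfs node whose error message is corrected above follows the usual seq_file pattern: a show callback wrapped by single_open(), exposed through debugfs_create_file(). A minimal sketch with placeholder example_* names and an illustrative status line:

#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/fs.h>
#include <linux/module.h>

static int example_status_show(struct seq_file *s, void *data)
{
        seq_printf(s, "Backlight power: %s\n", "on");   /* illustrative value */
        return 0;
}

static int example_status_open(struct inode *inode, struct file *file)
{
        return single_open(file, example_status_show, inode->i_private);
}

static const struct file_operations example_status_fops = {
        .owner          = THIS_MODULE,
        .open           = example_status_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* creation, mirroring the error handling used above:
 *      dir  = debugfs_create_dir("example", NULL);
 *      node = debugfs_create_file("status", S_IRUGO, dir, NULL,
 *                                 &example_status_fops);
 *      if (!node)
 *              goto errout;
 */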
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index 8a51795aa02a..210d4ae547c2 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -141,6 +141,27 @@ MODULE_PARM_DESC(kbd_backlight_timeout,
141 "(default: 0)"); 141 "(default: 0)");
142 142
143static void sony_nc_kbd_backlight_resume(void); 143static void sony_nc_kbd_backlight_resume(void);
144static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
145 unsigned int handle);
146static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd);
147
148static int sony_nc_battery_care_setup(struct platform_device *pd,
149 unsigned int handle);
150static void sony_nc_battery_care_cleanup(struct platform_device *pd);
151
152static int sony_nc_thermal_setup(struct platform_device *pd);
153static void sony_nc_thermal_cleanup(struct platform_device *pd);
154static void sony_nc_thermal_resume(void);
155
156static int sony_nc_lid_resume_setup(struct platform_device *pd);
157static void sony_nc_lid_resume_cleanup(struct platform_device *pd);
158
159static int sony_nc_highspeed_charging_setup(struct platform_device *pd);
160static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd);
161
162static int sony_nc_touchpad_setup(struct platform_device *pd,
163 unsigned int handle);
164static void sony_nc_touchpad_cleanup(struct platform_device *pd);
144 165
145enum sony_nc_rfkill { 166enum sony_nc_rfkill {
146 SONY_WIFI, 167 SONY_WIFI,
@@ -153,6 +174,9 @@ enum sony_nc_rfkill {
153static int sony_rfkill_handle; 174static int sony_rfkill_handle;
154static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL]; 175static struct rfkill *sony_rfkill_devices[N_SONY_RFKILL];
155static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900}; 176static int sony_rfkill_address[N_SONY_RFKILL] = {0x300, 0x500, 0x700, 0x900};
177static int sony_nc_rfkill_setup(struct acpi_device *device,
178 unsigned int handle);
179static void sony_nc_rfkill_cleanup(void);
156static void sony_nc_rfkill_update(void); 180static void sony_nc_rfkill_update(void);
157 181
158/*********** Input Devices ***********/ 182/*********** Input Devices ***********/
@@ -691,59 +715,97 @@ static struct acpi_device *sony_nc_acpi_device = NULL;
691 715
692/* 716/*
693 * acpi_evaluate_object wrappers 717 * acpi_evaluate_object wrappers
718 * all useful calls into SNC methods take one or zero parameters and return
719 * integers or arrays.
694 */ 720 */
695static int acpi_callgetfunc(acpi_handle handle, char *name, int *result) 721static union acpi_object *__call_snc_method(acpi_handle handle, char *method,
722 u64 *value)
696{ 723{
697 struct acpi_buffer output; 724 union acpi_object *result = NULL;
698 union acpi_object out_obj; 725 struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL };
699 acpi_status status; 726 acpi_status status;
700 727
701 output.length = sizeof(out_obj); 728 if (value) {
702 output.pointer = &out_obj; 729 struct acpi_object_list params;
730 union acpi_object in;
731 in.type = ACPI_TYPE_INTEGER;
732 in.integer.value = *value;
733 params.count = 1;
734 params.pointer = &in;
735 status = acpi_evaluate_object(handle, method, &params, &output);
736 dprintk("__call_snc_method: [%s:0x%.8x%.8x]\n", method,
737 (unsigned int)(*value >> 32),
738 (unsigned int)*value & 0xffffffff);
739 } else {
740 status = acpi_evaluate_object(handle, method, NULL, &output);
741 dprintk("__call_snc_method: [%s]\n", method);
742 }
703 743
704 status = acpi_evaluate_object(handle, name, NULL, &output); 744 if (ACPI_FAILURE(status)) {
705 if ((status == AE_OK) && (out_obj.type == ACPI_TYPE_INTEGER)) { 745 pr_err("Failed to evaluate [%s]\n", method);
706 *result = out_obj.integer.value; 746 return NULL;
707 return 0;
708 } 747 }
709 748
710 pr_warn("acpi_callreadfunc failed\n"); 749 result = (union acpi_object *) output.pointer;
750 if (!result)
751 dprintk("No return object [%s]\n", method);
711 752
712 return -1; 753 return result;
713} 754}
714 755
715static int acpi_callsetfunc(acpi_handle handle, char *name, int value, 756static int sony_nc_int_call(acpi_handle handle, char *name, int *value,
716 int *result) 757 int *result)
717{ 758{
718 struct acpi_object_list params; 759 union acpi_object *object = NULL;
719 union acpi_object in_obj; 760 if (value) {
720 struct acpi_buffer output; 761 u64 v = *value;
721 union acpi_object out_obj; 762 object = __call_snc_method(handle, name, &v);
722 acpi_status status; 763 } else
723 764 object = __call_snc_method(handle, name, NULL);
724 params.count = 1;
725 params.pointer = &in_obj;
726 in_obj.type = ACPI_TYPE_INTEGER;
727 in_obj.integer.value = value;
728 765
729 output.length = sizeof(out_obj); 766 if (!object)
730 output.pointer = &out_obj; 767 return -EINVAL;
731 768
732 status = acpi_evaluate_object(handle, name, &params, &output); 769 if (object->type != ACPI_TYPE_INTEGER) {
733 if (status == AE_OK) { 770 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
734 if (result != NULL) { 771 ACPI_TYPE_INTEGER, object->type);
735 if (out_obj.type != ACPI_TYPE_INTEGER) { 772 kfree(object);
736 pr_warn("acpi_evaluate_object bad return type\n"); 773 return -EINVAL;
737 return -1;
738 }
739 *result = out_obj.integer.value;
740 }
741 return 0;
742 } 774 }
743 775
744 pr_warn("acpi_evaluate_object failed\n"); 776 if (result)
777 *result = object->integer.value;
778
779 kfree(object);
780 return 0;
781}
782
783#define MIN(a, b) (a > b ? b : a)
784static int sony_nc_buffer_call(acpi_handle handle, char *name, u64 *value,
785 void *buffer, size_t buflen)
786{
787 size_t len = len;
788 union acpi_object *object = __call_snc_method(handle, name, value);
789
790 if (!object)
791 return -EINVAL;
792
793 if (object->type == ACPI_TYPE_BUFFER)
794 len = MIN(buflen, object->buffer.length);
795
796 else if (object->type == ACPI_TYPE_INTEGER)
797 len = MIN(buflen, sizeof(object->integer.value));
798
799 else {
800 pr_warn("Invalid acpi_object: expected 0x%x got 0x%x\n",
801 ACPI_TYPE_BUFFER, object->type);
802 kfree(object);
803 return -EINVAL;
804 }
745 805
746 return -1; 806 memcpy(buffer, object->buffer.pointer, len);
807 kfree(object);
808 return 0;
747} 809}
748 810
749struct sony_nc_handles { 811struct sony_nc_handles {
@@ -770,16 +832,17 @@ static ssize_t sony_nc_handles_show(struct device *dev,
770 832
771static int sony_nc_handles_setup(struct platform_device *pd) 833static int sony_nc_handles_setup(struct platform_device *pd)
772{ 834{
773 int i; 835 int i, r, result, arg;
774 int result;
775 836
776 handles = kzalloc(sizeof(*handles), GFP_KERNEL); 837 handles = kzalloc(sizeof(*handles), GFP_KERNEL);
777 if (!handles) 838 if (!handles)
778 return -ENOMEM; 839 return -ENOMEM;
779 840
780 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) { 841 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
781 if (!acpi_callsetfunc(sony_nc_acpi_handle, 842 arg = i + 0x20;
782 "SN00", i + 0x20, &result)) { 843 r = sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg,
844 &result);
845 if (!r) {
783 dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n", 846 dprintk("caching handle 0x%.4x (offset: 0x%.2x)\n",
784 result, i); 847 result, i);
785 handles->cap[i] = result; 848 handles->cap[i] = result;
@@ -819,8 +882,8 @@ static int sony_find_snc_handle(int handle)
819 int i; 882 int i;
820 883
821 /* not initialized yet, return early */ 884 /* not initialized yet, return early */
822 if (!handles) 885 if (!handles || !handle)
823 return -1; 886 return -EINVAL;
824 887
825 for (i = 0; i < 0x10; i++) { 888 for (i = 0; i < 0x10; i++) {
826 if (handles->cap[i] == handle) { 889 if (handles->cap[i] == handle) {
@@ -830,21 +893,20 @@ static int sony_find_snc_handle(int handle)
830 } 893 }
831 } 894 }
832 dprintk("handle 0x%.4x not found\n", handle); 895 dprintk("handle 0x%.4x not found\n", handle);
833 return -1; 896 return -EINVAL;
834} 897}
835 898
836static int sony_call_snc_handle(int handle, int argument, int *result) 899static int sony_call_snc_handle(int handle, int argument, int *result)
837{ 900{
838 int ret = 0; 901 int arg, ret = 0;
839 int offset = sony_find_snc_handle(handle); 902 int offset = sony_find_snc_handle(handle);
840 903
841 if (offset < 0) 904 if (offset < 0)
842 return -1; 905 return offset;
843 906
844 ret = acpi_callsetfunc(sony_nc_acpi_handle, "SN07", offset | argument, 907 arg = offset | argument;
845 result); 908 ret = sony_nc_int_call(sony_nc_acpi_handle, "SN07", &arg, result);
846 dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", offset | argument, 909 dprintk("called SN07 with 0x%.4x (result: 0x%.4x)\n", arg, *result);
847 *result);
848 return ret; 910 return ret;
849} 911}
850 912
@@ -889,14 +951,16 @@ static int boolean_validate(const int direction, const int value)
889static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr, 951static ssize_t sony_nc_sysfs_show(struct device *dev, struct device_attribute *attr,
890 char *buffer) 952 char *buffer)
891{ 953{
892 int value; 954 int value, ret = 0;
893 struct sony_nc_value *item = 955 struct sony_nc_value *item =
894 container_of(attr, struct sony_nc_value, devattr); 956 container_of(attr, struct sony_nc_value, devattr);
895 957
896 if (!*item->acpiget) 958 if (!*item->acpiget)
897 return -EIO; 959 return -EIO;
898 960
899 if (acpi_callgetfunc(sony_nc_acpi_handle, *item->acpiget, &value) < 0) 961 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiget, NULL,
962 &value);
963 if (ret < 0)
900 return -EIO; 964 return -EIO;
901 965
902 if (item->validate) 966 if (item->validate)
@@ -909,7 +973,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
909 struct device_attribute *attr, 973 struct device_attribute *attr,
910 const char *buffer, size_t count) 974 const char *buffer, size_t count)
911{ 975{
912 int value; 976 unsigned long value = 0;
977 int ret = 0;
913 struct sony_nc_value *item = 978 struct sony_nc_value *item =
914 container_of(attr, struct sony_nc_value, devattr); 979 container_of(attr, struct sony_nc_value, devattr);
915 980
@@ -919,7 +984,8 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
919 if (count > 31) 984 if (count > 31)
920 return -EINVAL; 985 return -EINVAL;
921 986
922 value = simple_strtoul(buffer, NULL, 10); 987 if (kstrtoul(buffer, 10, &value))
988 return -EINVAL;
923 989
924 if (item->validate) 990 if (item->validate)
925 value = item->validate(SNC_VALIDATE_IN, value); 991 value = item->validate(SNC_VALIDATE_IN, value);
@@ -927,8 +993,11 @@ static ssize_t sony_nc_sysfs_store(struct device *dev,
927 if (value < 0) 993 if (value < 0)
928 return value; 994 return value;
929 995
930 if (acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, value, NULL) < 0) 996 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
997 (int *)&value, NULL);
998 if (ret < 0)
931 return -EIO; 999 return -EIO;
1000
932 item->value = value; 1001 item->value = value;
933 item->valid = 1; 1002 item->valid = 1;
934 return count; 1003 return count;
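The store path above also moves from simple_strtoul() to kstrtoul(), which reports malformed input and overflow instead of silently returning a partial value. A minimal sysfs store sketch using the same call; the attribute name and range check are illustrative:

#include <linux/kernel.h>
#include <linux/device.h>

static ssize_t example_store(struct device *dev, struct device_attribute *attr,
                             const char *buf, size_t count)
{
        unsigned long value;

        /* kstrtoul() rejects bad input and overflow, unlike the old
         * simple_strtoul() call it replaces */
        if (kstrtoul(buf, 10, &value))
                return -EINVAL;

        if (value > 255)                /* illustrative upper bound */
                return -EINVAL;

        /* apply value to the hardware here */
        return count;
}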
@@ -948,15 +1017,15 @@ struct sony_backlight_props sony_bl_props;
948 1017
949static int sony_backlight_update_status(struct backlight_device *bd) 1018static int sony_backlight_update_status(struct backlight_device *bd)
950{ 1019{
951 return acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", 1020 int arg = bd->props.brightness + 1;
952 bd->props.brightness + 1, NULL); 1021 return sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &arg, NULL);
953} 1022}
954 1023
955static int sony_backlight_get_brightness(struct backlight_device *bd) 1024static int sony_backlight_get_brightness(struct backlight_device *bd)
956{ 1025{
957 int value; 1026 int value;
958 1027
959 if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) 1028 if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL, &value))
960 return 0; 1029 return 0;
961 /* brightness levels are 1-based, while backlight ones are 0-based */ 1030 /* brightness levels are 1-based, while backlight ones are 0-based */
962 return value - 1; 1031 return value - 1;
@@ -1024,10 +1093,14 @@ static struct sony_nc_event sony_100_events[] = {
1024 { 0x06, SONYPI_EVENT_FNKEY_RELEASED }, 1093 { 0x06, SONYPI_EVENT_FNKEY_RELEASED },
1025 { 0x87, SONYPI_EVENT_FNKEY_F7 }, 1094 { 0x87, SONYPI_EVENT_FNKEY_F7 },
1026 { 0x07, SONYPI_EVENT_FNKEY_RELEASED }, 1095 { 0x07, SONYPI_EVENT_FNKEY_RELEASED },
1096 { 0x88, SONYPI_EVENT_FNKEY_F8 },
1097 { 0x08, SONYPI_EVENT_FNKEY_RELEASED },
1027 { 0x89, SONYPI_EVENT_FNKEY_F9 }, 1098 { 0x89, SONYPI_EVENT_FNKEY_F9 },
1028 { 0x09, SONYPI_EVENT_FNKEY_RELEASED }, 1099 { 0x09, SONYPI_EVENT_FNKEY_RELEASED },
1029 { 0x8A, SONYPI_EVENT_FNKEY_F10 }, 1100 { 0x8A, SONYPI_EVENT_FNKEY_F10 },
1030 { 0x0A, SONYPI_EVENT_FNKEY_RELEASED }, 1101 { 0x0A, SONYPI_EVENT_FNKEY_RELEASED },
1102 { 0x8B, SONYPI_EVENT_FNKEY_F11 },
1103 { 0x0B, SONYPI_EVENT_FNKEY_RELEASED },
1031 { 0x8C, SONYPI_EVENT_FNKEY_F12 }, 1104 { 0x8C, SONYPI_EVENT_FNKEY_F12 },
1032 { 0x0C, SONYPI_EVENT_FNKEY_RELEASED }, 1105 { 0x0C, SONYPI_EVENT_FNKEY_RELEASED },
1033 { 0x9d, SONYPI_EVENT_ZOOM_PRESSED }, 1106 { 0x9d, SONYPI_EVENT_ZOOM_PRESSED },
@@ -1063,63 +1136,116 @@ static struct sony_nc_event sony_127_events[] = {
1063 { 0, 0 }, 1136 { 0, 0 },
1064}; 1137};
1065 1138
1139static int sony_nc_hotkeys_decode(u32 event, unsigned int handle)
1140{
1141 int ret = -EINVAL;
1142 unsigned int result = 0;
1143 struct sony_nc_event *key_event;
1144
1145 if (sony_call_snc_handle(handle, 0x200, &result)) {
1146 dprintk("Unable to decode event 0x%.2x 0x%.2x\n", handle,
1147 event);
1148 return -EINVAL;
1149 }
1150
1151 result &= 0xFF;
1152
1153 if (handle == 0x0100)
1154 key_event = sony_100_events;
1155 else
1156 key_event = sony_127_events;
1157
1158 for (; key_event->data; key_event++) {
1159 if (key_event->data == result) {
1160 ret = key_event->event;
1161 break;
1162 }
1163 }
1164
1165 if (!key_event->data)
1166 pr_info("Unknown hotkey 0x%.2x/0x%.2x (handle 0x%.2x)\n",
1167 event, result, handle);
1168
1169 return ret;
1170}
1171
1066/* 1172/*
1067 * ACPI callbacks 1173 * ACPI callbacks
1068 */ 1174 */
1069static void sony_nc_notify(struct acpi_device *device, u32 event) 1175static void sony_nc_notify(struct acpi_device *device, u32 event)
1070{ 1176{
1071 u32 ev = event; 1177 u32 real_ev = event;
1178 u8 ev_type = 0;
1179 dprintk("sony_nc_notify, event: 0x%.2x\n", event);
1180
1181 if (event >= 0x90) {
1182 unsigned int result = 0;
1183 unsigned int arg = 0;
1184 unsigned int handle = 0;
1185 unsigned int offset = event - 0x90;
1186
1187 if (offset >= ARRAY_SIZE(handles->cap)) {
1188 pr_err("Event 0x%x outside of capabilities list\n",
1189 event);
1190 return;
1191 }
1192 handle = handles->cap[offset];
1193
1194 /* list of handles known for generating events */
1195 switch (handle) {
1196 /* hotkey event */
1197 case 0x0100:
1198 case 0x0127:
1199 ev_type = 1;
1200 real_ev = sony_nc_hotkeys_decode(event, handle);
1201
1202 if (real_ev > 0)
1203 sony_laptop_report_input_event(real_ev);
1204 else
1205 /* restore the original event for reporting */
1206 real_ev = event;
1072 1207
1073 if (ev >= 0x90) { 1208 break;
1074 /* New-style event */
1075 int result;
1076 int key_handle = 0;
1077 ev -= 0x90;
1078
1079 if (sony_find_snc_handle(0x100) == ev)
1080 key_handle = 0x100;
1081 if (sony_find_snc_handle(0x127) == ev)
1082 key_handle = 0x127;
1083
1084 if (key_handle) {
1085 struct sony_nc_event *key_event;
1086
1087 if (sony_call_snc_handle(key_handle, 0x200, &result)) {
1088 dprintk("sony_nc_notify, unable to decode"
1089 " event 0x%.2x 0x%.2x\n", key_handle,
1090 ev);
1091 /* restore the original event */
1092 ev = event;
1093 } else {
1094 ev = result & 0xFF;
1095
1096 if (key_handle == 0x100)
1097 key_event = sony_100_events;
1098 else
1099 key_event = sony_127_events;
1100
1101 for (; key_event->data; key_event++) {
1102 if (key_event->data == ev) {
1103 ev = key_event->event;
1104 break;
1105 }
1106 }
1107 1209
1108 if (!key_event->data) 1210 /* wlan switch */
1109 pr_info("Unknown event: 0x%x 0x%x\n", 1211 case 0x0124:
1110 key_handle, ev); 1212 case 0x0135:
1111 else 1213 /* events on this handle are reported when the
1112 sony_laptop_report_input_event(ev); 1214 * switch changes position or for battery
1113 } 1215 * events. We'll notify both of them but only
1114 } else if (sony_find_snc_handle(sony_rfkill_handle) == ev) { 1216 * update the rfkill device status when the
1115 sony_nc_rfkill_update(); 1217 * switch is moved.
1116 return; 1218 */
1219 ev_type = 2;
1220 sony_call_snc_handle(handle, 0x0100, &result);
1221 real_ev = result & 0x03;
1222
1223 /* hw switch event */
1224 if (real_ev == 1)
1225 sony_nc_rfkill_update();
1226
1227 break;
1228
1229 default:
1230 dprintk("Unknown event 0x%x for handle 0x%x\n",
1231 event, handle);
1232 break;
1117 } 1233 }
1118 } else
1119 sony_laptop_report_input_event(ev);
1120 1234
1121 dprintk("sony_nc_notify, event: 0x%.2x\n", ev); 1235 /* clear the event (and the event reason when present) */
1122 acpi_bus_generate_proc_event(sony_nc_acpi_device, 1, ev); 1236 arg = 1 << offset;
1237 sony_nc_int_call(sony_nc_acpi_handle, "SN05", &arg, &result);
1238
1239 } else {
1240 /* old style event */
1241 ev_type = 1;
1242 sony_laptop_report_input_event(real_ev);
1243 }
1244
1245 acpi_bus_generate_proc_event(sony_nc_acpi_device, ev_type, real_ev);
1246
1247 acpi_bus_generate_netlink_event(sony_nc_acpi_device->pnp.device_class,
1248 dev_name(&sony_nc_acpi_device->dev), ev_type, real_ev);
1123} 1249}
1124 1250
1125static acpi_status sony_walk_callback(acpi_handle handle, u32 level, 1251static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
@@ -1140,20 +1266,190 @@ static acpi_status sony_walk_callback(acpi_handle handle, u32 level,
1140/* 1266/*
1141 * ACPI device 1267 * ACPI device
1142 */ 1268 */
1143static int sony_nc_function_setup(struct acpi_device *device) 1269static void sony_nc_function_setup(struct acpi_device *device,
1270 struct platform_device *pf_device)
1144{ 1271{
1145 int result; 1272 unsigned int i, result, bitmask, arg;
1273
1274 if (!handles)
1275 return;
1276
1277 /* setup found handles here */
1278 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1279 unsigned int handle = handles->cap[i];
1280
1281 if (!handle)
1282 continue;
1283
1284 dprintk("setting up handle 0x%.4x\n", handle);
1285
1286 switch (handle) {
1287 case 0x0100:
1288 case 0x0101:
1289 case 0x0127:
1290 /* setup hotkeys */
1291 sony_call_snc_handle(handle, 0, &result);
1292 break;
1293 case 0x0102:
1294 /* setup hotkeys */
1295 sony_call_snc_handle(handle, 0x100, &result);
1296 break;
1297 case 0x0105:
1298 case 0x0148:
1299 /* touchpad enable/disable */
1300 result = sony_nc_touchpad_setup(pf_device, handle);
1301 if (result)
1302 pr_err("couldn't set up touchpad control function (%d)\n",
1303 result);
1304 break;
1305 case 0x0115:
1306 case 0x0136:
1307 case 0x013f:
1308 result = sony_nc_battery_care_setup(pf_device, handle);
1309 if (result)
1310 pr_err("couldn't set up battery care function (%d)\n",
1311 result);
1312 break;
1313 case 0x0119:
1314 result = sony_nc_lid_resume_setup(pf_device);
1315 if (result)
1316 pr_err("couldn't set up lid resume function (%d)\n",
1317 result);
1318 break;
1319 case 0x0122:
1320 result = sony_nc_thermal_setup(pf_device);
1321 if (result)
1322 pr_err("couldn't set up thermal profile function (%d)\n",
1323 result);
1324 break;
1325 case 0x0131:
1326 result = sony_nc_highspeed_charging_setup(pf_device);
1327 if (result)
1328 pr_err("couldn't set up high speed charging function (%d)\n",
1329 result);
1330 break;
1331 case 0x0124:
1332 case 0x0135:
1333 result = sony_nc_rfkill_setup(device, handle);
1334 if (result)
1335 pr_err("couldn't set up rfkill support (%d)\n",
1336 result);
1337 break;
1338 case 0x0137:
1339 case 0x0143:
1340 result = sony_nc_kbd_backlight_setup(pf_device, handle);
1341 if (result)
1342 pr_err("couldn't set up keyboard backlight function (%d)\n",
1343 result);
1344 break;
1345 default:
1346 continue;
1347 }
1348 }
1146 1349
1147 /* Enable all events */ 1350 /* Enable all events */
1148 acpi_callsetfunc(sony_nc_acpi_handle, "SN02", 0xffff, &result); 1351 arg = 0x10;
1352 if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
1353 sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
1354 &result);
1355}
1356
1357static void sony_nc_function_cleanup(struct platform_device *pd)
1358{
1359 unsigned int i, result, bitmask, handle;
1149 1360
1150 /* Setup hotkeys */ 1361 /* get enabled events and disable them */
1151 sony_call_snc_handle(0x0100, 0, &result); 1362 sony_nc_int_call(sony_nc_acpi_handle, "SN01", NULL, &bitmask);
1152 sony_call_snc_handle(0x0101, 0, &result); 1363 sony_nc_int_call(sony_nc_acpi_handle, "SN03", &bitmask, &result);
1153 sony_call_snc_handle(0x0102, 0x100, &result);
1154 sony_call_snc_handle(0x0127, 0, &result);
1155 1364
1156 return 0; 1365 /* cleanup handles here */
1366 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1367
1368 handle = handles->cap[i];
1369
1370 if (!handle)
1371 continue;
1372
1373 switch (handle) {
1374 case 0x0105:
1375 case 0x0148:
1376 sony_nc_touchpad_cleanup(pd);
1377 break;
1378 case 0x0115:
1379 case 0x0136:
1380 case 0x013f:
1381 sony_nc_battery_care_cleanup(pd);
1382 break;
1383 case 0x0119:
1384 sony_nc_lid_resume_cleanup(pd);
1385 break;
1386 case 0x0122:
1387 sony_nc_thermal_cleanup(pd);
1388 break;
1389 case 0x0131:
1390 sony_nc_highspeed_charging_cleanup(pd);
1391 break;
1392 case 0x0124:
1393 case 0x0135:
1394 sony_nc_rfkill_cleanup();
1395 break;
1396 case 0x0137:
1397 case 0x0143:
1398 sony_nc_kbd_backlight_cleanup(pd);
1399 break;
1400 default:
1401 continue;
1402 }
1403 }
1404
1405 /* finally cleanup the handles list */
1406 sony_nc_handles_cleanup(pd);
1407}
1408
1409static void sony_nc_function_resume(void)
1410{
1411 unsigned int i, result, bitmask, arg;
1412
1413 dprintk("Resuming SNC device\n");
1414
1415 for (i = 0; i < ARRAY_SIZE(handles->cap); i++) {
1416 unsigned int handle = handles->cap[i];
1417
1418 if (!handle)
1419 continue;
1420
1421 switch (handle) {
1422 case 0x0100:
1423 case 0x0101:
1424 case 0x0127:
1425 /* re-enable hotkeys */
1426 sony_call_snc_handle(handle, 0, &result);
1427 break;
1428 case 0x0102:
1429 /* re-enable hotkeys */
1430 sony_call_snc_handle(handle, 0x100, &result);
1431 break;
1432 case 0x0122:
1433 sony_nc_thermal_resume();
1434 break;
1435 case 0x0124:
1436 case 0x0135:
1437 sony_nc_rfkill_update();
1438 break;
1439 case 0x0137:
1440 case 0x0143:
1441 sony_nc_kbd_backlight_resume();
1442 break;
1443 default:
1444 continue;
1445 }
1446 }
1447
1448 /* Enable all events */
1449 arg = 0x10;
1450 if (!sony_nc_int_call(sony_nc_acpi_handle, "SN00", &arg, &bitmask))
1451 sony_nc_int_call(sony_nc_acpi_handle, "SN02", &bitmask,
1452 &result);
1157} 1453}
1158 1454
1159static int sony_nc_resume(struct acpi_device *device) 1455static int sony_nc_resume(struct acpi_device *device)
@@ -1166,8 +1462,8 @@ static int sony_nc_resume(struct acpi_device *device)
1166 1462
1167 if (!item->valid) 1463 if (!item->valid)
1168 continue; 1464 continue;
1169 ret = acpi_callsetfunc(sony_nc_acpi_handle, *item->acpiset, 1465 ret = sony_nc_int_call(sony_nc_acpi_handle, *item->acpiset,
1170 item->value, NULL); 1466 &item->value, NULL);
1171 if (ret < 0) { 1467 if (ret < 0) {
1172 pr_err("%s: %d\n", __func__, ret); 1468 pr_err("%s: %d\n", __func__, ret);
1173 break; 1469 break;
@@ -1176,21 +1472,14 @@ static int sony_nc_resume(struct acpi_device *device)
1176 1472
1177 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 1473 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1178 &handle))) { 1474 &handle))) {
1179 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 1475 int arg = 1;
1476 if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
1180 dprintk("ECON Method failed\n"); 1477 dprintk("ECON Method failed\n");
1181 } 1478 }
1182 1479
1183 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", 1480 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
1184 &handle))) { 1481 &handle)))
1185 dprintk("Doing SNC setup\n"); 1482 sony_nc_function_resume();
1186 sony_nc_function_setup(device);
1187 }
1188
1189 /* re-read rfkill state */
1190 sony_nc_rfkill_update();
1191
1192 /* restore kbd backlight states */
1193 sony_nc_kbd_backlight_resume();
1194 1483
1195 return 0; 1484 return 0;
1196} 1485}
@@ -1213,7 +1502,7 @@ static int sony_nc_rfkill_set(void *data, bool blocked)
1213 int argument = sony_rfkill_address[(long) data] + 0x100; 1502 int argument = sony_rfkill_address[(long) data] + 0x100;
1214 1503
1215 if (!blocked) 1504 if (!blocked)
1216 argument |= 0xff0000; 1505 argument |= 0x030000;
1217 1506
1218 return sony_call_snc_handle(sony_rfkill_handle, argument, &result); 1507 return sony_call_snc_handle(sony_rfkill_handle, argument, &result);
1219} 1508}
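The unblock argument changes from 0xff0000 to 0x030000 here, and the setup/update code below reads the radio state back as bit 0 (hardware) and bit 1 (software) of the SNC reply. A minimal, self-contained sketch of that bit layout follows; the 0x0900 address and the helper name are illustrative only, not taken from the driver.

	/* Sketch only: mirrors the bit layout used by sony_nc_rfkill_set()
	 * and sony_nc_setup_rfkill(); not part of the patch itself. */
	#include <stdbool.h>
	#include <stdio.h>

	/* Compose the SNC argument: device address + 0x100 selects the "set"
	 * operation, bits 16-17 request software unblock when enabling. */
	static unsigned int rfkill_argument(unsigned int address, bool blocked)
	{
		unsigned int argument = address + 0x100;

		if (!blocked)
			argument |= 0x030000;	/* was 0xff0000 before this patch */
		return argument;
	}

	int main(void)
	{
		unsigned int status = 0x3;	/* hypothetical SNC status word */

		printf("arg=0x%06x hwblock=%d swblock=%d\n",
		       rfkill_argument(0x0900, false),
		       !(status & 0x1),	/* bit 0: hardware radio state */
		       !(status & 0x2));	/* bit 1: software radio state */
		return 0;
	}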
@@ -1230,7 +1519,7 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1230 enum rfkill_type type; 1519 enum rfkill_type type;
1231 const char *name; 1520 const char *name;
1232 int result; 1521 int result;
1233 bool hwblock; 1522 bool hwblock, swblock;
1234 1523
1235 switch (nc_type) { 1524 switch (nc_type) {
1236 case SONY_WIFI: 1525 case SONY_WIFI:
@@ -1258,8 +1547,21 @@ static int sony_nc_setup_rfkill(struct acpi_device *device,
1258 if (!rfk) 1547 if (!rfk)
1259 return -ENOMEM; 1548 return -ENOMEM;
1260 1549
1261 sony_call_snc_handle(sony_rfkill_handle, 0x200, &result); 1550 if (sony_call_snc_handle(sony_rfkill_handle, 0x200, &result) < 0) {
1551 rfkill_destroy(rfk);
1552 return -1;
1553 }
1262 hwblock = !(result & 0x1); 1554 hwblock = !(result & 0x1);
1555
1556 if (sony_call_snc_handle(sony_rfkill_handle,
1557 sony_rfkill_address[nc_type],
1558 &result) < 0) {
1559 rfkill_destroy(rfk);
1560 return -1;
1561 }
1562 swblock = !(result & 0x2);
1563
1564 rfkill_init_sw_state(rfk, swblock);
1263 rfkill_set_hw_state(rfk, hwblock); 1565 rfkill_set_hw_state(rfk, hwblock);
1264 1566
1265 err = rfkill_register(rfk); 1567 err = rfkill_register(rfk);
@@ -1295,101 +1597,79 @@ static void sony_nc_rfkill_update(void)
1295 1597
1296 sony_call_snc_handle(sony_rfkill_handle, argument, &result); 1598 sony_call_snc_handle(sony_rfkill_handle, argument, &result);
1297 rfkill_set_states(sony_rfkill_devices[i], 1599 rfkill_set_states(sony_rfkill_devices[i],
1298 !(result & 0xf), false); 1600 !(result & 0x2), false);
1299 } 1601 }
1300} 1602}
1301 1603
1302static void sony_nc_rfkill_setup(struct acpi_device *device) 1604static int sony_nc_rfkill_setup(struct acpi_device *device,
1605 unsigned int handle)
1303{ 1606{
1304 int offset; 1607 u64 offset;
1305 u8 dev_code, i; 1608 int i;
1306 acpi_status status; 1609 unsigned char buffer[32] = { 0 };
1307 struct acpi_object_list params;
1308 union acpi_object in_obj;
1309 union acpi_object *device_enum;
1310 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1311
1312 offset = sony_find_snc_handle(0x124);
1313 if (offset == -1) {
1314 offset = sony_find_snc_handle(0x135);
1315 if (offset == -1)
1316 return;
1317 else
1318 sony_rfkill_handle = 0x135;
1319 } else
1320 sony_rfkill_handle = 0x124;
1321 dprintk("Found rkfill handle: 0x%.4x\n", sony_rfkill_handle);
1322
1323 /* need to read the whole buffer returned by the acpi call to SN06
1324 * here otherwise we may miss some features
1325 */
1326 params.count = 1;
1327 params.pointer = &in_obj;
1328 in_obj.type = ACPI_TYPE_INTEGER;
1329 in_obj.integer.value = offset;
1330 status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
1331 &buffer);
1332 if (ACPI_FAILURE(status)) {
1333 dprintk("Radio device enumeration failed\n");
1334 return;
1335 }
1336
1337 device_enum = (union acpi_object *) buffer.pointer;
1338 if (!device_enum) {
1339 pr_err("No SN06 return object\n");
1340 goto out_no_enum;
1341 }
1342 if (device_enum->type != ACPI_TYPE_BUFFER) {
1343 pr_err("Invalid SN06 return object 0x%.2x\n",
1344 device_enum->type);
1345 goto out_no_enum;
1346 }
1347 1610
1348 /* the buffer is filled with magic numbers describing the devices 1611 offset = sony_find_snc_handle(handle);
1349 * available, 0xff terminates the enumeration 1612 sony_rfkill_handle = handle;
1613
1614 i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
1615 32);
1616 if (i < 0)
1617 return i;
1618
1619 /* The buffer is filled with magic numbers describing the devices
1620 * available, 0xff terminates the enumeration.
1621 * Known codes:
1622 * 0x00 WLAN
1623 * 0x10 BLUETOOTH
1624 * 0x20 WWAN GPRS-EDGE
1625 * 0x21 WWAN HSDPA
1626 * 0x22 WWAN EV-DO
1627 * 0x23 WWAN GPS
1628 * 0x25 Gobi WWAN no GPS
1629 * 0x26 Gobi WWAN + GPS
1630 * 0x28 Gobi WWAN no GPS
1631 * 0x29 Gobi WWAN + GPS
1632 * 0x30 WIMAX
1633 * 0x50 Gobi WWAN no GPS
1634 * 0x51 Gobi WWAN + GPS
1635 * 0x70 no SIM card slot
1636 * 0x71 SIM card slot
1350 */ 1637 */
1351 for (i = 0; i < device_enum->buffer.length; i++) { 1638 for (i = 0; i < ARRAY_SIZE(buffer); i++) {
1352 1639
1353 dev_code = *(device_enum->buffer.pointer + i); 1640 if (buffer[i] == 0xff)
1354 if (dev_code == 0xff)
1355 break; 1641 break;
1356 1642
1357 dprintk("Radio devices, looking at 0x%.2x\n", dev_code); 1643 dprintk("Radio devices, found 0x%.2x\n", buffer[i]);
1358 1644
1359 if (dev_code == 0 && !sony_rfkill_devices[SONY_WIFI]) 1645 if (buffer[i] == 0 && !sony_rfkill_devices[SONY_WIFI])
1360 sony_nc_setup_rfkill(device, SONY_WIFI); 1646 sony_nc_setup_rfkill(device, SONY_WIFI);
1361 1647
1362 if (dev_code == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH]) 1648 if (buffer[i] == 0x10 && !sony_rfkill_devices[SONY_BLUETOOTH])
1363 sony_nc_setup_rfkill(device, SONY_BLUETOOTH); 1649 sony_nc_setup_rfkill(device, SONY_BLUETOOTH);
1364 1650
1365 if ((0xf0 & dev_code) == 0x20 && 1651 if (((0xf0 & buffer[i]) == 0x20 ||
1652 (0xf0 & buffer[i]) == 0x50) &&
1366 !sony_rfkill_devices[SONY_WWAN]) 1653 !sony_rfkill_devices[SONY_WWAN])
1367 sony_nc_setup_rfkill(device, SONY_WWAN); 1654 sony_nc_setup_rfkill(device, SONY_WWAN);
1368 1655
1369 if (dev_code == 0x30 && !sony_rfkill_devices[SONY_WIMAX]) 1656 if (buffer[i] == 0x30 && !sony_rfkill_devices[SONY_WIMAX])
1370 sony_nc_setup_rfkill(device, SONY_WIMAX); 1657 sony_nc_setup_rfkill(device, SONY_WIMAX);
1371 } 1658 }
1372 1659 return 0;
1373out_no_enum:
1374 kfree(buffer.pointer);
1375 return;
1376} 1660}
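The comment above documents the SN06 device codes; the loop in sony_nc_rfkill_setup() simply walks the returned buffer until the 0xff terminator. A small stand-alone sketch of the same scan, with a hypothetical buffer, reads:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical SN06-style reply: WLAN, Bluetooth, Gobi WWAN+GPS,
		 * terminator. Real contents are model dependent. */
		unsigned char buffer[32] = { 0x00, 0x10, 0x51, 0xff };
		int i;

		for (i = 0; i < (int)sizeof(buffer); i++) {
			if (buffer[i] == 0xff)	/* 0xff ends the enumeration */
				break;
			if ((buffer[i] & 0xf0) == 0x20 || (buffer[i] & 0xf0) == 0x50)
				printf("0x%02x -> WWAN variant\n", buffer[i]);
			else
				printf("0x%02x -> device code\n", buffer[i]);
		}
		return 0;
	}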
1377 1661
1378/* Keyboard backlight feature */ 1662/* Keyboard backlight feature */
1379#define KBDBL_HANDLER 0x137
1380#define KBDBL_PRESENT 0xB00
1381#define SET_MODE 0xC00
1382#define SET_STATE 0xD00
1383#define SET_TIMEOUT 0xE00
1384
1385struct kbd_backlight { 1663struct kbd_backlight {
1386 int mode; 1664 unsigned int handle;
1387 int timeout; 1665 unsigned int base;
1666 unsigned int mode;
1667 unsigned int timeout;
1388 struct device_attribute mode_attr; 1668 struct device_attribute mode_attr;
1389 struct device_attribute timeout_attr; 1669 struct device_attribute timeout_attr;
1390}; 1670};
1391 1671
1392static struct kbd_backlight *kbdbl_handle; 1672static struct kbd_backlight *kbdbl_ctl;
1393 1673
1394static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value) 1674static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1395{ 1675{
@@ -1398,15 +1678,15 @@ static ssize_t __sony_nc_kbd_backlight_mode_set(u8 value)
1398 if (value > 1) 1678 if (value > 1)
1399 return -EINVAL; 1679 return -EINVAL;
1400 1680
1401 if (sony_call_snc_handle(KBDBL_HANDLER, 1681 if (sony_call_snc_handle(kbdbl_ctl->handle,
1402 (value << 0x10) | SET_MODE, &result)) 1682 (value << 0x10) | (kbdbl_ctl->base), &result))
1403 return -EIO; 1683 return -EIO;
1404 1684
1405 /* Try to turn the light on/off immediately */ 1685 /* Try to turn the light on/off immediately */
1406 sony_call_snc_handle(KBDBL_HANDLER, (value << 0x10) | SET_STATE, 1686 sony_call_snc_handle(kbdbl_ctl->handle,
1407 &result); 1687 (value << 0x10) | (kbdbl_ctl->base + 0x100), &result);
1408 1688
1409 kbdbl_handle->mode = value; 1689 kbdbl_ctl->mode = value;
1410 1690
1411 return 0; 1691 return 0;
1412} 1692}
@@ -1421,7 +1701,7 @@ static ssize_t sony_nc_kbd_backlight_mode_store(struct device *dev,
1421 if (count > 31) 1701 if (count > 31)
1422 return -EINVAL; 1702 return -EINVAL;
1423 1703
1424 if (strict_strtoul(buffer, 10, &value)) 1704 if (kstrtoul(buffer, 10, &value))
1425 return -EINVAL; 1705 return -EINVAL;
1426 1706
1427 ret = __sony_nc_kbd_backlight_mode_set(value); 1707 ret = __sony_nc_kbd_backlight_mode_set(value);
@@ -1435,7 +1715,7 @@ static ssize_t sony_nc_kbd_backlight_mode_show(struct device *dev,
1435 struct device_attribute *attr, char *buffer) 1715 struct device_attribute *attr, char *buffer)
1436{ 1716{
1437 ssize_t count = 0; 1717 ssize_t count = 0;
1438 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->mode); 1718 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->mode);
1439 return count; 1719 return count;
1440} 1720}
1441 1721
@@ -1446,11 +1726,11 @@ static int __sony_nc_kbd_backlight_timeout_set(u8 value)
1446 if (value > 3) 1726 if (value > 3)
1447 return -EINVAL; 1727 return -EINVAL;
1448 1728
1449 if (sony_call_snc_handle(KBDBL_HANDLER, 1729 if (sony_call_snc_handle(kbdbl_ctl->handle, (value << 0x10) |
1450 (value << 0x10) | SET_TIMEOUT, &result)) 1730 (kbdbl_ctl->base + 0x200), &result))
1451 return -EIO; 1731 return -EIO;
1452 1732
1453 kbdbl_handle->timeout = value; 1733 kbdbl_ctl->timeout = value;
1454 1734
1455 return 0; 1735 return 0;
1456} 1736}
@@ -1465,7 +1745,7 @@ static ssize_t sony_nc_kbd_backlight_timeout_store(struct device *dev,
1465 if (count > 31) 1745 if (count > 31)
1466 return -EINVAL; 1746 return -EINVAL;
1467 1747
1468 if (strict_strtoul(buffer, 10, &value)) 1748 if (kstrtoul(buffer, 10, &value))
1469 return -EINVAL; 1749 return -EINVAL;
1470 1750
1471 ret = __sony_nc_kbd_backlight_timeout_set(value); 1751 ret = __sony_nc_kbd_backlight_timeout_set(value);
@@ -1479,39 +1759,58 @@ static ssize_t sony_nc_kbd_backlight_timeout_show(struct device *dev,
1479 struct device_attribute *attr, char *buffer) 1759 struct device_attribute *attr, char *buffer)
1480{ 1760{
1481 ssize_t count = 0; 1761 ssize_t count = 0;
1482 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_handle->timeout); 1762 count = snprintf(buffer, PAGE_SIZE, "%d\n", kbdbl_ctl->timeout);
1483 return count; 1763 return count;
1484} 1764}
1485 1765
1486static int sony_nc_kbd_backlight_setup(struct platform_device *pd) 1766static int sony_nc_kbd_backlight_setup(struct platform_device *pd,
1767 unsigned int handle)
1487{ 1768{
1488 int result; 1769 int result;
1770 int ret = 0;
1489 1771
1490 if (sony_call_snc_handle(KBDBL_HANDLER, KBDBL_PRESENT, &result)) 1772 /* verify the kbd backlight presence, these handles are not used for
1491 return 0; 1773 * keyboard backlight only
1492 if (!(result & 0x02)) 1774 */
1775 ret = sony_call_snc_handle(handle, handle == 0x0137 ? 0x0B00 : 0x0100,
1776 &result);
1777 if (ret)
1778 return ret;
1779
1780 if ((handle == 0x0137 && !(result & 0x02)) ||
1781 !(result & 0x01)) {
1782 dprintk("no keyboard backlight found\n");
1493 return 0; 1783 return 0;
1784 }
1494 1785
1495 kbdbl_handle = kzalloc(sizeof(*kbdbl_handle), GFP_KERNEL); 1786 kbdbl_ctl = kzalloc(sizeof(*kbdbl_ctl), GFP_KERNEL);
1496 if (!kbdbl_handle) 1787 if (!kbdbl_ctl)
1497 return -ENOMEM; 1788 return -ENOMEM;
1498 1789
1499 sysfs_attr_init(&kbdbl_handle->mode_attr.attr); 1790 kbdbl_ctl->handle = handle;
1500 kbdbl_handle->mode_attr.attr.name = "kbd_backlight"; 1791 if (handle == 0x0137)
1501 kbdbl_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR; 1792 kbdbl_ctl->base = 0x0C00;
1502 kbdbl_handle->mode_attr.show = sony_nc_kbd_backlight_mode_show; 1793 else
1503 kbdbl_handle->mode_attr.store = sony_nc_kbd_backlight_mode_store; 1794 kbdbl_ctl->base = 0x4000;
1795
1796 sysfs_attr_init(&kbdbl_ctl->mode_attr.attr);
1797 kbdbl_ctl->mode_attr.attr.name = "kbd_backlight";
1798 kbdbl_ctl->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
1799 kbdbl_ctl->mode_attr.show = sony_nc_kbd_backlight_mode_show;
1800 kbdbl_ctl->mode_attr.store = sony_nc_kbd_backlight_mode_store;
1504 1801
1505 sysfs_attr_init(&kbdbl_handle->timeout_attr.attr); 1802 sysfs_attr_init(&kbdbl_ctl->timeout_attr.attr);
1506 kbdbl_handle->timeout_attr.attr.name = "kbd_backlight_timeout"; 1803 kbdbl_ctl->timeout_attr.attr.name = "kbd_backlight_timeout";
1507 kbdbl_handle->timeout_attr.attr.mode = S_IRUGO | S_IWUSR; 1804 kbdbl_ctl->timeout_attr.attr.mode = S_IRUGO | S_IWUSR;
1508 kbdbl_handle->timeout_attr.show = sony_nc_kbd_backlight_timeout_show; 1805 kbdbl_ctl->timeout_attr.show = sony_nc_kbd_backlight_timeout_show;
1509 kbdbl_handle->timeout_attr.store = sony_nc_kbd_backlight_timeout_store; 1806 kbdbl_ctl->timeout_attr.store = sony_nc_kbd_backlight_timeout_store;
1510 1807
1511 if (device_create_file(&pd->dev, &kbdbl_handle->mode_attr)) 1808 ret = device_create_file(&pd->dev, &kbdbl_ctl->mode_attr);
1809 if (ret)
1512 goto outkzalloc; 1810 goto outkzalloc;
1513 1811
1514 if (device_create_file(&pd->dev, &kbdbl_handle->timeout_attr)) 1812 ret = device_create_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1813 if (ret)
1515 goto outmode; 1814 goto outmode;
1516 1815
1517 __sony_nc_kbd_backlight_mode_set(kbd_backlight); 1816 __sony_nc_kbd_backlight_mode_set(kbd_backlight);
@@ -1520,57 +1819,661 @@ static int sony_nc_kbd_backlight_setup(struct platform_device *pd)
1520 return 0; 1819 return 0;
1521 1820
1522outmode: 1821outmode:
1523 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); 1822 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1524outkzalloc: 1823outkzalloc:
1525 kfree(kbdbl_handle); 1824 kfree(kbdbl_ctl);
1526 kbdbl_handle = NULL; 1825 kbdbl_ctl = NULL;
1527 return -1; 1826 return ret;
1528} 1827}
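The reworked keyboard-backlight code derives every command from a per-handle base (0x0C00 for handle 0x0137, 0x4000 otherwise): the mode lives at base, the immediate state at base + 0x100, the timeout at base + 0x200, and the value sits in bits 16 and up. A compilable sketch of that composition (helper name and printed labels are illustrative):

	#include <stdio.h>

	/* Build the argument passed to sony_call_snc_handle() for the
	 * keyboard backlight: value in the high word, the low word selects
	 * mode (base), state (base + 0x100) or timeout (base + 0x200). */
	static unsigned int kbdbl_cmd(unsigned int base, unsigned int offset,
				      unsigned int value)
	{
		return (value << 0x10) | (base + offset);
	}

	int main(void)
	{
		unsigned int base = 0x0C00;	/* handle 0x0137; others use 0x4000 */

		printf("mode=1:    0x%08x\n", kbdbl_cmd(base, 0x000, 1));
		printf("state=1:   0x%08x\n", kbdbl_cmd(base, 0x100, 1));
		printf("timeout=2: 0x%08x\n", kbdbl_cmd(base, 0x200, 2));
		return 0;
	}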
1529 1828
1530static int sony_nc_kbd_backlight_cleanup(struct platform_device *pd) 1829static void sony_nc_kbd_backlight_cleanup(struct platform_device *pd)
1531{ 1830{
1532 if (kbdbl_handle) { 1831 if (kbdbl_ctl) {
1533 int result; 1832 int result;
1534 1833
1535 device_remove_file(&pd->dev, &kbdbl_handle->mode_attr); 1834 device_remove_file(&pd->dev, &kbdbl_ctl->mode_attr);
1536 device_remove_file(&pd->dev, &kbdbl_handle->timeout_attr); 1835 device_remove_file(&pd->dev, &kbdbl_ctl->timeout_attr);
1537 1836
1538 /* restore the default hw behaviour */ 1837 /* restore the default hw behaviour */
1539 sony_call_snc_handle(KBDBL_HANDLER, 0x1000 | SET_MODE, &result); 1838 sony_call_snc_handle(kbdbl_ctl->handle,
1540 sony_call_snc_handle(KBDBL_HANDLER, SET_TIMEOUT, &result); 1839 kbdbl_ctl->base | 0x10000, &result);
1840 sony_call_snc_handle(kbdbl_ctl->handle,
1841 kbdbl_ctl->base + 0x200, &result);
1541 1842
1542 kfree(kbdbl_handle); 1843 kfree(kbdbl_ctl);
1844 kbdbl_ctl = NULL;
1543 } 1845 }
1544 return 0;
1545} 1846}
1546 1847
1547static void sony_nc_kbd_backlight_resume(void) 1848static void sony_nc_kbd_backlight_resume(void)
1548{ 1849{
1549 int ignore = 0; 1850 int ignore = 0;
1550 1851
1551 if (!kbdbl_handle) 1852 if (!kbdbl_ctl)
1552 return; 1853 return;
1553 1854
1554 if (kbdbl_handle->mode == 0) 1855 if (kbdbl_ctl->mode == 0)
1555 sony_call_snc_handle(KBDBL_HANDLER, SET_MODE, &ignore); 1856 sony_call_snc_handle(kbdbl_ctl->handle, kbdbl_ctl->base,
1556
1557 if (kbdbl_handle->timeout != 0)
1558 sony_call_snc_handle(KBDBL_HANDLER,
1559 (kbdbl_handle->timeout << 0x10) | SET_TIMEOUT,
1560 &ignore); 1857 &ignore);
1858
1859 if (kbdbl_ctl->timeout != 0)
1860 sony_call_snc_handle(kbdbl_ctl->handle,
1861 (kbdbl_ctl->base + 0x200) |
1862 (kbdbl_ctl->timeout << 0x10), &ignore);
1863}
1864
1865struct battery_care_control {
1866 struct device_attribute attrs[2];
1867 unsigned int handle;
1868};
1869static struct battery_care_control *bcare_ctl;
1870
1871static ssize_t sony_nc_battery_care_limit_store(struct device *dev,
1872 struct device_attribute *attr,
1873 const char *buffer, size_t count)
1874{
1875 unsigned int result, cmd;
1876 unsigned long value;
1877
1878 if (count > 31)
1879 return -EINVAL;
1880
1881 if (kstrtoul(buffer, 10, &value))
1882 return -EINVAL;
1883
1884 /* limit values (2 bits):
1885 * 00 - none
1886 * 01 - 80%
1887 * 10 - 50%
1888 * 11 - 100%
1889 *
1890 * bit 0: 0 disable BCL, 1 enable BCL
1891 * bit 1: 1 tells to also store the battery limit (see bits 6,7)
1892 * bits 2,3: reserved
1893 * bits 4,5: store the limit into the EC
1894 * bits 6,7: store the limit into the battery
1895 */
1896
1897 /*
1898 * handle 0x0115 should allow storing on battery too;
1899 * handle 0x0136 same as 0x0115 + health status;
1900 * handle 0x013f, same as 0x0136 but no storing on the battery
1901 *
1902 * Store only inside the EC for now, regardless of the handle number
1903 */
1904 if (value == 0)
1905 /* disable limits */
1906 cmd = 0x0;
1907
1908 else if (value <= 50)
1909 cmd = 0x21;
1910
1911 else if (value <= 80)
1912 cmd = 0x11;
1913
1914 else if (value <= 100)
1915 cmd = 0x31;
1916
1917 else
1918 return -EINVAL;
1919
1920 if (sony_call_snc_handle(bcare_ctl->handle, (cmd << 0x10) | 0x0100,
1921 &result))
1922 return -EIO;
1923
1924 return count;
1925}
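The comment above gives the limit encoding (bits 4-5: 01 = 80%, 10 = 50%, 11 = 100%; bit 0 enables the limiter), and the store path sends (cmd << 16) | 0x0100 to the handle. A stand-alone sketch of that mapping, mirroring the if/else chain above:

	#include <stdio.h>

	/* Map a requested percentage to the command word used by
	 * sony_nc_battery_care_limit_store(): bit 0 enables the limiter,
	 * bits 4-5 hold the limit (01=80%, 10=50%, 11=100%). */
	static int bcare_cmd(unsigned long value, unsigned int *cmd)
	{
		if (value == 0)
			*cmd = 0x0;		/* disable limits */
		else if (value <= 50)
			*cmd = 0x21;
		else if (value <= 80)
			*cmd = 0x11;
		else if (value <= 100)
			*cmd = 0x31;
		else
			return -1;
		return 0;
	}

	int main(void)
	{
		unsigned int cmd;

		if (!bcare_cmd(80, &cmd))
			printf("SNC argument: 0x%08x\n", (cmd << 0x10) | 0x0100);
		return 0;
	}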
1926
1927static ssize_t sony_nc_battery_care_limit_show(struct device *dev,
1928 struct device_attribute *attr, char *buffer)
1929{
1930 unsigned int result, status;
1931
1932 if (sony_call_snc_handle(bcare_ctl->handle, 0x0000, &result))
1933 return -EIO;
1934
1935 status = (result & 0x01) ? ((result & 0x30) >> 0x04) : 0;
1936 switch (status) {
1937 case 1:
1938 status = 80;
1939 break;
1940 case 2:
1941 status = 50;
1942 break;
1943 case 3:
1944 status = 100;
1945 break;
1946 default:
1947 status = 0;
1948 break;
1949 }
1950
1951 return snprintf(buffer, PAGE_SIZE, "%d\n", status);
1952}
1953
1954static ssize_t sony_nc_battery_care_health_show(struct device *dev,
1955 struct device_attribute *attr, char *buffer)
1956{
1957 ssize_t count = 0;
1958 unsigned int health;
1959
1960 if (sony_call_snc_handle(bcare_ctl->handle, 0x0200, &health))
1961 return -EIO;
1962
1963 count = snprintf(buffer, PAGE_SIZE, "%d\n", health & 0xff);
1964
1965 return count;
1966}
1967
1968static int sony_nc_battery_care_setup(struct platform_device *pd,
1969 unsigned int handle)
1970{
1971 int ret = 0;
1972
1973 bcare_ctl = kzalloc(sizeof(struct battery_care_control), GFP_KERNEL);
1974 if (!bcare_ctl)
1975 return -ENOMEM;
1976
1977 bcare_ctl->handle = handle;
1978
1979 sysfs_attr_init(&bcare_ctl->attrs[0].attr);
1980 bcare_ctl->attrs[0].attr.name = "battery_care_limiter";
1981 bcare_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
1982 bcare_ctl->attrs[0].show = sony_nc_battery_care_limit_show;
1983 bcare_ctl->attrs[0].store = sony_nc_battery_care_limit_store;
1984
1985 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[0]);
1986 if (ret)
1987 goto outkzalloc;
1988
1989 /* 0x0115 is for models with no health reporting capability */
1990 if (handle == 0x0115)
1991 return 0;
1992
1993 sysfs_attr_init(&bcare_ctl->attrs[1].attr);
1994 bcare_ctl->attrs[1].attr.name = "battery_care_health";
1995 bcare_ctl->attrs[1].attr.mode = S_IRUGO;
1996 bcare_ctl->attrs[1].show = sony_nc_battery_care_health_show;
1997
1998 ret = device_create_file(&pd->dev, &bcare_ctl->attrs[1]);
1999 if (ret)
2000 goto outlimiter;
2001
2002 return 0;
2003
2004outlimiter:
2005 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
2006
2007outkzalloc:
2008 kfree(bcare_ctl);
2009 bcare_ctl = NULL;
2010
2011 return ret;
2012}
2013
2014static void sony_nc_battery_care_cleanup(struct platform_device *pd)
2015{
2016 if (bcare_ctl) {
2017 device_remove_file(&pd->dev, &bcare_ctl->attrs[0]);
2018 if (bcare_ctl->handle != 0x0115)
2019 device_remove_file(&pd->dev, &bcare_ctl->attrs[1]);
2020
2021 kfree(bcare_ctl);
2022 bcare_ctl = NULL;
2023 }
2024}
2025
2026struct snc_thermal_ctrl {
2027 unsigned int mode;
2028 unsigned int profiles;
2029 struct device_attribute mode_attr;
2030 struct device_attribute profiles_attr;
2031};
2032static struct snc_thermal_ctrl *th_handle;
2033
2034#define THM_PROFILE_MAX 3
2035static const char * const snc_thermal_profiles[] = {
2036 "balanced",
2037 "silent",
2038 "performance"
2039};
2040
2041static int sony_nc_thermal_mode_set(unsigned short mode)
2042{
2043 unsigned int result;
2044
2045 /* the thermal profile seems to be a two bit bitmask:
2046 * lsb -> silent
2047 * msb -> performance
2048 * no bit set means normal operation and is always valid
2049 * Some VAIO models only have "balanced" and "performance"
2050 */
2051 if ((mode && !(th_handle->profiles & mode)) || mode >= THM_PROFILE_MAX)
2052 return -EINVAL;
2053
2054 if (sony_call_snc_handle(0x0122, mode << 0x10 | 0x0200, &result))
2055 return -EIO;
2056
2057 th_handle->mode = mode;
2058
2059 return 0;
2060}
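The thermal profile is a two-bit mask (bit 0 silent, bit 1 performance, no bits set for balanced); writing it sends (mode << 16) | 0x0200 to handle 0x0122. The following sketch assumes a capability mask of 0x3 (both optional profiles present) purely for illustration:

	#include <stdio.h>

	#define THM_PROFILE_MAX 3

	static const char * const profiles[] = { "balanced", "silent", "performance" };

	int main(void)
	{
		unsigned int supported = 0x3;	/* hypothetical mask read via 0x0122 */
		unsigned int mode;

		for (mode = 0; mode < THM_PROFILE_MAX; mode++) {
			if (mode && !(supported & mode))
				continue;	/* profile not offered by this model */
			printf("%-12s -> argument 0x%08x\n", profiles[mode],
			       (mode << 0x10) | 0x0200);
		}
		return 0;
	}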
2061
2062static int sony_nc_thermal_mode_get(void)
2063{
2064 unsigned int result;
2065
2066 if (sony_call_snc_handle(0x0122, 0x0100, &result))
2067 return -EIO;
2068
2069 return result & 0xff;
2070}
2071
2072static ssize_t sony_nc_thermal_profiles_show(struct device *dev,
2073 struct device_attribute *attr, char *buffer)
2074{
2075 short cnt;
2076 size_t idx = 0;
2077
2078 for (cnt = 0; cnt < THM_PROFILE_MAX; cnt++) {
2079 if (!cnt || (th_handle->profiles & cnt))
2080 idx += snprintf(buffer + idx, PAGE_SIZE - idx, "%s ",
2081 snc_thermal_profiles[cnt]);
2082 }
2083 idx += snprintf(buffer + idx, PAGE_SIZE - idx, "\n");
2084
2085 return idx;
2086}
2087
2088static ssize_t sony_nc_thermal_mode_store(struct device *dev,
2089 struct device_attribute *attr,
2090 const char *buffer, size_t count)
2091{
2092 unsigned short cmd;
2093 size_t len = count;
2094
2095 if (count == 0)
2096 return -EINVAL;
2097
2098 /* skip the newline if present */
2099 if (buffer[len - 1] == '\n')
2100 len--;
2101
2102 for (cmd = 0; cmd < THM_PROFILE_MAX; cmd++)
2103 if (strncmp(buffer, snc_thermal_profiles[cmd], len) == 0)
2104 break;
2105
2106 if (sony_nc_thermal_mode_set(cmd))
2107 return -EIO;
2108
2109 return count;
2110}
2111
2112static ssize_t sony_nc_thermal_mode_show(struct device *dev,
2113 struct device_attribute *attr, char *buffer)
2114{
2115 ssize_t count = 0;
2116 unsigned int mode = sony_nc_thermal_mode_get();
2117
2118 if (mode < 0)
2119 return mode;
2120
2121 count = snprintf(buffer, PAGE_SIZE, "%s\n", snc_thermal_profiles[mode]);
2122
2123 return count;
2124}
2125
2126static int sony_nc_thermal_setup(struct platform_device *pd)
2127{
2128 int ret = 0;
2129 th_handle = kzalloc(sizeof(struct snc_thermal_ctrl), GFP_KERNEL);
2130 if (!th_handle)
2131 return -ENOMEM;
2132
2133 ret = sony_call_snc_handle(0x0122, 0x0000, &th_handle->profiles);
2134 if (ret) {
2135 pr_warn("couldn't read the thermal profiles\n");
2136 goto outkzalloc;
2137 }
2138
2139 ret = sony_nc_thermal_mode_get();
2140 if (ret < 0) {
2141 pr_warn("couldn't read the current thermal profile\n");
2142 goto outkzalloc;
2143 }
2144 th_handle->mode = ret;
2145
2146 sysfs_attr_init(&th_handle->profiles_attr.attr);
2147 th_handle->profiles_attr.attr.name = "thermal_profiles";
2148 th_handle->profiles_attr.attr.mode = S_IRUGO;
2149 th_handle->profiles_attr.show = sony_nc_thermal_profiles_show;
2150
2151 sysfs_attr_init(&th_handle->mode_attr.attr);
2152 th_handle->mode_attr.attr.name = "thermal_control";
2153 th_handle->mode_attr.attr.mode = S_IRUGO | S_IWUSR;
2154 th_handle->mode_attr.show = sony_nc_thermal_mode_show;
2155 th_handle->mode_attr.store = sony_nc_thermal_mode_store;
2156
2157 ret = device_create_file(&pd->dev, &th_handle->profiles_attr);
2158 if (ret)
2159 goto outkzalloc;
2160
2161 ret = device_create_file(&pd->dev, &th_handle->mode_attr);
2162 if (ret)
2163 goto outprofiles;
2164
2165 return 0;
2166
2167outprofiles:
2168 device_remove_file(&pd->dev, &th_handle->profiles_attr);
2169outkzalloc:
2170 kfree(th_handle);
2171 th_handle = NULL;
2172 return ret;
2173}
2174
2175static void sony_nc_thermal_cleanup(struct platform_device *pd)
2176{
2177 if (th_handle) {
2178 device_remove_file(&pd->dev, &th_handle->profiles_attr);
2179 device_remove_file(&pd->dev, &th_handle->mode_attr);
2180 kfree(th_handle);
2181 th_handle = NULL;
2182 }
2183}
2184
2185static void sony_nc_thermal_resume(void)
2186{
2187 unsigned int status = sony_nc_thermal_mode_get();
2188
2189 if (status != th_handle->mode)
2190 sony_nc_thermal_mode_set(th_handle->mode);
2191}
2192
2193/* resume on LID open */
2194struct snc_lid_resume_control {
2195 struct device_attribute attrs[3];
2196 unsigned int status;
2197};
2198static struct snc_lid_resume_control *lid_ctl;
2199
2200static ssize_t sony_nc_lid_resume_store(struct device *dev,
2201 struct device_attribute *attr,
2202 const char *buffer, size_t count)
2203{
2204 unsigned int result, pos;
2205 unsigned long value;
2206 if (count > 31)
2207 return -EINVAL;
2208
2209 if (kstrtoul(buffer, 10, &value) || value > 1)
2210 return -EINVAL;
2211
2212 /* the value we have to write to SNC is a bitmask:
2213 * +--------------+
2214 * | S3 | S4 | S5 |
2215 * +--------------+
2216 * 2 1 0
2217 */
2218 if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
2219 pos = 2;
2220 else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
2221 pos = 1;
2222 else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
2223 pos = 0;
2224 else
2225 return -EINVAL;
2226
2227 if (value)
2228 value = lid_ctl->status | (1 << pos);
2229 else
2230 value = lid_ctl->status & ~(1 << pos);
2231
2232 if (sony_call_snc_handle(0x0119, value << 0x10 | 0x0100, &result))
2233 return -EIO;
2234
2235 lid_ctl->status = value;
2236
2237 return count;
2238}
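The lid-resume setting is a three-bit mask (bit 2 = S3, bit 1 = S4, bit 0 = S5) written as (mask << 16) | 0x0100 to handle 0x0119. A short sketch of updating one bit while preserving the others, with a hypothetical starting mask:

	#include <stdio.h>

	/* pos: 2 for S3, 1 for S4, 0 for S5, as in sony_nc_lid_resume_store() */
	static unsigned int lid_mask_update(unsigned int status, unsigned int pos,
					    int enable)
	{
		return enable ? status | (1u << pos) : status & ~(1u << pos);
	}

	int main(void)
	{
		unsigned int status = 0x4;			/* S3 already enabled */

		status = lid_mask_update(status, 1, 1);		/* enable S4 resume */
		printf("new mask 0x%x, SNC argument 0x%08x\n",
		       status, (status << 0x10) | 0x0100);
		return 0;
	}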
2239
2240static ssize_t sony_nc_lid_resume_show(struct device *dev,
2241 struct device_attribute *attr, char *buffer)
2242{
2243 unsigned int pos;
2244
2245 if (strcmp(attr->attr.name, "lid_resume_S3") == 0)
2246 pos = 2;
2247 else if (strcmp(attr->attr.name, "lid_resume_S4") == 0)
2248 pos = 1;
2249 else if (strcmp(attr->attr.name, "lid_resume_S5") == 0)
2250 pos = 0;
2251 else
2252 return -EINVAL;
2253
2254 return snprintf(buffer, PAGE_SIZE, "%d\n",
2255 (lid_ctl->status >> pos) & 0x01);
2256}
2257
2258static int sony_nc_lid_resume_setup(struct platform_device *pd)
2259{
2260 unsigned int result;
2261 int i;
2262
2263 if (sony_call_snc_handle(0x0119, 0x0000, &result))
2264 return -EIO;
2265
2266 lid_ctl = kzalloc(sizeof(struct snc_lid_resume_control), GFP_KERNEL);
2267 if (!lid_ctl)
2268 return -ENOMEM;
2269
2270 lid_ctl->status = result & 0x7;
2271
2272 sysfs_attr_init(&lid_ctl->attrs[0].attr);
2273 lid_ctl->attrs[0].attr.name = "lid_resume_S3";
2274 lid_ctl->attrs[0].attr.mode = S_IRUGO | S_IWUSR;
2275 lid_ctl->attrs[0].show = sony_nc_lid_resume_show;
2276 lid_ctl->attrs[0].store = sony_nc_lid_resume_store;
2277
2278 sysfs_attr_init(&lid_ctl->attrs[1].attr);
2279 lid_ctl->attrs[1].attr.name = "lid_resume_S4";
2280 lid_ctl->attrs[1].attr.mode = S_IRUGO | S_IWUSR;
2281 lid_ctl->attrs[1].show = sony_nc_lid_resume_show;
2282 lid_ctl->attrs[1].store = sony_nc_lid_resume_store;
2283
2284 sysfs_attr_init(&lid_ctl->attrs[2].attr);
2285 lid_ctl->attrs[2].attr.name = "lid_resume_S5";
2286 lid_ctl->attrs[2].attr.mode = S_IRUGO | S_IWUSR;
2287 lid_ctl->attrs[2].show = sony_nc_lid_resume_show;
2288 lid_ctl->attrs[2].store = sony_nc_lid_resume_store;
2289
2290 for (i = 0; i < 3; i++) {
2291 result = device_create_file(&pd->dev, &lid_ctl->attrs[i]);
2292 if (result)
2293 goto liderror;
2294 }
2295
2296 return 0;
2297
2298liderror:
2299 for (; i > 0; i--)
2300 device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
2301
2302 kfree(lid_ctl);
2303 lid_ctl = NULL;
2304
2305 return result;
2306}
2307
2308static void sony_nc_lid_resume_cleanup(struct platform_device *pd)
2309{
2310 int i;
2311
2312 if (lid_ctl) {
2313 for (i = 0; i < 3; i++)
2314 device_remove_file(&pd->dev, &lid_ctl->attrs[i]);
2315
2316 kfree(lid_ctl);
2317 lid_ctl = NULL;
2318 }
2319}
2320
2321/* High speed charging function */
2322static struct device_attribute *hsc_handle;
2323
2324static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
2325 struct device_attribute *attr,
2326 const char *buffer, size_t count)
2327{
2328 unsigned int result;
2329 unsigned long value;
2330
2331 if (count > 31)
2332 return -EINVAL;
2333
2334 if (kstrtoul(buffer, 10, &value) || value > 1)
2335 return -EINVAL;
2336
2337 if (sony_call_snc_handle(0x0131, value << 0x10 | 0x0200, &result))
2338 return -EIO;
2339
2340 return count;
2341}
2342
2343static ssize_t sony_nc_highspeed_charging_show(struct device *dev,
2344 struct device_attribute *attr, char *buffer)
2345{
2346 unsigned int result;
2347
2348 if (sony_call_snc_handle(0x0131, 0x0100, &result))
2349 return -EIO;
2350
2351 return snprintf(buffer, PAGE_SIZE, "%d\n", result & 0x01);
2352}
2353
2354static int sony_nc_highspeed_charging_setup(struct platform_device *pd)
2355{
2356 unsigned int result;
2357
2358 if (sony_call_snc_handle(0x0131, 0x0000, &result) || !(result & 0x01)) {
2359 /* some models advertise the handle but have no implementation
2360 * for it
2361 */
2362 pr_info("No High Speed Charging capability found\n");
2363 return 0;
2364 }
2365
2366 hsc_handle = kzalloc(sizeof(struct device_attribute), GFP_KERNEL);
2367 if (!hsc_handle)
2368 return -ENOMEM;
2369
2370 sysfs_attr_init(&hsc_handle->attr);
2371 hsc_handle->attr.name = "battery_highspeed_charging";
2372 hsc_handle->attr.mode = S_IRUGO | S_IWUSR;
2373 hsc_handle->show = sony_nc_highspeed_charging_show;
2374 hsc_handle->store = sony_nc_highspeed_charging_store;
2375
2376 result = device_create_file(&pd->dev, hsc_handle);
2377 if (result) {
2378 kfree(hsc_handle);
2379 hsc_handle = NULL;
2380 return result;
2381 }
2382
2383 return 0;
2384}
2385
2386static void sony_nc_highspeed_charging_cleanup(struct platform_device *pd)
2387{
2388 if (hsc_handle) {
2389 device_remove_file(&pd->dev, hsc_handle);
2390 kfree(hsc_handle);
2391 hsc_handle = NULL;
2392 }
2393}
2394
2395/* Touchpad enable/disable */
2396struct touchpad_control {
2397 struct device_attribute attr;
2398 int handle;
2399};
2400static struct touchpad_control *tp_ctl;
2401
2402static ssize_t sony_nc_touchpad_store(struct device *dev,
2403 struct device_attribute *attr, const char *buffer, size_t count)
2404{
2405 unsigned int result;
2406 unsigned long value;
2407
2408 if (count > 31)
2409 return -EINVAL;
2410
2411 if (kstrtoul(buffer, 10, &value) || value > 1)
2412 return -EINVAL;
2413
2414 /* sysfs: 0 disabled, 1 enabled
2415 * EC: 0 enabled, 1 disabled
2416 */
2417 if (sony_call_snc_handle(tp_ctl->handle,
2418 (!value << 0x10) | 0x100, &result))
2419 return -EIO;
2420
2421 return count;
2422}
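The touchpad attribute uses inverted logic towards the EC (sysfs 1 = enabled, EC 0 = enabled), hence the (!value << 16) | 0x100 on store and the negated bit 0 on show. A trivial sketch of both directions:

	#include <stdio.h>

	int main(void)
	{
		unsigned long sysfs_value = 1;		/* user asks for "enabled" */
		unsigned int ec_reply = 0x0;		/* EC: 0 means enabled */

		printf("store argument: 0x%08x\n",
		       ((unsigned int)!sysfs_value << 0x10) | 0x100);
		printf("show value:     %d\n", !(ec_reply & 0x01));
		return 0;
	}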
2423
2424static ssize_t sony_nc_touchpad_show(struct device *dev,
2425 struct device_attribute *attr, char *buffer)
2426{
2427 unsigned int result;
2428
2429 if (sony_call_snc_handle(tp_ctl->handle, 0x000, &result))
2430 return -EINVAL;
2431
2432 return snprintf(buffer, PAGE_SIZE, "%d\n", !(result & 0x01));
2433}
2434
2435static int sony_nc_touchpad_setup(struct platform_device *pd,
2436 unsigned int handle)
2437{
2438 int ret = 0;
2439
2440 tp_ctl = kzalloc(sizeof(struct touchpad_control), GFP_KERNEL);
2441 if (!tp_ctl)
2442 return -ENOMEM;
2443
2444 tp_ctl->handle = handle;
2445
2446 sysfs_attr_init(&tp_ctl->attr.attr);
2447 tp_ctl->attr.attr.name = "touchpad";
2448 tp_ctl->attr.attr.mode = S_IRUGO | S_IWUSR;
2449 tp_ctl->attr.show = sony_nc_touchpad_show;
2450 tp_ctl->attr.store = sony_nc_touchpad_store;
2451
2452 ret = device_create_file(&pd->dev, &tp_ctl->attr);
2453 if (ret) {
2454 kfree(tp_ctl);
2455 tp_ctl = NULL;
2456 }
2457
2458 return ret;
2459}
2460
2461static void sony_nc_touchpad_cleanup(struct platform_device *pd)
2462{
2463 if (tp_ctl) {
2464 device_remove_file(&pd->dev, &tp_ctl->attr);
2465 kfree(tp_ctl);
2466 tp_ctl = NULL;
2467 }
1561} 2468}
1562 2469
1563static void sony_nc_backlight_ng_read_limits(int handle, 2470static void sony_nc_backlight_ng_read_limits(int handle,
1564 struct sony_backlight_props *props) 2471 struct sony_backlight_props *props)
1565{ 2472{
1566 int offset; 2473 u64 offset;
1567 acpi_status status; 2474 int i;
1568 u8 brlvl, i;
1569 u8 min = 0xff, max = 0x00; 2475 u8 min = 0xff, max = 0x00;
1570 struct acpi_object_list params; 2476 unsigned char buffer[32] = { 0 };
1571 union acpi_object in_obj;
1572 union acpi_object *lvl_enum;
1573 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1574 2477
1575 props->handle = handle; 2478 props->handle = handle;
1576 props->offset = 0; 2479 props->offset = 0;
@@ -1583,50 +2486,31 @@ static void sony_nc_backlight_ng_read_limits(int handle,
1583 /* try to read the boundaries from ACPI tables, if we fail the above 2486 /* try to read the boundaries from ACPI tables, if we fail the above
1584 * defaults should be reasonable 2487 * defaults should be reasonable
1585 */ 2488 */
1586 params.count = 1; 2489 i = sony_nc_buffer_call(sony_nc_acpi_handle, "SN06", &offset, buffer,
1587 params.pointer = &in_obj; 2490 32);
1588 in_obj.type = ACPI_TYPE_INTEGER; 2491 if (i < 0)
1589 in_obj.integer.value = offset;
1590 status = acpi_evaluate_object(sony_nc_acpi_handle, "SN06", &params,
1591 &buffer);
1592 if (ACPI_FAILURE(status))
1593 return; 2492 return;
1594 2493
1595 lvl_enum = (union acpi_object *) buffer.pointer;
1596 if (!lvl_enum) {
1597 pr_err("No SN06 return object.");
1598 return;
1599 }
1600 if (lvl_enum->type != ACPI_TYPE_BUFFER) {
1601 pr_err("Invalid SN06 return object 0x%.2x\n",
1602 lvl_enum->type);
1603 goto out_invalid;
1604 }
1605
1606 /* the buffer lists brightness levels available, brightness levels are 2494 /* the buffer lists brightness levels available, brightness levels are
1607 * from 0 to 8 in the array, other values are used by ALS control. 2495 * from position 0 to 8 in the array, other values are used by ALS
2496 * control.
1608 */ 2497 */
1609 for (i = 0; i < 9 && i < lvl_enum->buffer.length; i++) { 2498 for (i = 0; i < 9 && i < ARRAY_SIZE(buffer); i++) {
1610 2499
1611 brlvl = *(lvl_enum->buffer.pointer + i); 2500 dprintk("Brightness level: %d\n", buffer[i]);
1612 dprintk("Brightness level: %d\n", brlvl);
1613 2501
1614 if (!brlvl) 2502 if (!buffer[i])
1615 break; 2503 break;
1616 2504
1617 if (brlvl > max) 2505 if (buffer[i] > max)
1618 max = brlvl; 2506 max = buffer[i];
1619 if (brlvl < min) 2507 if (buffer[i] < min)
1620 min = brlvl; 2508 min = buffer[i];
1621 } 2509 }
1622 props->offset = min; 2510 props->offset = min;
1623 props->maxlvl = max; 2511 props->maxlvl = max;
1624 dprintk("Brightness levels: min=%d max=%d\n", props->offset, 2512 dprintk("Brightness levels: min=%d max=%d\n", props->offset,
1625 props->maxlvl); 2513 props->maxlvl);
1626
1627out_invalid:
1628 kfree(buffer.pointer);
1629 return;
1630} 2514}
1631 2515
1632static void sony_nc_backlight_setup(void) 2516static void sony_nc_backlight_setup(void)
@@ -1715,28 +2599,25 @@ static int sony_nc_add(struct acpi_device *device)
1715 2599
1716 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 2600 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1717 &handle))) { 2601 &handle))) {
1718 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 2602 int arg = 1;
2603 if (sony_nc_int_call(sony_nc_acpi_handle, "ECON", &arg, NULL))
1719 dprintk("ECON Method failed\n"); 2604 dprintk("ECON Method failed\n");
1720 } 2605 }
1721 2606
1722 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00", 2607 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "SN00",
1723 &handle))) { 2608 &handle))) {
1724 dprintk("Doing SNC setup\n"); 2609 dprintk("Doing SNC setup\n");
2610 /* retrieve the available handles */
1725 result = sony_nc_handles_setup(sony_pf_device); 2611 result = sony_nc_handles_setup(sony_pf_device);
1726 if (result) 2612 if (!result)
1727 goto outpresent; 2613 sony_nc_function_setup(device, sony_pf_device);
1728 result = sony_nc_kbd_backlight_setup(sony_pf_device);
1729 if (result)
1730 goto outsnc;
1731 sony_nc_function_setup(device);
1732 sony_nc_rfkill_setup(device);
1733 } 2614 }
1734 2615
1735 /* setup input devices and helper fifo */ 2616 /* setup input devices and helper fifo */
1736 result = sony_laptop_setup_input(device); 2617 result = sony_laptop_setup_input(device);
1737 if (result) { 2618 if (result) {
1738 pr_err("Unable to create input devices\n"); 2619 pr_err("Unable to create input devices\n");
1739 goto outkbdbacklight; 2620 goto outsnc;
1740 } 2621 }
1741 2622
1742 if (acpi_video_backlight_support()) { 2623 if (acpi_video_backlight_support()) {
@@ -1794,10 +2675,8 @@ static int sony_nc_add(struct acpi_device *device)
1794 2675
1795 sony_laptop_remove_input(); 2676 sony_laptop_remove_input();
1796 2677
1797 outkbdbacklight:
1798 sony_nc_kbd_backlight_cleanup(sony_pf_device);
1799
1800 outsnc: 2678 outsnc:
2679 sony_nc_function_cleanup(sony_pf_device);
1801 sony_nc_handles_cleanup(sony_pf_device); 2680 sony_nc_handles_cleanup(sony_pf_device);
1802 2681
1803 outpresent: 2682 outpresent:
@@ -1820,11 +2699,10 @@ static int sony_nc_remove(struct acpi_device *device, int type)
1820 device_remove_file(&sony_pf_device->dev, &item->devattr); 2699 device_remove_file(&sony_pf_device->dev, &item->devattr);
1821 } 2700 }
1822 2701
1823 sony_nc_kbd_backlight_cleanup(sony_pf_device); 2702 sony_nc_function_cleanup(sony_pf_device);
1824 sony_nc_handles_cleanup(sony_pf_device); 2703 sony_nc_handles_cleanup(sony_pf_device);
1825 sony_pf_remove(); 2704 sony_pf_remove();
1826 sony_laptop_remove_input(); 2705 sony_laptop_remove_input();
1827 sony_nc_rfkill_cleanup();
1828 dprintk(SONY_NC_DRIVER_NAME " removed.\n"); 2706 dprintk(SONY_NC_DRIVER_NAME " removed.\n");
1829 2707
1830 return 0; 2708 return 0;
@@ -2437,7 +3315,9 @@ static ssize_t sony_pic_wwanpower_store(struct device *dev,
2437 if (count > 31) 3315 if (count > 31)
2438 return -EINVAL; 3316 return -EINVAL;
2439 3317
2440 value = simple_strtoul(buffer, NULL, 10); 3318 if (kstrtoul(buffer, 10, &value))
3319 return -EINVAL;
3320
2441 mutex_lock(&spic_dev.lock); 3321 mutex_lock(&spic_dev.lock);
2442 __sony_pic_set_wwanpower(value); 3322 __sony_pic_set_wwanpower(value);
2443 mutex_unlock(&spic_dev.lock); 3323 mutex_unlock(&spic_dev.lock);
@@ -2474,7 +3354,9 @@ static ssize_t sony_pic_bluetoothpower_store(struct device *dev,
2474 if (count > 31) 3354 if (count > 31)
2475 return -EINVAL; 3355 return -EINVAL;
2476 3356
2477 value = simple_strtoul(buffer, NULL, 10); 3357 if (kstrtoul(buffer, 10, &value))
3358 return -EINVAL;
3359
2478 mutex_lock(&spic_dev.lock); 3360 mutex_lock(&spic_dev.lock);
2479 __sony_pic_set_bluetoothpower(value); 3361 __sony_pic_set_bluetoothpower(value);
2480 mutex_unlock(&spic_dev.lock); 3362 mutex_unlock(&spic_dev.lock);
@@ -2513,7 +3395,9 @@ static ssize_t sony_pic_fanspeed_store(struct device *dev,
2513 if (count > 31) 3395 if (count > 31)
2514 return -EINVAL; 3396 return -EINVAL;
2515 3397
2516 value = simple_strtoul(buffer, NULL, 10); 3398 if (kstrtoul(buffer, 10, &value))
3399 return -EINVAL;
3400
2517 if (sony_pic_set_fanspeed(value)) 3401 if (sony_pic_set_fanspeed(value))
2518 return -EIO; 3402 return -EIO;
2519 3403
@@ -2671,7 +3555,8 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2671 ret = -EIO; 3555 ret = -EIO;
2672 break; 3556 break;
2673 } 3557 }
2674 if (acpi_callgetfunc(sony_nc_acpi_handle, "GBRT", &value)) { 3558 if (sony_nc_int_call(sony_nc_acpi_handle, "GBRT", NULL,
3559 &value)) {
2675 ret = -EIO; 3560 ret = -EIO;
2676 break; 3561 break;
2677 } 3562 }
@@ -2688,8 +3573,9 @@ static long sonypi_misc_ioctl(struct file *fp, unsigned int cmd,
2688 ret = -EFAULT; 3573 ret = -EFAULT;
2689 break; 3574 break;
2690 } 3575 }
2691 if (acpi_callsetfunc(sony_nc_acpi_handle, "SBRT", 3576 value = (val8 >> 5) + 1;
2692 (val8 >> 5) + 1, NULL)) { 3577 if (sony_nc_int_call(sony_nc_acpi_handle, "SBRT", &value,
3578 NULL)) {
2693 ret = -EIO; 3579 ret = -EIO;
2694 break; 3580 break;
2695 } 3581 }
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index d68c0002f4a2..8b5610d88418 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -3402,7 +3402,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3402 /* Do not issue duplicate brightness change events to 3402 /* Do not issue duplicate brightness change events to
3403 * userspace. tpacpi_detect_brightness_capabilities() must have 3403 * userspace. tpacpi_detect_brightness_capabilities() must have
3404 * been called before this point */ 3404 * been called before this point */
3405 if (tp_features.bright_acpimode && acpi_video_backlight_support()) { 3405 if (acpi_video_backlight_support()) {
3406 pr_info("This ThinkPad has standard ACPI backlight " 3406 pr_info("This ThinkPad has standard ACPI backlight "
3407 "brightness control, supported by the ACPI " 3407 "brightness control, supported by the ACPI "
3408 "video driver\n"); 3408 "video driver\n");
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index 57787d87d9a4..dab10f6edcd4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -95,6 +95,7 @@ MODULE_LICENSE("GPL");
95 95
96/* registers */ 96/* registers */
97#define HCI_FAN 0x0004 97#define HCI_FAN 0x0004
98#define HCI_TR_BACKLIGHT 0x0005
98#define HCI_SYSTEM_EVENT 0x0016 99#define HCI_SYSTEM_EVENT 0x0016
99#define HCI_VIDEO_OUT 0x001c 100#define HCI_VIDEO_OUT 0x001c
100#define HCI_HOTKEY_EVENT 0x001e 101#define HCI_HOTKEY_EVENT 0x001e
@@ -134,6 +135,7 @@ struct toshiba_acpi_dev {
134 unsigned int system_event_supported:1; 135 unsigned int system_event_supported:1;
135 unsigned int ntfy_supported:1; 136 unsigned int ntfy_supported:1;
136 unsigned int info_supported:1; 137 unsigned int info_supported:1;
138 unsigned int tr_backlight_supported:1;
137 139
138 struct mutex mutex; 140 struct mutex mutex;
139}; 141};
@@ -478,34 +480,70 @@ static const struct rfkill_ops toshiba_rfk_ops = {
478 .poll = bt_rfkill_poll, 480 .poll = bt_rfkill_poll,
479}; 481};
480 482
483static int get_tr_backlight_status(struct toshiba_acpi_dev *dev, bool *enabled)
484{
485 u32 hci_result;
486 u32 status;
487
488 hci_read1(dev, HCI_TR_BACKLIGHT, &status, &hci_result);
489 *enabled = !status;
490 return hci_result == HCI_SUCCESS ? 0 : -EIO;
491}
492
493static int set_tr_backlight_status(struct toshiba_acpi_dev *dev, bool enable)
494{
495 u32 hci_result;
496 u32 value = !enable;
497
498 hci_write1(dev, HCI_TR_BACKLIGHT, value, &hci_result);
499 return hci_result == HCI_SUCCESS ? 0 : -EIO;
500}
501
481static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ; 502static struct proc_dir_entry *toshiba_proc_dir /*= 0*/ ;
482 503
483static int get_lcd(struct backlight_device *bd) 504static int __get_lcd_brightness(struct toshiba_acpi_dev *dev)
484{ 505{
485 struct toshiba_acpi_dev *dev = bl_get_data(bd);
486 u32 hci_result; 506 u32 hci_result;
487 u32 value; 507 u32 value;
508 int brightness = 0;
509
510 if (dev->tr_backlight_supported) {
511 bool enabled;
512 int ret = get_tr_backlight_status(dev, &enabled);
513 if (ret)
514 return ret;
515 if (enabled)
516 return 0;
517 brightness++;
518 }
488 519
489 hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result); 520 hci_read1(dev, HCI_LCD_BRIGHTNESS, &value, &hci_result);
490 if (hci_result == HCI_SUCCESS) 521 if (hci_result == HCI_SUCCESS)
491 return (value >> HCI_LCD_BRIGHTNESS_SHIFT); 522 return brightness + (value >> HCI_LCD_BRIGHTNESS_SHIFT);
492 523
493 return -EIO; 524 return -EIO;
494} 525}
495 526
527static int get_lcd_brightness(struct backlight_device *bd)
528{
529 struct toshiba_acpi_dev *dev = bl_get_data(bd);
530 return __get_lcd_brightness(dev);
531}
532
496static int lcd_proc_show(struct seq_file *m, void *v) 533static int lcd_proc_show(struct seq_file *m, void *v)
497{ 534{
498 struct toshiba_acpi_dev *dev = m->private; 535 struct toshiba_acpi_dev *dev = m->private;
499 int value; 536 int value;
537 int levels;
500 538
501 if (!dev->backlight_dev) 539 if (!dev->backlight_dev)
502 return -ENODEV; 540 return -ENODEV;
503 541
504 value = get_lcd(dev->backlight_dev); 542 levels = dev->backlight_dev->props.max_brightness + 1;
543 value = get_lcd_brightness(dev->backlight_dev);
505 if (value >= 0) { 544 if (value >= 0) {
506 seq_printf(m, "brightness: %d\n", value); 545 seq_printf(m, "brightness: %d\n", value);
507 seq_printf(m, "brightness_levels: %d\n", 546 seq_printf(m, "brightness_levels: %d\n", levels);
508 HCI_LCD_BRIGHTNESS_LEVELS);
509 return 0; 547 return 0;
510 } 548 }
511 549
@@ -518,10 +556,19 @@ static int lcd_proc_open(struct inode *inode, struct file *file)
518 return single_open(file, lcd_proc_show, PDE(inode)->data); 556 return single_open(file, lcd_proc_show, PDE(inode)->data);
519} 557}
520 558
521static int set_lcd(struct toshiba_acpi_dev *dev, int value) 559static int set_lcd_brightness(struct toshiba_acpi_dev *dev, int value)
522{ 560{
523 u32 hci_result; 561 u32 hci_result;
524 562
563 if (dev->tr_backlight_supported) {
564 bool enable = !value;
565 int ret = set_tr_backlight_status(dev, enable);
566 if (ret)
567 return ret;
568 if (value)
569 value--;
570 }
571
525 value = value << HCI_LCD_BRIGHTNESS_SHIFT; 572 value = value << HCI_LCD_BRIGHTNESS_SHIFT;
526 hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result); 573 hci_write1(dev, HCI_LCD_BRIGHTNESS, value, &hci_result);
527 return hci_result == HCI_SUCCESS ? 0 : -EIO; 574 return hci_result == HCI_SUCCESS ? 0 : -EIO;
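When the transflective backlight is supported the user-visible range gains one extra level: level 0 switches transflective mode on and every other level is shifted down by one before being written to HCI_LCD_BRIGHTNESS. A sketch of that mapping (userspace stand-in; the HCI calls themselves are not modelled):

	#include <stdbool.h>
	#include <stdio.h>

	/* Map a user-visible brightness level to (transflective on/off, HCI level),
	 * mirroring set_lcd_brightness() when tr_backlight_supported is true. */
	static void map_level(int value, bool *tr_enable, int *hci_level)
	{
		*tr_enable = (value == 0);	/* level 0 switches to transflective */
		*hci_level = value ? value - 1 : 0;
	}

	int main(void)
	{
		bool tr;
		int hci, level;

		for (level = 0; level <= 3; level++) {
			map_level(level, &tr, &hci);
			printf("user %d -> transflective %d, HCI level %d\n",
			       level, tr, hci);
		}
		return 0;
	}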
@@ -530,7 +577,7 @@ static int set_lcd(struct toshiba_acpi_dev *dev, int value)
530static int set_lcd_status(struct backlight_device *bd) 577static int set_lcd_status(struct backlight_device *bd)
531{ 578{
532 struct toshiba_acpi_dev *dev = bl_get_data(bd); 579 struct toshiba_acpi_dev *dev = bl_get_data(bd);
533 return set_lcd(dev, bd->props.brightness); 580 return set_lcd_brightness(dev, bd->props.brightness);
534} 581}
535 582
536static ssize_t lcd_proc_write(struct file *file, const char __user *buf, 583static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
@@ -541,6 +588,7 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
541 size_t len; 588 size_t len;
542 int value; 589 int value;
543 int ret; 590 int ret;
591 int levels = dev->backlight_dev->props.max_brightness + 1;
544 592
545 len = min(count, sizeof(cmd) - 1); 593 len = min(count, sizeof(cmd) - 1);
546 if (copy_from_user(cmd, buf, len)) 594 if (copy_from_user(cmd, buf, len))
@@ -548,8 +596,8 @@ static ssize_t lcd_proc_write(struct file *file, const char __user *buf,
548 cmd[len] = '\0'; 596 cmd[len] = '\0';
549 597
550 if (sscanf(cmd, " brightness : %i", &value) == 1 && 598 if (sscanf(cmd, " brightness : %i", &value) == 1 &&
551 value >= 0 && value < HCI_LCD_BRIGHTNESS_LEVELS) { 599 value >= 0 && value < levels) {
552 ret = set_lcd(dev, value); 600 ret = set_lcd_brightness(dev, value);
553 if (ret == 0) 601 if (ret == 0)
554 ret = count; 602 ret = count;
555 } else { 603 } else {
@@ -860,8 +908,9 @@ static void remove_toshiba_proc_entries(struct toshiba_acpi_dev *dev)
860} 908}
861 909
862static const struct backlight_ops toshiba_backlight_data = { 910static const struct backlight_ops toshiba_backlight_data = {
863 .get_brightness = get_lcd, 911 .options = BL_CORE_SUSPENDRESUME,
864 .update_status = set_lcd_status, 912 .get_brightness = get_lcd_brightness,
913 .update_status = set_lcd_status,
865}; 914};
866 915
867static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str, 916static bool toshiba_acpi_i8042_filter(unsigned char data, unsigned char str,
@@ -1020,6 +1069,56 @@ static int __devinit toshiba_acpi_setup_keyboard(struct toshiba_acpi_dev *dev)
1020 return error; 1069 return error;
1021} 1070}
1022 1071
1072static int __devinit toshiba_acpi_setup_backlight(struct toshiba_acpi_dev *dev)
1073{
1074 struct backlight_properties props;
1075 int brightness;
1076 int ret;
1077 bool enabled;
1078
1079 /*
1080 * Some machines don't support the backlight methods at all, and
1081 * others support it read-only. Either of these is pretty useless,
1082 * so only register the backlight device if the backlight method
1083 * supports both reads and writes.
1084 */
1085 brightness = __get_lcd_brightness(dev);
1086 if (brightness < 0)
1087 return 0;
1088 ret = set_lcd_brightness(dev, brightness);
1089 if (ret) {
1090 pr_debug("Backlight method is read-only, disabling backlight support\n");
1091 return 0;
1092 }
1093
1094 /* Determine whether or not BIOS supports transflective backlight */
1095 ret = get_tr_backlight_status(dev, &enabled);
1096 dev->tr_backlight_supported = !ret;
1097
1098 memset(&props, 0, sizeof(props));
1099 props.type = BACKLIGHT_PLATFORM;
1100 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1101
1102 /* add an extra level so that level 0 switches to transflective mode */
1103 if (dev->tr_backlight_supported)
1104 props.max_brightness++;
1105
1106 dev->backlight_dev = backlight_device_register("toshiba",
1107 &dev->acpi_dev->dev,
1108 dev,
1109 &toshiba_backlight_data,
1110 &props);
1111 if (IS_ERR(dev->backlight_dev)) {
1112 ret = PTR_ERR(dev->backlight_dev);
1113 pr_err("Could not register toshiba backlight device\n");
1114 dev->backlight_dev = NULL;
1115 return ret;
1116 }
1117
1118 dev->backlight_dev->props.brightness = brightness;
1119 return 0;
1120}
1121
1023static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type) 1122static int toshiba_acpi_remove(struct acpi_device *acpi_dev, int type)
1024{ 1123{
1025 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev); 1124 struct toshiba_acpi_dev *dev = acpi_driver_data(acpi_dev);
@@ -1078,7 +1177,6 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
1078 u32 dummy; 1177 u32 dummy;
1079 bool bt_present; 1178 bool bt_present;
1080 int ret = 0; 1179 int ret = 0;
1081 struct backlight_properties props;
1082 1180
1083 if (toshiba_acpi) 1181 if (toshiba_acpi)
1084 return -EBUSY; 1182 return -EBUSY;
@@ -1104,22 +1202,9 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
1104 1202
1105 mutex_init(&dev->mutex); 1203 mutex_init(&dev->mutex);
1106 1204
1107 memset(&props, 0, sizeof(props)); 1205 ret = toshiba_acpi_setup_backlight(dev);
1108 props.type = BACKLIGHT_PLATFORM; 1206 if (ret)
1109 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1110 dev->backlight_dev = backlight_device_register("toshiba",
1111 &acpi_dev->dev,
1112 dev,
1113 &toshiba_backlight_data,
1114 &props);
1115 if (IS_ERR(dev->backlight_dev)) {
1116 ret = PTR_ERR(dev->backlight_dev);
1117
1118 pr_err("Could not register toshiba backlight device\n");
1119 dev->backlight_dev = NULL;
1120 goto error; 1207 goto error;
1121 }
1122 dev->backlight_dev->props.brightness = get_lcd(dev->backlight_dev);
1123 1208
1124 /* Register rfkill switch for Bluetooth */ 1209 /* Register rfkill switch for Bluetooth */
1125 if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) { 1210 if (hci_get_bt_present(dev, &bt_present) == HCI_SUCCESS && bt_present) {
diff --git a/drivers/platform/x86/xo1-rfkill.c b/drivers/platform/x86/xo1-rfkill.c
index 41781ed8301c..b57ad8641480 100644
--- a/drivers/platform/x86/xo1-rfkill.c
+++ b/drivers/platform/x86/xo1-rfkill.c
@@ -15,15 +15,26 @@
15 15
16#include <asm/olpc.h> 16#include <asm/olpc.h>
17 17
18static bool card_blocked;
19
18static int rfkill_set_block(void *data, bool blocked) 20static int rfkill_set_block(void *data, bool blocked)
19{ 21{
20 unsigned char cmd; 22 unsigned char cmd;
23 int r;
24
25 if (blocked == card_blocked)
26 return 0;
27
21 if (blocked) 28 if (blocked)
22 cmd = EC_WLAN_ENTER_RESET; 29 cmd = EC_WLAN_ENTER_RESET;
23 else 30 else
24 cmd = EC_WLAN_LEAVE_RESET; 31 cmd = EC_WLAN_LEAVE_RESET;
25 32
26 return olpc_ec_cmd(cmd, NULL, 0, NULL, 0); 33 r = olpc_ec_cmd(cmd, NULL, 0, NULL, 0);
34 if (r == 0)
35 card_blocked = blocked;
36
37 return r;
27} 38}
28 39
29static const struct rfkill_ops rfkill_ops = { 40static const struct rfkill_ops rfkill_ops = {
diff --git a/drivers/power/Kconfig b/drivers/power/Kconfig
index 99dc29f2f2f2..e3a3b4956f08 100644
--- a/drivers/power/Kconfig
+++ b/drivers/power/Kconfig
@@ -1,5 +1,5 @@
1menuconfig POWER_SUPPLY 1menuconfig POWER_SUPPLY
2 tristate "Power supply class support" 2 bool "Power supply class support"
3 help 3 help
4 Say Y here to enable power supply class support. This allows 4 Say Y here to enable power supply class support. This allows
5 power supply (batteries, AC, USB) monitoring by userspace 5 power supply (batteries, AC, USB) monitoring by userspace
@@ -77,7 +77,7 @@ config BATTERY_DS2780
77 Say Y here to enable support for batteries with ds2780 chip. 77 Say Y here to enable support for batteries with ds2780 chip.
78 78
79config BATTERY_DS2781 79config BATTERY_DS2781
80 tristate "2781 battery driver" 80 tristate "DS2781 battery driver"
81 depends on HAS_IOMEM 81 depends on HAS_IOMEM
82 select W1 82 select W1
83 select W1_SLAVE_DS2781 83 select W1_SLAVE_DS2781
@@ -181,14 +181,15 @@ config BATTERY_MAX17040
181 to operate with a single lithium cell 181 to operate with a single lithium cell
182 182
183config BATTERY_MAX17042 183config BATTERY_MAX17042
184 tristate "Maxim MAX17042/8997/8966 Fuel Gauge" 184 tristate "Maxim MAX17042/17047/17050/8997/8966 Fuel Gauge"
185 depends on I2C 185 depends on I2C
186 help 186 help
187 MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries 187 MAX17042 is a fuel-gauge system for lithium-ion (Li+) batteries
188 in handheld and portable equipment. The MAX17042 is configured 188 in handheld and portable equipment. The MAX17042 is configured
189 to operate with a single lithium cell. MAX8997 and MAX8966 are 189 to operate with a single lithium cell. MAX8997 and MAX8966 are
190 multi-function devices that include fuel gauges that are compatible 190 multi-function devices that include fuel gauges that are compatible
191 with MAX17042. 191 with MAX17042. This driver also supports max17047/50 chips which are
192 improved versions of max17042.
192 193
193config BATTERY_Z2 194config BATTERY_Z2
194 tristate "Z2 battery driver" 195 tristate "Z2 battery driver"
@@ -291,6 +292,7 @@ config CHARGER_MAX8998
291config CHARGER_SMB347 292config CHARGER_SMB347
292 tristate "Summit Microelectronics SMB347 Battery Charger" 293 tristate "Summit Microelectronics SMB347 Battery Charger"
293 depends on I2C 294 depends on I2C
295 select REGMAP_I2C
294 help 296 help
295 Say Y to include support for Summit Microelectronics SMB347 297 Say Y to include support for Summit Microelectronics SMB347
296 Battery Charger. 298 Battery Charger.
diff --git a/drivers/power/ab8500_btemp.c b/drivers/power/ab8500_btemp.c
index d8bb99394ac0..bba3ccac72fe 100644
--- a/drivers/power/ab8500_btemp.c
+++ b/drivers/power/ab8500_btemp.c
@@ -964,10 +964,15 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
964{ 964{
965 int irq, i, ret = 0; 965 int irq, i, ret = 0;
966 u8 val; 966 u8 val;
967 struct abx500_bm_plat_data *plat_data; 967 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
968 struct ab8500_btemp *di;
969
970 if (!plat_data) {
971 dev_err(&pdev->dev, "No platform data\n");
972 return -EINVAL;
973 }
968 974
969 struct ab8500_btemp *di = 975 di = kzalloc(sizeof(*di), GFP_KERNEL);
970 kzalloc(sizeof(struct ab8500_btemp), GFP_KERNEL);
971 if (!di) 976 if (!di)
972 return -ENOMEM; 977 return -ENOMEM;
973 978
@@ -977,7 +982,6 @@ static int __devinit ab8500_btemp_probe(struct platform_device *pdev)
977 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); 982 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
978 983
979 /* get btemp specific platform data */ 984 /* get btemp specific platform data */
980 plat_data = pdev->dev.platform_data;
981 di->pdata = plat_data->btemp; 985 di->pdata = plat_data->btemp;
982 if (!di->pdata) { 986 if (!di->pdata) {
983 dev_err(di->dev, "no btemp platform data supplied\n"); 987 dev_err(di->dev, "no btemp platform data supplied\n");
diff --git a/drivers/power/ab8500_charger.c b/drivers/power/ab8500_charger.c
index e2b4accbec88..d2303d0b7c75 100644
--- a/drivers/power/ab8500_charger.c
+++ b/drivers/power/ab8500_charger.c
@@ -2534,10 +2534,15 @@ static int __devexit ab8500_charger_remove(struct platform_device *pdev)
2534static int __devinit ab8500_charger_probe(struct platform_device *pdev) 2534static int __devinit ab8500_charger_probe(struct platform_device *pdev)
2535{ 2535{
2536 int irq, i, charger_status, ret = 0; 2536 int irq, i, charger_status, ret = 0;
2537 struct abx500_bm_plat_data *plat_data; 2537 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
2538 struct ab8500_charger *di;
2538 2539
2539 struct ab8500_charger *di = 2540 if (!plat_data) {
2540 kzalloc(sizeof(struct ab8500_charger), GFP_KERNEL); 2541 dev_err(&pdev->dev, "No platform data\n");
2542 return -EINVAL;
2543 }
2544
2545 di = kzalloc(sizeof(*di), GFP_KERNEL);
2541 if (!di) 2546 if (!di)
2542 return -ENOMEM; 2547 return -ENOMEM;
2543 2548
@@ -2550,9 +2555,7 @@ static int __devinit ab8500_charger_probe(struct platform_device *pdev)
2550 spin_lock_init(&di->usb_state.usb_lock); 2555 spin_lock_init(&di->usb_state.usb_lock);
2551 2556
2552 /* get charger specific platform data */ 2557 /* get charger specific platform data */
2553 plat_data = pdev->dev.platform_data;
2554 di->pdata = plat_data->charger; 2558 di->pdata = plat_data->charger;
2555
2556 if (!di->pdata) { 2559 if (!di->pdata) {
2557 dev_err(di->dev, "no charger platform data supplied\n"); 2560 dev_err(di->dev, "no charger platform data supplied\n");
2558 ret = -EINVAL; 2561 ret = -EINVAL;
diff --git a/drivers/power/ab8500_fg.c b/drivers/power/ab8500_fg.c
index c22f2f05657e..bf022255994c 100644
--- a/drivers/power/ab8500_fg.c
+++ b/drivers/power/ab8500_fg.c
@@ -2446,10 +2446,15 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
2446{ 2446{
2447 int i, irq; 2447 int i, irq;
2448 int ret = 0; 2448 int ret = 0;
2449 struct abx500_bm_plat_data *plat_data; 2449 struct abx500_bm_plat_data *plat_data = pdev->dev.platform_data;
2450 struct ab8500_fg *di;
2451
2452 if (!plat_data) {
2453 dev_err(&pdev->dev, "No platform data\n");
2454 return -EINVAL;
2455 }
2450 2456
2451 struct ab8500_fg *di = 2457 di = kzalloc(sizeof(*di), GFP_KERNEL);
2452 kzalloc(sizeof(struct ab8500_fg), GFP_KERNEL);
2453 if (!di) 2458 if (!di)
2454 return -ENOMEM; 2459 return -ENOMEM;
2455 2460
@@ -2461,7 +2466,6 @@ static int __devinit ab8500_fg_probe(struct platform_device *pdev)
2461 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); 2466 di->gpadc = ab8500_gpadc_get("ab8500-gpadc.0");
2462 2467
2463 /* get fg specific platform data */ 2468 /* get fg specific platform data */
2464 plat_data = pdev->dev.platform_data;
2465 di->pdata = plat_data->fg; 2469 di->pdata = plat_data->fg;
2466 if (!di->pdata) { 2470 if (!di->pdata) {
2467 dev_err(di->dev, "no fg platform data supplied\n"); 2471 dev_err(di->dev, "no fg platform data supplied\n");
diff --git a/drivers/power/charger-manager.c b/drivers/power/charger-manager.c
index 9eca9f1ff0ea..86935ec18954 100644
--- a/drivers/power/charger-manager.c
+++ b/drivers/power/charger-manager.c
@@ -23,6 +23,16 @@
23#include <linux/power/charger-manager.h> 23#include <linux/power/charger-manager.h>
24#include <linux/regulator/consumer.h> 24#include <linux/regulator/consumer.h>
25 25
26static const char * const default_event_names[] = {
27 [CM_EVENT_UNKNOWN] = "Unknown",
28 [CM_EVENT_BATT_FULL] = "Battery Full",
29 [CM_EVENT_BATT_IN] = "Battery Inserted",
30 [CM_EVENT_BATT_OUT] = "Battery Pulled Out",
31 [CM_EVENT_EXT_PWR_IN_OUT] = "External Power Attach/Detach",
32 [CM_EVENT_CHG_START_STOP] = "Charging Start/Stop",
33 [CM_EVENT_OTHERS] = "Other battery events"
34};
35
26/* 36/*
27 * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for 37 * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for
28 * delayed works so that we can run delayed works with CM_JIFFIES_SMALL 38 * delayed works so that we can run delayed works with CM_JIFFIES_SMALL
@@ -57,6 +67,12 @@ static bool cm_suspended;
57static bool cm_rtc_set; 67static bool cm_rtc_set;
58static unsigned long cm_suspend_duration_ms; 68static unsigned long cm_suspend_duration_ms;
59 69
70/* About normal (not suspended) monitoring */
71static unsigned long polling_jiffy = ULONG_MAX; /* ULONG_MAX: no polling */
72static unsigned long next_polling; /* Next appointed polling time */
73static struct workqueue_struct *cm_wq; /* init at driver add */
74static struct delayed_work cm_monitor_work; /* init at driver add */
75
60/* Global charger-manager description */ 76/* Global charger-manager description */
61static struct charger_global_desc *g_desc; /* init with setup_charger_manager */ 77static struct charger_global_desc *g_desc; /* init with setup_charger_manager */
62 78
@@ -71,6 +87,11 @@ static bool is_batt_present(struct charger_manager *cm)
71 int i, ret; 87 int i, ret;
72 88
73 switch (cm->desc->battery_present) { 89 switch (cm->desc->battery_present) {
90 case CM_BATTERY_PRESENT:
91 present = true;
92 break;
93 case CM_NO_BATTERY:
94 break;
74 case CM_FUEL_GAUGE: 95 case CM_FUEL_GAUGE:
75 ret = cm->fuel_gauge->get_property(cm->fuel_gauge, 96 ret = cm->fuel_gauge->get_property(cm->fuel_gauge,
76 POWER_SUPPLY_PROP_PRESENT, &val); 97 POWER_SUPPLY_PROP_PRESENT, &val);
@@ -279,6 +300,26 @@ static int try_charger_enable(struct charger_manager *cm, bool enable)
279} 300}
280 301
281/** 302/**
303 * try_charger_restart - Restart charging.
304 * @cm: the Charger Manager representing the battery.
305 *
306 * Restart charging by turning the charger off and then on again.
307 */
308static int try_charger_restart(struct charger_manager *cm)
309{
310 int err;
311
312 if (cm->emergency_stop)
313 return -EAGAIN;
314
315 err = try_charger_enable(cm, false);
316 if (err)
317 return err;
318
319 return try_charger_enable(cm, true);
320}
321
322/**
282 * uevent_notify - Let users know something has changed. 323 * uevent_notify - Let users know something has changed.
283 * @cm: the Charger Manager representing the battery. 324 * @cm: the Charger Manager representing the battery.
284 * @event: the event string. 325 * @event: the event string.
@@ -334,6 +375,46 @@ static void uevent_notify(struct charger_manager *cm, const char *event)
334} 375}
335 376
336/** 377/**
378 * fullbatt_vchk - Check voltage drop some time after the "FULL" event.
379 * @work: the work_struct appointing the function
380 *
381 * If a user has designated "fullbatt_vchkdrop_ms/uV" values with
382 * charger_desc, Charger Manager checks voltage drop after the battery
383 * "FULL" event. It checks whether the voltage has dropped more than
384 * fullbatt_vchkdrop_uV by calling this function after fullbatt_vchkdrop_ms.
385 */
386static void fullbatt_vchk(struct work_struct *work)
387{
388 struct delayed_work *dwork = to_delayed_work(work);
389 struct charger_manager *cm = container_of(dwork,
390 struct charger_manager, fullbatt_vchk_work);
391 struct charger_desc *desc = cm->desc;
392 int batt_uV, err, diff;
393
394 /* remove the appointment for fullbatt_vchk */
395 cm->fullbatt_vchk_jiffies_at = 0;
396
397 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
398 return;
399
400 err = get_batt_uV(cm, &batt_uV);
401 if (err) {
402 dev_err(cm->dev, "%s: get_batt_uV error(%d).\n", __func__, err);
403 return;
404 }
405
406 diff = cm->fullbatt_vchk_uV;
407 diff -= batt_uV;
408
409 dev_dbg(cm->dev, "VBATT dropped %duV after full-batt.\n", diff);
410
411 if (diff > desc->fullbatt_vchkdrop_uV) {
412 try_charger_restart(cm);
413 uevent_notify(cm, "Recharge");
414 }
415}
416
417/**
337 * _cm_monitor - Monitor the temperature and return true for exceptions. 418 * _cm_monitor - Monitor the temperature and return true for exceptions.
338 * @cm: the Charger Manager representing the battery. 419 * @cm: the Charger Manager representing the battery.
339 * 420 *
@@ -392,6 +473,131 @@ static bool cm_monitor(void)
392 return stop; 473 return stop;
393} 474}
394 475
476/**
477 * _setup_polling - Setup the next instance of polling.
478 * @work: work_struct of the function _setup_polling.
479 */
480static void _setup_polling(struct work_struct *work)
481{
482 unsigned long min = ULONG_MAX;
483 struct charger_manager *cm;
484 bool keep_polling = false;
485 unsigned long _next_polling;
486
487 mutex_lock(&cm_list_mtx);
488
489 list_for_each_entry(cm, &cm_list, entry) {
490 if (is_polling_required(cm) && cm->desc->polling_interval_ms) {
491 keep_polling = true;
492
493 if (min > cm->desc->polling_interval_ms)
494 min = cm->desc->polling_interval_ms;
495 }
496 }
497
498 polling_jiffy = msecs_to_jiffies(min);
499 if (polling_jiffy <= CM_JIFFIES_SMALL)
500 polling_jiffy = CM_JIFFIES_SMALL + 1;
501
502 if (!keep_polling)
503 polling_jiffy = ULONG_MAX;
504 if (polling_jiffy == ULONG_MAX)
505 goto out;
506
507 WARN(cm_wq == NULL, "charger-manager: workqueue not initialized"
508 ". try it later. %s\n", __func__);
509
510 _next_polling = jiffies + polling_jiffy;
511
512 if (!delayed_work_pending(&cm_monitor_work) ||
513 (delayed_work_pending(&cm_monitor_work) &&
514 time_after(next_polling, _next_polling))) {
515 cancel_delayed_work_sync(&cm_monitor_work);
516 next_polling = jiffies + polling_jiffy;
517 queue_delayed_work(cm_wq, &cm_monitor_work, polling_jiffy);
518 }
519
520out:
521 mutex_unlock(&cm_list_mtx);
522}
523static DECLARE_WORK(setup_polling, _setup_polling);
524
525/**
526 * cm_monitor_poller - The Monitor / Poller.
527 * @work: work_struct of the function cm_monitor_poller
528 *
529 * During non-suspended state, cm_monitor_poller is used to poll and monitor
530 * the batteries.
531 */
532static void cm_monitor_poller(struct work_struct *work)
533{
534 cm_monitor();
535 schedule_work(&setup_polling);
536}
537
538/**
539 * fullbatt_handler - Event handler for CM_EVENT_BATT_FULL
540 * @cm: the Charger Manager representing the battery.
541 */
542static void fullbatt_handler(struct charger_manager *cm)
543{
544 struct charger_desc *desc = cm->desc;
545
546 if (!desc->fullbatt_vchkdrop_uV || !desc->fullbatt_vchkdrop_ms)
547 goto out;
548
549 if (cm_suspended)
550 device_set_wakeup_capable(cm->dev, true);
551
552 if (delayed_work_pending(&cm->fullbatt_vchk_work))
553 cancel_delayed_work(&cm->fullbatt_vchk_work);
554 queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
555 msecs_to_jiffies(desc->fullbatt_vchkdrop_ms));
556 cm->fullbatt_vchk_jiffies_at = jiffies + msecs_to_jiffies(
557 desc->fullbatt_vchkdrop_ms);
558
559 if (cm->fullbatt_vchk_jiffies_at == 0)
560 cm->fullbatt_vchk_jiffies_at = 1;
561
562out:
563 dev_info(cm->dev, "EVENT_HANDLE: Battery Fully Charged.\n");
564 uevent_notify(cm, default_event_names[CM_EVENT_BATT_FULL]);
565}
566
567/**
568 * battout_handler - Event handler for CM_EVENT_BATT_OUT
569 * @cm: the Charger Manager representing the battery.
570 */
571static void battout_handler(struct charger_manager *cm)
572{
573 if (cm_suspended)
574 device_set_wakeup_capable(cm->dev, true);
575
576 if (!is_batt_present(cm)) {
577 dev_emerg(cm->dev, "Battery Pulled Out!\n");
578 uevent_notify(cm, default_event_names[CM_EVENT_BATT_OUT]);
579 } else {
580 uevent_notify(cm, "Battery Reinserted?");
581 }
582}
583
584/**
585 * misc_event_handler - Handler for other events
586 * @cm: the Charger Manager representing the battery.
587 * @type: the type of charger event.
588 */
589static void misc_event_handler(struct charger_manager *cm,
590 enum cm_event_types type)
591{
592 if (cm_suspended)
593 device_set_wakeup_capable(cm->dev, true);
594
595 if (!delayed_work_pending(&cm_monitor_work) &&
596 is_polling_required(cm) && cm->desc->polling_interval_ms)
597 schedule_work(&setup_polling);
598 uevent_notify(cm, default_event_names[type]);
599}
600
395static int charger_get_property(struct power_supply *psy, 601static int charger_get_property(struct power_supply *psy,
396 enum power_supply_property psp, 602 enum power_supply_property psp,
397 union power_supply_propval *val) 603 union power_supply_propval *val)
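All of the new behaviour above — periodic polling through cm_wq, the full-battery voltage-drop recheck, and the default uevent strings — is driven by fields of struct charger_desc supplied by the platform. A hedged illustration of a platform description that would exercise it; the numeric values are placeholders, only the field names come from charger-manager.h and this patch:

#include <linux/power/charger-manager.h>

static struct charger_desc example_charger_desc = {
	.psy_name		= "battery",
	.polling_mode		= CM_POLL_EXTERNAL_POWER_ONLY,
	.polling_interval_ms	= 30000,	/* poll every 30 s while awake */
	.fullbatt_uV		= 4200000,	/* treat 4.2 V as full */
	.fullbatt_vchkdrop_ms	= 60000,	/* recheck 60 s after "FULL" */
	.fullbatt_vchkdrop_uV	= 100000,	/* restart charging on a 100 mV drop */
	/* .psy_charger_stat, .charger_regulators, .psy_fuel_gauge, ... */
};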
@@ -613,6 +819,21 @@ static bool cm_setup_timer(void)
613 mutex_lock(&cm_list_mtx); 819 mutex_lock(&cm_list_mtx);
614 820
615 list_for_each_entry(cm, &cm_list, entry) { 821 list_for_each_entry(cm, &cm_list, entry) {
822 unsigned int fbchk_ms = 0;
823
824 /* fullbatt_vchk is required. setup timer for that */
825 if (cm->fullbatt_vchk_jiffies_at) {
826 fbchk_ms = jiffies_to_msecs(cm->fullbatt_vchk_jiffies_at
827 - jiffies);
828 if (time_is_before_eq_jiffies(
829 cm->fullbatt_vchk_jiffies_at) ||
830 msecs_to_jiffies(fbchk_ms) < CM_JIFFIES_SMALL) {
831 fullbatt_vchk(&cm->fullbatt_vchk_work.work);
832 fbchk_ms = 0;
833 }
834 }
835 CM_MIN_VALID(wakeup_ms, fbchk_ms);
836
616 /* Skip if polling is not required for this CM */ 837 /* Skip if polling is not required for this CM */
617 if (!is_polling_required(cm) && !cm->emergency_stop) 838 if (!is_polling_required(cm) && !cm->emergency_stop)
618 continue; 839 continue;
@@ -672,6 +893,23 @@ static bool cm_setup_timer(void)
672 return false; 893 return false;
673} 894}
674 895
896static void _cm_fbchk_in_suspend(struct charger_manager *cm)
897{
898 unsigned long jiffy_now = jiffies;
899
900 if (!cm->fullbatt_vchk_jiffies_at)
901 return;
902
903 if (g_desc && g_desc->assume_timer_stops_in_suspend)
904 jiffy_now += msecs_to_jiffies(cm_suspend_duration_ms);
905
906 /* Execute now if it's going to be executed not too long after */
907 jiffy_now += CM_JIFFIES_SMALL;
908
909 if (time_after_eq(jiffy_now, cm->fullbatt_vchk_jiffies_at))
910 fullbatt_vchk(&cm->fullbatt_vchk_work.work);
911}
912
675/** 913/**
676 * cm_suspend_again - Determine whether suspend again or not 914 * cm_suspend_again - Determine whether suspend again or not
677 * 915 *
@@ -693,6 +931,8 @@ bool cm_suspend_again(void)
693 ret = true; 931 ret = true;
694 mutex_lock(&cm_list_mtx); 932 mutex_lock(&cm_list_mtx);
695 list_for_each_entry(cm, &cm_list, entry) { 933 list_for_each_entry(cm, &cm_list, entry) {
934 _cm_fbchk_in_suspend(cm);
935
696 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) || 936 if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) ||
697 cm->status_save_batt != is_batt_present(cm)) { 937 cm->status_save_batt != is_batt_present(cm)) {
698 ret = false; 938 ret = false;
@@ -796,6 +1036,21 @@ static int charger_manager_probe(struct platform_device *pdev)
796 memcpy(cm->desc, desc, sizeof(struct charger_desc)); 1036 memcpy(cm->desc, desc, sizeof(struct charger_desc));
797 cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */ 1037 cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */
798 1038
1039 /*
1040 * The following two do not need to be errors.
1041 * Users may intentionally ignore those two features.
1042 */
1043 if (desc->fullbatt_uV == 0) {
1044 dev_info(&pdev->dev, "Ignoring full-battery voltage threshold"
1045 " as it is not supplied.");
1046 }
1047 if (!desc->fullbatt_vchkdrop_ms || !desc->fullbatt_vchkdrop_uV) {
1048 dev_info(&pdev->dev, "Disabling full-battery voltage drop "
1049 "checking mechanism as it is not supplied.");
1050 desc->fullbatt_vchkdrop_ms = 0;
1051 desc->fullbatt_vchkdrop_uV = 0;
1052 }
1053
799 if (!desc->charger_regulators || desc->num_charger_regulators < 1) { 1054 if (!desc->charger_regulators || desc->num_charger_regulators < 1) {
800 ret = -EINVAL; 1055 ret = -EINVAL;
801 dev_err(&pdev->dev, "charger_regulators undefined.\n"); 1056 dev_err(&pdev->dev, "charger_regulators undefined.\n");
@@ -903,6 +1158,8 @@ static int charger_manager_probe(struct platform_device *pdev)
903 cm->charger_psy.num_properties++; 1158 cm->charger_psy.num_properties++;
904 } 1159 }
905 1160
1161 INIT_DELAYED_WORK(&cm->fullbatt_vchk_work, fullbatt_vchk);
1162
906 ret = power_supply_register(NULL, &cm->charger_psy); 1163 ret = power_supply_register(NULL, &cm->charger_psy);
907 if (ret) { 1164 if (ret) {
908 dev_err(&pdev->dev, "Cannot register charger-manager with" 1165 dev_err(&pdev->dev, "Cannot register charger-manager with"
@@ -928,6 +1185,15 @@ static int charger_manager_probe(struct platform_device *pdev)
928 list_add(&cm->entry, &cm_list); 1185 list_add(&cm->entry, &cm_list);
929 mutex_unlock(&cm_list_mtx); 1186 mutex_unlock(&cm_list_mtx);
930 1187
1188 /*
1189 * Charger-manager is capable of waking up the system from sleep
1190 * when an event happens through cm_notify_event()
1191 */
1192 device_init_wakeup(&pdev->dev, true);
1193 device_set_wakeup_capable(&pdev->dev, false);
1194
1195 schedule_work(&setup_polling);
1196
931 return 0; 1197 return 0;
932 1198
933err_chg_enable: 1199err_chg_enable:
@@ -958,9 +1224,17 @@ static int __devexit charger_manager_remove(struct platform_device *pdev)
958 list_del(&cm->entry); 1224 list_del(&cm->entry);
959 mutex_unlock(&cm_list_mtx); 1225 mutex_unlock(&cm_list_mtx);
960 1226
1227 if (work_pending(&setup_polling))
1228 cancel_work_sync(&setup_polling);
1229 if (delayed_work_pending(&cm_monitor_work))
1230 cancel_delayed_work_sync(&cm_monitor_work);
1231
961 regulator_bulk_free(desc->num_charger_regulators, 1232 regulator_bulk_free(desc->num_charger_regulators,
962 desc->charger_regulators); 1233 desc->charger_regulators);
963 power_supply_unregister(&cm->charger_psy); 1234 power_supply_unregister(&cm->charger_psy);
1235
1236 try_charger_enable(cm, false);
1237
964 kfree(cm->charger_psy.properties); 1238 kfree(cm->charger_psy.properties);
965 kfree(cm->charger_stat); 1239 kfree(cm->charger_stat);
966 kfree(cm->desc); 1240 kfree(cm->desc);
@@ -975,6 +1249,18 @@ static const struct platform_device_id charger_manager_id[] = {
975}; 1249};
976MODULE_DEVICE_TABLE(platform, charger_manager_id); 1250MODULE_DEVICE_TABLE(platform, charger_manager_id);
977 1251
1252static int cm_suspend_noirq(struct device *dev)
1253{
1254 int ret = 0;
1255
1256 if (device_may_wakeup(dev)) {
1257 device_set_wakeup_capable(dev, false);
1258 ret = -EAGAIN;
1259 }
1260
1261 return ret;
1262}
1263
978static int cm_suspend_prepare(struct device *dev) 1264static int cm_suspend_prepare(struct device *dev)
979{ 1265{
980 struct charger_manager *cm = dev_get_drvdata(dev); 1266 struct charger_manager *cm = dev_get_drvdata(dev);
@@ -1000,6 +1286,8 @@ static int cm_suspend_prepare(struct device *dev)
1000 cm_suspended = true; 1286 cm_suspended = true;
1001 } 1287 }
1002 1288
1289 if (delayed_work_pending(&cm->fullbatt_vchk_work))
1290 cancel_delayed_work(&cm->fullbatt_vchk_work);
1003 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm); 1291 cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm);
1004 cm->status_save_batt = is_batt_present(cm); 1292 cm->status_save_batt = is_batt_present(cm);
1005 1293
@@ -1027,11 +1315,40 @@ static void cm_suspend_complete(struct device *dev)
1027 cm_rtc_set = false; 1315 cm_rtc_set = false;
1028 } 1316 }
1029 1317
1318 /* Re-enqueue delayed work (fullbatt_vchk_work) */
1319 if (cm->fullbatt_vchk_jiffies_at) {
1320 unsigned long delay = 0;
1321 unsigned long now = jiffies + CM_JIFFIES_SMALL;
1322
1323 if (time_after_eq(now, cm->fullbatt_vchk_jiffies_at)) {
1324 delay = (unsigned long)((long)now
1325 - (long)(cm->fullbatt_vchk_jiffies_at));
1326 delay = jiffies_to_msecs(delay);
1327 } else {
1328 delay = 0;
1329 }
1330
1331 /*
1332 * Account for cm_suspend_duration_ms if
1333 * assume_timer_stops_in_suspend is active
1334 */
1335 if (g_desc && g_desc->assume_timer_stops_in_suspend) {
1336 if (delay > cm_suspend_duration_ms)
1337 delay -= cm_suspend_duration_ms;
1338 else
1339 delay = 0;
1340 }
1341
1342 queue_delayed_work(cm_wq, &cm->fullbatt_vchk_work,
1343 msecs_to_jiffies(delay));
1344 }
1345 device_set_wakeup_capable(cm->dev, false);
1030 uevent_notify(cm, NULL); 1346 uevent_notify(cm, NULL);
1031} 1347}
1032 1348
1033static const struct dev_pm_ops charger_manager_pm = { 1349static const struct dev_pm_ops charger_manager_pm = {
1034 .prepare = cm_suspend_prepare, 1350 .prepare = cm_suspend_prepare,
1351 .suspend_noirq = cm_suspend_noirq,
1035 .complete = cm_suspend_complete, 1352 .complete = cm_suspend_complete,
1036}; 1353};
1037 1354
@@ -1048,16 +1365,91 @@ static struct platform_driver charger_manager_driver = {
1048 1365
1049static int __init charger_manager_init(void) 1366static int __init charger_manager_init(void)
1050{ 1367{
1368 cm_wq = create_freezable_workqueue("charger_manager");
1369 INIT_DELAYED_WORK(&cm_monitor_work, cm_monitor_poller);
1370
1051 return platform_driver_register(&charger_manager_driver); 1371 return platform_driver_register(&charger_manager_driver);
1052} 1372}
1053late_initcall(charger_manager_init); 1373late_initcall(charger_manager_init);
1054 1374
1055static void __exit charger_manager_cleanup(void) 1375static void __exit charger_manager_cleanup(void)
1056{ 1376{
1377 destroy_workqueue(cm_wq);
1378 cm_wq = NULL;
1379
1057 platform_driver_unregister(&charger_manager_driver); 1380 platform_driver_unregister(&charger_manager_driver);
1058} 1381}
1059module_exit(charger_manager_cleanup); 1382module_exit(charger_manager_cleanup);
1060 1383
1384/**
1385 * find_power_supply - find the associated power_supply of charger
1386 * @cm: the Charger Manager representing the battery
1387 * @psy: pointer to instance of charger's power_supply
1388 */
1389static bool find_power_supply(struct charger_manager *cm,
1390 struct power_supply *psy)
1391{
1392 int i;
1393 bool found = false;
1394
1395 for (i = 0; cm->charger_stat[i]; i++) {
1396 if (psy == cm->charger_stat[i]) {
1397 found = true;
1398 break;
1399 }
1400 }
1401
1402 return found;
1403}
1404
1405/**
1406 * cm_notify_event - charger driver notify Charger Manager of charger event
1407 * @psy: pointer to instance of charger's power_supply
1408 * @type: type of charger event
1409 * @msg: optional message passed to uevent_notify function
1410 */
1411void cm_notify_event(struct power_supply *psy, enum cm_event_types type,
1412 char *msg)
1413{
1414 struct charger_manager *cm;
1415 bool found_power_supply = false;
1416
1417 if (psy == NULL)
1418 return;
1419
1420 mutex_lock(&cm_list_mtx);
1421 list_for_each_entry(cm, &cm_list, entry) {
1422 found_power_supply = find_power_supply(cm, psy);
1423 if (found_power_supply)
1424 break;
1425 }
1426 mutex_unlock(&cm_list_mtx);
1427
1428 if (!found_power_supply)
1429 return;
1430
1431 switch (type) {
1432 case CM_EVENT_BATT_FULL:
1433 fullbatt_handler(cm);
1434 break;
1435 case CM_EVENT_BATT_OUT:
1436 battout_handler(cm);
1437 break;
1438 case CM_EVENT_BATT_IN:
1439 case CM_EVENT_EXT_PWR_IN_OUT ... CM_EVENT_CHG_START_STOP:
1440 misc_event_handler(cm, type);
1441 break;
1442 case CM_EVENT_UNKNOWN:
1443 case CM_EVENT_OTHERS:
1444 uevent_notify(cm, msg ? msg : default_event_names[type]);
1445 break;
1446 default:
1447 dev_err(cm->dev, "%s type not specified.\n", __func__);
1448 break;
1449 }
1450}
1451EXPORT_SYMBOL_GPL(cm_notify_event);
1452
1061MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); 1453MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
1062MODULE_DESCRIPTION("Charger Manager"); 1454MODULE_DESCRIPTION("Charger Manager");
1063MODULE_LICENSE("GPL"); 1455MODULE_LICENSE("GPL");
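cm_notify_event() above is exported so that charger and fuel-gauge drivers can push events into Charger Manager the moment they happen instead of waiting for the next poll. A hedged sketch of a caller; the power_supply instance and the interrupt handler are illustrative, only cm_notify_event() and the CM_EVENT_* types come from this patch:

#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

/* registered elsewhere by the charger driver and listed in charger_desc */
static struct power_supply example_charger_psy;

static irqreturn_t example_charger_full_irq(int irq, void *data)
{
	/* tell Charger Manager the hardware reports a fully charged battery */
	cm_notify_event(&example_charger_psy, CM_EVENT_BATT_FULL, NULL);
	return IRQ_HANDLED;
}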
diff --git a/drivers/power/ds2781_battery.c b/drivers/power/ds2781_battery.c
index ca0d653d0a7a..975684a40f15 100644
--- a/drivers/power/ds2781_battery.c
+++ b/drivers/power/ds2781_battery.c
@@ -643,9 +643,7 @@ static ssize_t ds2781_read_param_eeprom_bin(struct file *filp,
643 struct power_supply *psy = to_power_supply(dev); 643 struct power_supply *psy = to_power_supply(dev);
644 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 644 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
645 645
646 count = min_t(loff_t, count, 646 count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
647 DS2781_EEPROM_BLOCK1_END -
648 DS2781_EEPROM_BLOCK1_START + 1 - off);
649 647
650 return ds2781_read_block(dev_info, buf, 648 return ds2781_read_block(dev_info, buf,
651 DS2781_EEPROM_BLOCK1_START + off, count); 649 DS2781_EEPROM_BLOCK1_START + off, count);
@@ -661,9 +659,7 @@ static ssize_t ds2781_write_param_eeprom_bin(struct file *filp,
661 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 659 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
662 int ret; 660 int ret;
663 661
664 count = min_t(loff_t, count, 662 count = min_t(loff_t, count, DS2781_PARAM_EEPROM_SIZE - off);
665 DS2781_EEPROM_BLOCK1_END -
666 DS2781_EEPROM_BLOCK1_START + 1 - off);
667 663
668 ret = ds2781_write(dev_info, buf, 664 ret = ds2781_write(dev_info, buf,
669 DS2781_EEPROM_BLOCK1_START + off, count); 665 DS2781_EEPROM_BLOCK1_START + off, count);
@@ -682,7 +678,7 @@ static struct bin_attribute ds2781_param_eeprom_bin_attr = {
682 .name = "param_eeprom", 678 .name = "param_eeprom",
683 .mode = S_IRUGO | S_IWUSR, 679 .mode = S_IRUGO | S_IWUSR,
684 }, 680 },
685 .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1, 681 .size = DS2781_PARAM_EEPROM_SIZE,
686 .read = ds2781_read_param_eeprom_bin, 682 .read = ds2781_read_param_eeprom_bin,
687 .write = ds2781_write_param_eeprom_bin, 683 .write = ds2781_write_param_eeprom_bin,
688}; 684};
@@ -696,9 +692,7 @@ static ssize_t ds2781_read_user_eeprom_bin(struct file *filp,
696 struct power_supply *psy = to_power_supply(dev); 692 struct power_supply *psy = to_power_supply(dev);
697 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 693 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
698 694
699 count = min_t(loff_t, count, 695 count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
700 DS2781_EEPROM_BLOCK0_END -
701 DS2781_EEPROM_BLOCK0_START + 1 - off);
702 696
703 return ds2781_read_block(dev_info, buf, 697 return ds2781_read_block(dev_info, buf,
704 DS2781_EEPROM_BLOCK0_START + off, count); 698 DS2781_EEPROM_BLOCK0_START + off, count);
@@ -715,9 +709,7 @@ static ssize_t ds2781_write_user_eeprom_bin(struct file *filp,
715 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); 709 struct ds2781_device_info *dev_info = to_ds2781_device_info(psy);
716 int ret; 710 int ret;
717 711
718 count = min_t(loff_t, count, 712 count = min_t(loff_t, count, DS2781_USER_EEPROM_SIZE - off);
719 DS2781_EEPROM_BLOCK0_END -
720 DS2781_EEPROM_BLOCK0_START + 1 - off);
721 713
722 ret = ds2781_write(dev_info, buf, 714 ret = ds2781_write(dev_info, buf,
723 DS2781_EEPROM_BLOCK0_START + off, count); 715 DS2781_EEPROM_BLOCK0_START + off, count);
@@ -736,7 +728,7 @@ static struct bin_attribute ds2781_user_eeprom_bin_attr = {
736 .name = "user_eeprom", 728 .name = "user_eeprom",
737 .mode = S_IRUGO | S_IWUSR, 729 .mode = S_IRUGO | S_IWUSR,
738 }, 730 },
739 .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1, 731 .size = DS2781_USER_EEPROM_SIZE,
740 .read = ds2781_read_user_eeprom_bin, 732 .read = ds2781_read_user_eeprom_bin,
741 .write = ds2781_write_user_eeprom_bin, 733 .write = ds2781_write_user_eeprom_bin,
742}; 734};
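The open-coded BLOCK0/BLOCK1 length arithmetic is folded into two size macros. Their definitions are not part of these hunks, but from the expressions they replace they are presumably defined (in ds2781_battery.h or near the top of this file) as:

#define DS2781_PARAM_EEPROM_SIZE \
	(DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1)
#define DS2781_USER_EEPROM_SIZE \
	(DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1)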
diff --git a/drivers/power/isp1704_charger.c b/drivers/power/isp1704_charger.c
index 39eb50f35f09..e5ccd2979773 100644
--- a/drivers/power/isp1704_charger.c
+++ b/drivers/power/isp1704_charger.c
@@ -474,13 +474,13 @@ static int __devinit isp1704_charger_probe(struct platform_device *pdev)
474fail2: 474fail2:
475 power_supply_unregister(&isp->psy); 475 power_supply_unregister(&isp->psy);
476fail1: 476fail1:
477 isp1704_charger_set_power(isp, 0);
477 usb_put_transceiver(isp->phy); 478 usb_put_transceiver(isp->phy);
478fail0: 479fail0:
479 kfree(isp); 480 kfree(isp);
480 481
481 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret); 482 dev_err(&pdev->dev, "failed to register isp1704 with error %d\n", ret);
482 483
483 isp1704_charger_set_power(isp, 0);
484 return ret; 484 return ret;
485} 485}
486 486
diff --git a/drivers/power/max17042_battery.c b/drivers/power/max17042_battery.c
index 04620c2cb388..140788b309f8 100644
--- a/drivers/power/max17042_battery.c
+++ b/drivers/power/max17042_battery.c
@@ -28,6 +28,7 @@
28#include <linux/i2c.h> 28#include <linux/i2c.h>
29#include <linux/delay.h> 29#include <linux/delay.h>
30#include <linux/interrupt.h> 30#include <linux/interrupt.h>
31#include <linux/pm.h>
31#include <linux/mod_devicetable.h> 32#include <linux/mod_devicetable.h>
32#include <linux/power_supply.h> 33#include <linux/power_supply.h>
33#include <linux/power/max17042_battery.h> 34#include <linux/power/max17042_battery.h>
@@ -61,9 +62,13 @@
61#define dP_ACC_100 0x1900 62#define dP_ACC_100 0x1900
62#define dP_ACC_200 0x3200 63#define dP_ACC_200 0x3200
63 64
65#define MAX17042_IC_VERSION 0x0092
66#define MAX17047_IC_VERSION 0x00AC /* same for max17050 */
67
64struct max17042_chip { 68struct max17042_chip {
65 struct i2c_client *client; 69 struct i2c_client *client;
66 struct power_supply battery; 70 struct power_supply battery;
71 enum max170xx_chip_type chip_type;
67 struct max17042_platform_data *pdata; 72 struct max17042_platform_data *pdata;
68 struct work_struct work; 73 struct work_struct work;
69 int init_complete; 74 int init_complete;
@@ -105,6 +110,7 @@ static enum power_supply_property max17042_battery_props[] = {
105 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 110 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
106 POWER_SUPPLY_PROP_VOLTAGE_NOW, 111 POWER_SUPPLY_PROP_VOLTAGE_NOW,
107 POWER_SUPPLY_PROP_VOLTAGE_AVG, 112 POWER_SUPPLY_PROP_VOLTAGE_AVG,
113 POWER_SUPPLY_PROP_VOLTAGE_OCV,
108 POWER_SUPPLY_PROP_CAPACITY, 114 POWER_SUPPLY_PROP_CAPACITY,
109 POWER_SUPPLY_PROP_CHARGE_FULL, 115 POWER_SUPPLY_PROP_CHARGE_FULL,
110 POWER_SUPPLY_PROP_TEMP, 116 POWER_SUPPLY_PROP_TEMP,
@@ -150,7 +156,10 @@ static int max17042_get_property(struct power_supply *psy,
150 val->intval *= 20000; /* Units of LSB = 20mV */ 156 val->intval *= 20000; /* Units of LSB = 20mV */
151 break; 157 break;
152 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: 158 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
153 ret = max17042_read_reg(chip->client, MAX17042_V_empty); 159 if (chip->chip_type == MAX17042)
160 ret = max17042_read_reg(chip->client, MAX17042_V_empty);
161 else
162 ret = max17042_read_reg(chip->client, MAX17047_V_empty);
154 if (ret < 0) 163 if (ret < 0)
155 return ret; 164 return ret;
156 165
@@ -171,6 +180,13 @@ static int max17042_get_property(struct power_supply *psy,
171 180
172 val->intval = ret * 625 / 8; 181 val->intval = ret * 625 / 8;
173 break; 182 break;
183 case POWER_SUPPLY_PROP_VOLTAGE_OCV:
184 ret = max17042_read_reg(chip->client, MAX17042_OCVInternal);
185 if (ret < 0)
186 return ret;
187
188 val->intval = ret * 625 / 8;
189 break;
174 case POWER_SUPPLY_PROP_CAPACITY: 190 case POWER_SUPPLY_PROP_CAPACITY:
175 ret = max17042_read_reg(chip->client, MAX17042_RepSOC); 191 ret = max17042_read_reg(chip->client, MAX17042_RepSOC);
176 if (ret < 0) 192 if (ret < 0)
@@ -325,11 +341,10 @@ static inline int max17042_model_data_compare(struct max17042_chip *chip,
325static int max17042_init_model(struct max17042_chip *chip) 341static int max17042_init_model(struct max17042_chip *chip)
326{ 342{
327 int ret; 343 int ret;
328 int table_size = 344 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
329 sizeof(chip->pdata->config_data->cell_char_tbl)/sizeof(u16);
330 u16 *temp_data; 345 u16 *temp_data;
331 346
332 temp_data = kzalloc(table_size, GFP_KERNEL); 347 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
333 if (!temp_data) 348 if (!temp_data)
334 return -ENOMEM; 349 return -ENOMEM;
335 350
@@ -354,12 +369,11 @@ static int max17042_init_model(struct max17042_chip *chip)
354static int max17042_verify_model_lock(struct max17042_chip *chip) 369static int max17042_verify_model_lock(struct max17042_chip *chip)
355{ 370{
356 int i; 371 int i;
357 int table_size = 372 int table_size = ARRAY_SIZE(chip->pdata->config_data->cell_char_tbl);
358 sizeof(chip->pdata->config_data->cell_char_tbl);
359 u16 *temp_data; 373 u16 *temp_data;
360 int ret = 0; 374 int ret = 0;
361 375
362 temp_data = kzalloc(table_size, GFP_KERNEL); 376 temp_data = kcalloc(table_size, sizeof(*temp_data), GFP_KERNEL);
363 if (!temp_data) 377 if (!temp_data)
364 return -ENOMEM; 378 return -ENOMEM;
365 379
@@ -382,6 +396,9 @@ static void max17042_write_config_regs(struct max17042_chip *chip)
382 max17042_write_reg(chip->client, MAX17042_FilterCFG, 396 max17042_write_reg(chip->client, MAX17042_FilterCFG,
383 config->filter_cfg); 397 config->filter_cfg);
384 max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg); 398 max17042_write_reg(chip->client, MAX17042_RelaxCFG, config->relax_cfg);
399 if (chip->chip_type == MAX17047)
400 max17042_write_reg(chip->client, MAX17047_FullSOCThr,
401 config->full_soc_thresh);
385} 402}
386 403
387static void max17042_write_custom_regs(struct max17042_chip *chip) 404static void max17042_write_custom_regs(struct max17042_chip *chip)
@@ -392,12 +409,23 @@ static void max17042_write_custom_regs(struct max17042_chip *chip)
392 config->rcomp0); 409 config->rcomp0);
393 max17042_write_verify_reg(chip->client, MAX17042_TempCo, 410 max17042_write_verify_reg(chip->client, MAX17042_TempCo,
394 config->tcompc0); 411 config->tcompc0);
395 max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
396 config->empty_tempco);
397 max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
398 config->kempty0);
399 max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm, 412 max17042_write_verify_reg(chip->client, MAX17042_ICHGTerm,
400 config->ichgt_term); 413 config->ichgt_term);
414 if (chip->chip_type == MAX17042) {
415 max17042_write_reg(chip->client, MAX17042_EmptyTempCo,
416 config->empty_tempco);
417 max17042_write_verify_reg(chip->client, MAX17042_K_empty0,
418 config->kempty0);
419 } else {
420 max17042_write_verify_reg(chip->client, MAX17047_QRTbl00,
421 config->qrtbl00);
422 max17042_write_verify_reg(chip->client, MAX17047_QRTbl10,
423 config->qrtbl10);
424 max17042_write_verify_reg(chip->client, MAX17047_QRTbl20,
425 config->qrtbl20);
426 max17042_write_verify_reg(chip->client, MAX17047_QRTbl30,
427 config->qrtbl30);
428 }
401} 429}
402 430
403static void max17042_update_capacity_regs(struct max17042_chip *chip) 431static void max17042_update_capacity_regs(struct max17042_chip *chip)
@@ -453,6 +481,8 @@ static void max17042_load_new_capacity_params(struct max17042_chip *chip)
453 config->design_cap); 481 config->design_cap);
454 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom, 482 max17042_write_verify_reg(chip->client, MAX17042_FullCAPNom,
455 config->fullcapnom); 483 config->fullcapnom);
484 /* Update SOC register with new SOC */
485 max17042_write_reg(chip->client, MAX17042_RepSOC, vfSoc);
456} 486}
457 487
458/* 488/*
@@ -489,20 +519,28 @@ static inline void max17042_override_por_values(struct max17042_chip *chip)
489 519
490 max17042_override_por(client, MAX17042_FullCAP, config->fullcap); 520 max17042_override_por(client, MAX17042_FullCAP, config->fullcap);
491 max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom); 521 max17042_override_por(client, MAX17042_FullCAPNom, config->fullcapnom);
492 max17042_override_por(client, MAX17042_SOC_empty, config->socempty); 522 if (chip->chip_type == MAX17042)
523 max17042_override_por(client, MAX17042_SOC_empty,
524 config->socempty);
493 max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty); 525 max17042_override_por(client, MAX17042_LAvg_empty, config->lavg_empty);
494 max17042_override_por(client, MAX17042_dQacc, config->dqacc); 526 max17042_override_por(client, MAX17042_dQacc, config->dqacc);
495 max17042_override_por(client, MAX17042_dPacc, config->dpacc); 527 max17042_override_por(client, MAX17042_dPacc, config->dpacc);
496 528
497 max17042_override_por(client, MAX17042_V_empty, config->vempty); 529 if (chip->chip_type == MAX17042)
530 max17042_override_por(client, MAX17042_V_empty, config->vempty);
531 else
532 max17042_override_por(client, MAX17047_V_empty, config->vempty);
498 max17042_override_por(client, MAX17042_TempNom, config->temp_nom); 533 max17042_override_por(client, MAX17042_TempNom, config->temp_nom);
499 max17042_override_por(client, MAX17042_TempLim, config->temp_lim); 534 max17042_override_por(client, MAX17042_TempLim, config->temp_lim);
500 max17042_override_por(client, MAX17042_FCTC, config->fctc); 535 max17042_override_por(client, MAX17042_FCTC, config->fctc);
501 max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0); 536 max17042_override_por(client, MAX17042_RCOMP0, config->rcomp0);
502 max17042_override_por(client, MAX17042_TempCo, config->tcompc0); 537 max17042_override_por(client, MAX17042_TempCo, config->tcompc0);
503 max17042_override_por(client, MAX17042_EmptyTempCo, 538 if (chip->chip_type) {
504 config->empty_tempco); 539 max17042_override_por(client, MAX17042_EmptyTempCo,
505 max17042_override_por(client, MAX17042_K_empty0, config->kempty0); 540 config->empty_tempco);
541 max17042_override_por(client, MAX17042_K_empty0,
542 config->kempty0);
543 }
506} 544}
507 545
508static int max17042_init_chip(struct max17042_chip *chip) 546static int max17042_init_chip(struct max17042_chip *chip)
@@ -659,7 +697,19 @@ static int __devinit max17042_probe(struct i2c_client *client,
659 697
660 i2c_set_clientdata(client, chip); 698 i2c_set_clientdata(client, chip);
661 699
662 chip->battery.name = "max17042_battery"; 700 ret = max17042_read_reg(chip->client, MAX17042_DevName);
701 if (ret == MAX17042_IC_VERSION) {
702 dev_dbg(&client->dev, "chip type max17042 detected\n");
703 chip->chip_type = MAX17042;
704 } else if (ret == MAX17047_IC_VERSION) {
705 dev_dbg(&client->dev, "chip type max17047/50 detected\n");
706 chip->chip_type = MAX17047;
707 } else {
708 dev_err(&client->dev, "device version mismatch: %x\n", ret);
709 return -EIO;
710 }
711
712 chip->battery.name = "max170xx_battery";
663 chip->battery.type = POWER_SUPPLY_TYPE_BATTERY; 713 chip->battery.type = POWER_SUPPLY_TYPE_BATTERY;
664 chip->battery.get_property = max17042_get_property; 714 chip->battery.get_property = max17042_get_property;
665 chip->battery.properties = max17042_battery_props; 715 chip->battery.properties = max17042_battery_props;
@@ -683,6 +733,12 @@ static int __devinit max17042_probe(struct i2c_client *client,
683 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007); 733 max17042_write_reg(client, MAX17042_LearnCFG, 0x0007);
684 } 734 }
685 735
736 ret = power_supply_register(&client->dev, &chip->battery);
737 if (ret) {
738 dev_err(&client->dev, "failed: power supply register\n");
739 return ret;
740 }
741
686 if (client->irq) { 742 if (client->irq) {
687 ret = request_threaded_irq(client->irq, NULL, 743 ret = request_threaded_irq(client->irq, NULL,
688 max17042_thread_handler, 744 max17042_thread_handler,
@@ -693,13 +749,14 @@ static int __devinit max17042_probe(struct i2c_client *client,
693 reg |= CONFIG_ALRT_BIT_ENBL; 749 reg |= CONFIG_ALRT_BIT_ENBL;
694 max17042_write_reg(client, MAX17042_CONFIG, reg); 750 max17042_write_reg(client, MAX17042_CONFIG, reg);
695 max17042_set_soc_threshold(chip, 1); 751 max17042_set_soc_threshold(chip, 1);
696 } else 752 } else {
753 client->irq = 0;
697 dev_err(&client->dev, "%s(): cannot get IRQ\n", 754 dev_err(&client->dev, "%s(): cannot get IRQ\n",
698 __func__); 755 __func__);
756 }
699 } 757 }
700 758
701 reg = max17042_read_reg(chip->client, MAX17042_STATUS); 759 reg = max17042_read_reg(chip->client, MAX17042_STATUS);
702
703 if (reg & STATUS_POR_BIT) { 760 if (reg & STATUS_POR_BIT) {
704 INIT_WORK(&chip->work, max17042_init_worker); 761 INIT_WORK(&chip->work, max17042_init_worker);
705 schedule_work(&chip->work); 762 schedule_work(&chip->work);
@@ -707,23 +764,65 @@ static int __devinit max17042_probe(struct i2c_client *client,
707 chip->init_complete = 1; 764 chip->init_complete = 1;
708 } 765 }
709 766
710 ret = power_supply_register(&client->dev, &chip->battery); 767 return 0;
711 if (ret)
712 dev_err(&client->dev, "failed: power supply register\n");
713 return ret;
714} 768}
715 769
716static int __devexit max17042_remove(struct i2c_client *client) 770static int __devexit max17042_remove(struct i2c_client *client)
717{ 771{
718 struct max17042_chip *chip = i2c_get_clientdata(client); 772 struct max17042_chip *chip = i2c_get_clientdata(client);
719 773
774 if (client->irq)
775 free_irq(client->irq, chip);
720 power_supply_unregister(&chip->battery); 776 power_supply_unregister(&chip->battery);
721 return 0; 777 return 0;
722} 778}
723 779
780#ifdef CONFIG_PM
781static int max17042_suspend(struct device *dev)
782{
783 struct max17042_chip *chip = dev_get_drvdata(dev);
784
785 /*
786 * disable the irq and enable irq_wake
787 * capability to the interrupt line.
788 */
789 if (chip->client->irq) {
790 disable_irq(chip->client->irq);
791 enable_irq_wake(chip->client->irq);
792 }
793
794 return 0;
795}
796
797static int max17042_resume(struct device *dev)
798{
799 struct max17042_chip *chip = dev_get_drvdata(dev);
800
801 if (chip->client->irq) {
802 disable_irq_wake(chip->client->irq);
803 enable_irq(chip->client->irq);
804 /* re-program the SOC thresholds to 1% change */
805 max17042_set_soc_threshold(chip, 1);
806 }
807
808 return 0;
809}
810
811static const struct dev_pm_ops max17042_pm_ops = {
812 .suspend = max17042_suspend,
813 .resume = max17042_resume,
814};
815
816#define MAX17042_PM_OPS (&max17042_pm_ops)
817#else
818#define MAX17042_PM_OPS NULL
819#endif
820
724#ifdef CONFIG_OF 821#ifdef CONFIG_OF
725static const struct of_device_id max17042_dt_match[] = { 822static const struct of_device_id max17042_dt_match[] = {
726 { .compatible = "maxim,max17042" }, 823 { .compatible = "maxim,max17042" },
824 { .compatible = "maxim,max17047" },
825 { .compatible = "maxim,max17050" },
727 { }, 826 { },
728}; 827};
729MODULE_DEVICE_TABLE(of, max17042_dt_match); 828MODULE_DEVICE_TABLE(of, max17042_dt_match);
@@ -731,6 +830,8 @@ MODULE_DEVICE_TABLE(of, max17042_dt_match);
731 830
732static const struct i2c_device_id max17042_id[] = { 831static const struct i2c_device_id max17042_id[] = {
733 { "max17042", 0 }, 832 { "max17042", 0 },
833 { "max17047", 1 },
834 { "max17050", 2 },
734 { } 835 { }
735}; 836};
736MODULE_DEVICE_TABLE(i2c, max17042_id); 837MODULE_DEVICE_TABLE(i2c, max17042_id);
@@ -739,6 +840,7 @@ static struct i2c_driver max17042_i2c_driver = {
739 .driver = { 840 .driver = {
740 .name = "max17042", 841 .name = "max17042",
741 .of_match_table = of_match_ptr(max17042_dt_match), 842 .of_match_table = of_match_ptr(max17042_dt_match),
843 .pm = MAX17042_PM_OPS,
742 }, 844 },
743 .probe = max17042_probe, 845 .probe = max17042_probe,
744 .remove = __devexit_p(max17042_remove), 846 .remove = __devexit_p(max17042_remove),
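With the new "max17047"/"max17050" I2C IDs and device-tree compatibles, a board only has to declare the part under its own name; probe then tells the variants apart by reading MAX17042_DevName. A hedged board-file illustration — the bus number and slave address are placeholders, only the ID string comes from the table above:

#include <linux/i2c.h>
#include <linux/init.h>

static struct i2c_board_info example_fg_board_info __initdata = {
	I2C_BOARD_INFO("max17047", 0x36),	/* address is board-specific */
};

/* from the board init code, on whatever adapter the gauge sits on:
 *	i2c_register_board_info(1, &example_fg_board_info, 1);
 */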
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 4368e7d61316..4150747f9186 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -146,6 +146,7 @@ static struct device_attribute power_supply_attrs[] = {
146 POWER_SUPPLY_ATTR(voltage_min_design), 146 POWER_SUPPLY_ATTR(voltage_min_design),
147 POWER_SUPPLY_ATTR(voltage_now), 147 POWER_SUPPLY_ATTR(voltage_now),
148 POWER_SUPPLY_ATTR(voltage_avg), 148 POWER_SUPPLY_ATTR(voltage_avg),
149 POWER_SUPPLY_ATTR(voltage_ocv),
149 POWER_SUPPLY_ATTR(current_max), 150 POWER_SUPPLY_ATTR(current_max),
150 POWER_SUPPLY_ATTR(current_now), 151 POWER_SUPPLY_ATTR(current_now),
151 POWER_SUPPLY_ATTR(current_avg), 152 POWER_SUPPLY_ATTR(current_avg),
diff --git a/drivers/power/sbs-battery.c b/drivers/power/sbs-battery.c
index 06b659d91790..a5b6849d4123 100644
--- a/drivers/power/sbs-battery.c
+++ b/drivers/power/sbs-battery.c
@@ -89,7 +89,7 @@ static const struct chip_data {
89 [REG_CURRENT] = 89 [REG_CURRENT] =
90 SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767), 90 SBS_DATA(POWER_SUPPLY_PROP_CURRENT_NOW, 0x0A, -32768, 32767),
91 [REG_CAPACITY] = 91 [REG_CAPACITY] =
92 SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0E, 0, 100), 92 SBS_DATA(POWER_SUPPLY_PROP_CAPACITY, 0x0D, 0, 100),
93 [REG_REMAINING_CAPACITY] = 93 [REG_REMAINING_CAPACITY] =
94 SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535), 94 SBS_DATA(POWER_SUPPLY_PROP_ENERGY_NOW, 0x0F, 0, 65535),
95 [REG_REMAINING_CAPACITY_CHARGE] = 95 [REG_REMAINING_CAPACITY_CHARGE] =
diff --git a/drivers/power/smb347-charger.c b/drivers/power/smb347-charger.c
index ce1694d1a365..f8eedd8a676f 100644
--- a/drivers/power/smb347-charger.c
+++ b/drivers/power/smb347-charger.c
@@ -11,7 +11,7 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#include <linux/debugfs.h> 14#include <linux/err.h>
15#include <linux/gpio.h> 15#include <linux/gpio.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/module.h> 17#include <linux/module.h>
@@ -21,7 +21,7 @@
21#include <linux/mutex.h> 21#include <linux/mutex.h>
22#include <linux/power_supply.h> 22#include <linux/power_supply.h>
23#include <linux/power/smb347-charger.h> 23#include <linux/power/smb347-charger.h>
24#include <linux/seq_file.h> 24#include <linux/regmap.h>
25 25
26/* 26/*
27 * Configuration registers. These are mirrored to volatile RAM and can be 27 * Configuration registers. These are mirrored to volatile RAM and can be
@@ -39,6 +39,7 @@
39#define CFG_CURRENT_LIMIT_DC_SHIFT 4 39#define CFG_CURRENT_LIMIT_DC_SHIFT 4
40#define CFG_CURRENT_LIMIT_USB_MASK 0x0f 40#define CFG_CURRENT_LIMIT_USB_MASK 0x0f
41#define CFG_FLOAT_VOLTAGE 0x03 41#define CFG_FLOAT_VOLTAGE 0x03
42#define CFG_FLOAT_VOLTAGE_FLOAT_MASK 0x3f
42#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0 43#define CFG_FLOAT_VOLTAGE_THRESHOLD_MASK 0xc0
43#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6 44#define CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT 6
44#define CFG_STAT 0x05 45#define CFG_STAT 0x05
@@ -113,29 +114,31 @@
113#define STAT_C_CHARGER_ERROR BIT(6) 114#define STAT_C_CHARGER_ERROR BIT(6)
114#define STAT_E 0x3f 115#define STAT_E 0x3f
115 116
117#define SMB347_MAX_REGISTER 0x3f
118
116/** 119/**
117 * struct smb347_charger - smb347 charger instance 120 * struct smb347_charger - smb347 charger instance
118 * @lock: protects concurrent access to online variables 121 * @lock: protects concurrent access to online variables
119 * @client: pointer to i2c client 122 * @dev: pointer to device
123 * @regmap: pointer to driver regmap
120 * @mains: power_supply instance for AC/DC power 124 * @mains: power_supply instance for AC/DC power
121 * @usb: power_supply instance for USB power 125 * @usb: power_supply instance for USB power
122 * @battery: power_supply instance for battery 126 * @battery: power_supply instance for battery
123 * @mains_online: is AC/DC input connected 127 * @mains_online: is AC/DC input connected
124 * @usb_online: is USB input connected 128 * @usb_online: is USB input connected
125 * @charging_enabled: is charging enabled 129 * @charging_enabled: is charging enabled
126 * @dentry: for debugfs
127 * @pdata: pointer to platform data 130 * @pdata: pointer to platform data
128 */ 131 */
129struct smb347_charger { 132struct smb347_charger {
130 struct mutex lock; 133 struct mutex lock;
131 struct i2c_client *client; 134 struct device *dev;
135 struct regmap *regmap;
132 struct power_supply mains; 136 struct power_supply mains;
133 struct power_supply usb; 137 struct power_supply usb;
134 struct power_supply battery; 138 struct power_supply battery;
135 bool mains_online; 139 bool mains_online;
136 bool usb_online; 140 bool usb_online;
137 bool charging_enabled; 141 bool charging_enabled;
138 struct dentry *dentry;
139 const struct smb347_charger_platform_data *pdata; 142 const struct smb347_charger_platform_data *pdata;
140}; 143};
141 144
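SMB347_MAX_REGISTER and the new regmap pointer replace the private smb347_read()/smb347_write() helpers deleted below; register access now goes through regmap_read()/regmap_update_bits(). The regmap_config itself sits outside the hunks shown here, but a sketch consistent with those accesses would look roughly like this (the real table will also flag volatile and writeable registers):

#include <linux/regmap.h>

static const struct regmap_config smb347_regmap = {
	.reg_bits	= 8,
	.val_bits	= 8,
	.max_register	= SMB347_MAX_REGISTER,
};

/* in probe: smb->regmap = regmap_init_i2c(client, &smb347_regmap); */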
@@ -193,14 +196,6 @@ static const unsigned int ccc_tbl[] = {
193 1200000, 196 1200000,
194}; 197};
195 198
196/* Convert register value to current using lookup table */
197static int hw_to_current(const unsigned int *tbl, size_t size, unsigned int val)
198{
199 if (val >= size)
200 return -EINVAL;
201 return tbl[val];
202}
203
204/* Convert current to register value using lookup table */ 199/* Convert current to register value using lookup table */
205static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val) 200static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
206{ 201{
@@ -212,43 +207,22 @@ static int current_to_hw(const unsigned int *tbl, size_t size, unsigned int val)
212 return i > 0 ? i - 1 : -EINVAL; 207 return i > 0 ? i - 1 : -EINVAL;
213} 208}
214 209
215static int smb347_read(struct smb347_charger *smb, u8 reg)
216{
217 int ret;
218
219 ret = i2c_smbus_read_byte_data(smb->client, reg);
220 if (ret < 0)
221 dev_warn(&smb->client->dev, "failed to read reg 0x%x: %d\n",
222 reg, ret);
223 return ret;
224}
225
226static int smb347_write(struct smb347_charger *smb, u8 reg, u8 val)
227{
228 int ret;
229
230 ret = i2c_smbus_write_byte_data(smb->client, reg, val);
231 if (ret < 0)
232 dev_warn(&smb->client->dev, "failed to write reg 0x%x: %d\n",
233 reg, ret);
234 return ret;
235}
236
237/** 210/**
238 * smb347_update_status - updates the charging status 211 * smb347_update_ps_status - refreshes the power source status
239 * @smb: pointer to smb347 charger instance 212 * @smb: pointer to smb347 charger instance
240 * 213 *
241 * Function checks status of the charging and updates internal state 214 * Function checks whether any power source is connected to the charger and
242 * accordingly. Returns %0 if there is no change in status, %1 if the 215 * updates internal state accordingly. If there is a change from the previous state
243 * status has changed and negative errno in case of failure. 216 * the function returns %1, otherwise %0, and a negative errno in case of error.
244 */ 217 */
245static int smb347_update_status(struct smb347_charger *smb) 218static int smb347_update_ps_status(struct smb347_charger *smb)
246{ 219{
247 bool usb = false; 220 bool usb = false;
248 bool dc = false; 221 bool dc = false;
222 unsigned int val;
249 int ret; 223 int ret;
250 224
251 ret = smb347_read(smb, IRQSTAT_E); 225 ret = regmap_read(smb->regmap, IRQSTAT_E, &val);
252 if (ret < 0) 226 if (ret < 0)
253 return ret; 227 return ret;
254 228
@@ -257,9 +231,9 @@ static int smb347_update_status(struct smb347_charger *smb)
257 * platform data _and_ whether corresponding undervoltage is set. 231 * platform data _and_ whether corresponding undervoltage is set.
258 */ 232 */
259 if (smb->pdata->use_mains) 233 if (smb->pdata->use_mains)
260 dc = !(ret & IRQSTAT_E_DCIN_UV_STAT); 234 dc = !(val & IRQSTAT_E_DCIN_UV_STAT);
261 if (smb->pdata->use_usb) 235 if (smb->pdata->use_usb)
262 usb = !(ret & IRQSTAT_E_USBIN_UV_STAT); 236 usb = !(val & IRQSTAT_E_USBIN_UV_STAT);
263 237
264 mutex_lock(&smb->lock); 238 mutex_lock(&smb->lock);
265 ret = smb->mains_online != dc || smb->usb_online != usb; 239 ret = smb->mains_online != dc || smb->usb_online != usb;
@@ -271,15 +245,15 @@ static int smb347_update_status(struct smb347_charger *smb)
271} 245}
272 246
273/* 247/*
274 * smb347_is_online - returns whether input power source is connected 248 * smb347_is_ps_online - returns whether input power source is connected
275 * @smb: pointer to smb347 charger instance 249 * @smb: pointer to smb347 charger instance
276 * 250 *
277 * Returns %true if input power source is connected. Note that this is 251 * Returns %true if input power source is connected. Note that this is
278 * dependent on what platform has configured for usable power sources. For 252 * dependent on what platform has configured for usable power sources. For
279 * example if USB is disabled, this will return %false even if the USB 253 * example if USB is disabled, this will return %false even if the USB cable
280 * cable is connected. 254 * is connected.
281 */ 255 */
282static bool smb347_is_online(struct smb347_charger *smb) 256static bool smb347_is_ps_online(struct smb347_charger *smb)
283{ 257{
284 bool ret; 258 bool ret;
285 259
@@ -299,16 +273,17 @@ static bool smb347_is_online(struct smb347_charger *smb)
299 */ 273 */
300static int smb347_charging_status(struct smb347_charger *smb) 274static int smb347_charging_status(struct smb347_charger *smb)
301{ 275{
276 unsigned int val;
302 int ret; 277 int ret;
303 278
304 if (!smb347_is_online(smb)) 279 if (!smb347_is_ps_online(smb))
305 return 0; 280 return 0;
306 281
307 ret = smb347_read(smb, STAT_C); 282 ret = regmap_read(smb->regmap, STAT_C, &val);
308 if (ret < 0) 283 if (ret < 0)
309 return 0; 284 return 0;
310 285
311 return (ret & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT; 286 return (val & STAT_C_CHG_MASK) >> STAT_C_CHG_SHIFT;
312} 287}
313 288
314static int smb347_charging_set(struct smb347_charger *smb, bool enable) 289static int smb347_charging_set(struct smb347_charger *smb, bool enable)
@@ -316,27 +291,17 @@ static int smb347_charging_set(struct smb347_charger *smb, bool enable)
316 int ret = 0; 291 int ret = 0;
317 292
318 if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) { 293 if (smb->pdata->enable_control != SMB347_CHG_ENABLE_SW) {
319 dev_dbg(&smb->client->dev, 294 dev_dbg(smb->dev, "charging enable/disable in SW disabled\n");
320 "charging enable/disable in SW disabled\n");
321 return 0; 295 return 0;
322 } 296 }
323 297
324 mutex_lock(&smb->lock); 298 mutex_lock(&smb->lock);
325 if (smb->charging_enabled != enable) { 299 if (smb->charging_enabled != enable) {
326 ret = smb347_read(smb, CMD_A); 300 ret = regmap_update_bits(smb->regmap, CMD_A, CMD_A_CHG_ENABLED,
327 if (ret < 0) 301 enable ? CMD_A_CHG_ENABLED : 0);
328 goto out; 302 if (!ret)
329 303 smb->charging_enabled = enable;
330 smb->charging_enabled = enable;
331
332 if (enable)
333 ret |= CMD_A_CHG_ENABLED;
334 else
335 ret &= ~CMD_A_CHG_ENABLED;
336
337 ret = smb347_write(smb, CMD_A, ret);
338 } 304 }
339out:
340 mutex_unlock(&smb->lock); 305 mutex_unlock(&smb->lock);
341 return ret; 306 return ret;
342} 307}
@@ -351,7 +316,7 @@ static inline int smb347_charging_disable(struct smb347_charger *smb)
351 return smb347_charging_set(smb, false); 316 return smb347_charging_set(smb, false);
352} 317}
353 318
354static int smb347_update_online(struct smb347_charger *smb) 319static int smb347_start_stop_charging(struct smb347_charger *smb)
355{ 320{
356 int ret; 321 int ret;
357 322
@@ -360,16 +325,14 @@ static int smb347_update_online(struct smb347_charger *smb)
360 * disable or enable the charging. We do it manually because it 325 * disable or enable the charging. We do it manually because it
361 * depends on how the platform has configured the valid inputs. 326 * depends on how the platform has configured the valid inputs.
362 */ 327 */
363 if (smb347_is_online(smb)) { 328 if (smb347_is_ps_online(smb)) {
364 ret = smb347_charging_enable(smb); 329 ret = smb347_charging_enable(smb);
365 if (ret < 0) 330 if (ret < 0)
366 dev_err(&smb->client->dev, 331 dev_err(smb->dev, "failed to enable charging\n");
367 "failed to enable charging\n");
368 } else { 332 } else {
369 ret = smb347_charging_disable(smb); 333 ret = smb347_charging_disable(smb);
370 if (ret < 0) 334 if (ret < 0)
371 dev_err(&smb->client->dev, 335 dev_err(smb->dev, "failed to disable charging\n");
372 "failed to disable charging\n");
373 } 336 }
374 337
375 return ret; 338 return ret;
@@ -377,112 +340,120 @@ static int smb347_update_online(struct smb347_charger *smb)
377 340
378static int smb347_set_charge_current(struct smb347_charger *smb) 341static int smb347_set_charge_current(struct smb347_charger *smb)
379{ 342{
380 int ret, val; 343 int ret;
381
382 ret = smb347_read(smb, CFG_CHARGE_CURRENT);
383 if (ret < 0)
384 return ret;
385 344
386 if (smb->pdata->max_charge_current) { 345 if (smb->pdata->max_charge_current) {
387 val = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl), 346 ret = current_to_hw(fcc_tbl, ARRAY_SIZE(fcc_tbl),
388 smb->pdata->max_charge_current); 347 smb->pdata->max_charge_current);
389 if (val < 0) 348 if (ret < 0)
390 return val; 349 return ret;
391 350
392 ret &= ~CFG_CHARGE_CURRENT_FCC_MASK; 351 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
393 ret |= val << CFG_CHARGE_CURRENT_FCC_SHIFT; 352 CFG_CHARGE_CURRENT_FCC_MASK,
353 ret << CFG_CHARGE_CURRENT_FCC_SHIFT);
354 if (ret < 0)
355 return ret;
394 } 356 }
395 357
396 if (smb->pdata->pre_charge_current) { 358 if (smb->pdata->pre_charge_current) {
397 val = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl), 359 ret = current_to_hw(pcc_tbl, ARRAY_SIZE(pcc_tbl),
398 smb->pdata->pre_charge_current); 360 smb->pdata->pre_charge_current);
399 if (val < 0) 361 if (ret < 0)
400 return val; 362 return ret;
401 363
402 ret &= ~CFG_CHARGE_CURRENT_PCC_MASK; 364 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
403 ret |= val << CFG_CHARGE_CURRENT_PCC_SHIFT; 365 CFG_CHARGE_CURRENT_PCC_MASK,
366 ret << CFG_CHARGE_CURRENT_PCC_SHIFT);
367 if (ret < 0)
368 return ret;
404 } 369 }
405 370
406 if (smb->pdata->termination_current) { 371 if (smb->pdata->termination_current) {
407 val = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl), 372 ret = current_to_hw(tc_tbl, ARRAY_SIZE(tc_tbl),
408 smb->pdata->termination_current); 373 smb->pdata->termination_current);
409 if (val < 0) 374 if (ret < 0)
410 return val; 375 return ret;
411 376
412 ret &= ~CFG_CHARGE_CURRENT_TC_MASK; 377 ret = regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
413 ret |= val; 378 CFG_CHARGE_CURRENT_TC_MASK, ret);
379 if (ret < 0)
380 return ret;
414 } 381 }
415 382
416 return smb347_write(smb, CFG_CHARGE_CURRENT, ret); 383 return 0;
417} 384}
418 385
419static int smb347_set_current_limits(struct smb347_charger *smb) 386static int smb347_set_current_limits(struct smb347_charger *smb)
420{ 387{
421 int ret, val; 388 int ret;
422
423 ret = smb347_read(smb, CFG_CURRENT_LIMIT);
424 if (ret < 0)
425 return ret;
426 389
427 if (smb->pdata->mains_current_limit) { 390 if (smb->pdata->mains_current_limit) {
428 val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl), 391 ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
429 smb->pdata->mains_current_limit); 392 smb->pdata->mains_current_limit);
430 if (val < 0) 393 if (ret < 0)
431 return val; 394 return ret;
432 395
433 ret &= ~CFG_CURRENT_LIMIT_DC_MASK; 396 ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
434 ret |= val << CFG_CURRENT_LIMIT_DC_SHIFT; 397 CFG_CURRENT_LIMIT_DC_MASK,
398 ret << CFG_CURRENT_LIMIT_DC_SHIFT);
399 if (ret < 0)
400 return ret;
435 } 401 }
436 402
437 if (smb->pdata->usb_hc_current_limit) { 403 if (smb->pdata->usb_hc_current_limit) {
438 val = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl), 404 ret = current_to_hw(icl_tbl, ARRAY_SIZE(icl_tbl),
439 smb->pdata->usb_hc_current_limit); 405 smb->pdata->usb_hc_current_limit);
440 if (val < 0) 406 if (ret < 0)
441 return val; 407 return ret;
442 408
443 ret &= ~CFG_CURRENT_LIMIT_USB_MASK; 409 ret = regmap_update_bits(smb->regmap, CFG_CURRENT_LIMIT,
444 ret |= val; 410 CFG_CURRENT_LIMIT_USB_MASK, ret);
411 if (ret < 0)
412 return ret;
445 } 413 }
446 414
447 return smb347_write(smb, CFG_CURRENT_LIMIT, ret); 415 return 0;
448} 416}
449 417
450static int smb347_set_voltage_limits(struct smb347_charger *smb) 418static int smb347_set_voltage_limits(struct smb347_charger *smb)
451{ 419{
452 int ret, val; 420 int ret;
453
454 ret = smb347_read(smb, CFG_FLOAT_VOLTAGE);
455 if (ret < 0)
456 return ret;
457 421
458 if (smb->pdata->pre_to_fast_voltage) { 422 if (smb->pdata->pre_to_fast_voltage) {
459 val = smb->pdata->pre_to_fast_voltage; 423 ret = smb->pdata->pre_to_fast_voltage;
460 424
461 /* uV */ 425 /* uV */
462 val = clamp_val(val, 2400000, 3000000) - 2400000; 426 ret = clamp_val(ret, 2400000, 3000000) - 2400000;
463 val /= 200000; 427 ret /= 200000;
464 428
465 ret &= ~CFG_FLOAT_VOLTAGE_THRESHOLD_MASK; 429 ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
466 ret |= val << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT; 430 CFG_FLOAT_VOLTAGE_THRESHOLD_MASK,
431 ret << CFG_FLOAT_VOLTAGE_THRESHOLD_SHIFT);
432 if (ret < 0)
433 return ret;
467 } 434 }
468 435
469 if (smb->pdata->max_charge_voltage) { 436 if (smb->pdata->max_charge_voltage) {
470 val = smb->pdata->max_charge_voltage; 437 ret = smb->pdata->max_charge_voltage;
471 438
472 /* uV */ 439 /* uV */
473 val = clamp_val(val, 3500000, 4500000) - 3500000; 440 ret = clamp_val(ret, 3500000, 4500000) - 3500000;
474 val /= 20000; 441 ret /= 20000;
475 442
476 ret |= val; 443 ret = regmap_update_bits(smb->regmap, CFG_FLOAT_VOLTAGE,
444 CFG_FLOAT_VOLTAGE_FLOAT_MASK, ret);
445 if (ret < 0)
446 return ret;
477 } 447 }
478 448
479 return smb347_write(smb, CFG_FLOAT_VOLTAGE, ret); 449 return 0;
480} 450}
481 451
482static int smb347_set_temp_limits(struct smb347_charger *smb) 452static int smb347_set_temp_limits(struct smb347_charger *smb)
483{ 453{
484 bool enable_therm_monitor = false; 454 bool enable_therm_monitor = false;
485 int ret, val; 455 int ret = 0;
456 int val;
486 457
487 if (smb->pdata->chip_temp_threshold) { 458 if (smb->pdata->chip_temp_threshold) {
488 val = smb->pdata->chip_temp_threshold; 459 val = smb->pdata->chip_temp_threshold;
@@ -491,22 +462,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
491 val = clamp_val(val, 100, 130) - 100; 462 val = clamp_val(val, 100, 130) - 100;
492 val /= 10; 463 val /= 10;
493 464
494 ret = smb347_read(smb, CFG_OTG); 465 ret = regmap_update_bits(smb->regmap, CFG_OTG,
495 if (ret < 0) 466 CFG_OTG_TEMP_THRESHOLD_MASK,
496 return ret; 467 val << CFG_OTG_TEMP_THRESHOLD_SHIFT);
497
498 ret &= ~CFG_OTG_TEMP_THRESHOLD_MASK;
499 ret |= val << CFG_OTG_TEMP_THRESHOLD_SHIFT;
500
501 ret = smb347_write(smb, CFG_OTG, ret);
502 if (ret < 0) 468 if (ret < 0)
503 return ret; 469 return ret;
504 } 470 }
505 471
506 ret = smb347_read(smb, CFG_TEMP_LIMIT);
507 if (ret < 0)
508 return ret;
509
510 if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) { 472 if (smb->pdata->soft_cold_temp_limit != SMB347_TEMP_USE_DEFAULT) {
511 val = smb->pdata->soft_cold_temp_limit; 473 val = smb->pdata->soft_cold_temp_limit;
512 474
@@ -515,8 +477,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
515 /* this goes from higher to lower so invert the value */ 477 /* this goes from higher to lower so invert the value */
516 val = ~val & 0x3; 478 val = ~val & 0x3;
517 479
518 ret &= ~CFG_TEMP_LIMIT_SOFT_COLD_MASK; 480 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
519 ret |= val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT; 481 CFG_TEMP_LIMIT_SOFT_COLD_MASK,
482 val << CFG_TEMP_LIMIT_SOFT_COLD_SHIFT);
483 if (ret < 0)
484 return ret;
520 485
521 enable_therm_monitor = true; 486 enable_therm_monitor = true;
522 } 487 }
@@ -527,8 +492,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
527 val = clamp_val(val, 40, 55) - 40; 492 val = clamp_val(val, 40, 55) - 40;
528 val /= 5; 493 val /= 5;
529 494
530 ret &= ~CFG_TEMP_LIMIT_SOFT_HOT_MASK; 495 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
531 ret |= val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT; 496 CFG_TEMP_LIMIT_SOFT_HOT_MASK,
497 val << CFG_TEMP_LIMIT_SOFT_HOT_SHIFT);
498 if (ret < 0)
499 return ret;
532 500
533 enable_therm_monitor = true; 501 enable_therm_monitor = true;
534 } 502 }
@@ -541,8 +509,11 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
541 /* this goes from higher to lower so invert the value */ 509 /* this goes from higher to lower so invert the value */
542 val = ~val & 0x3; 510 val = ~val & 0x3;
543 511
544 ret &= ~CFG_TEMP_LIMIT_HARD_COLD_MASK; 512 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
545 ret |= val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT; 513 CFG_TEMP_LIMIT_HARD_COLD_MASK,
514 val << CFG_TEMP_LIMIT_HARD_COLD_SHIFT);
515 if (ret < 0)
516 return ret;
546 517
547 enable_therm_monitor = true; 518 enable_therm_monitor = true;
548 } 519 }
@@ -553,16 +524,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
553 val = clamp_val(val, 50, 65) - 50; 524 val = clamp_val(val, 50, 65) - 50;
554 val /= 5; 525 val /= 5;
555 526
556 ret &= ~CFG_TEMP_LIMIT_HARD_HOT_MASK; 527 ret = regmap_update_bits(smb->regmap, CFG_TEMP_LIMIT,
557 ret |= val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT; 528 CFG_TEMP_LIMIT_HARD_HOT_MASK,
529 val << CFG_TEMP_LIMIT_HARD_HOT_SHIFT);
530 if (ret < 0)
531 return ret;
558 532
559 enable_therm_monitor = true; 533 enable_therm_monitor = true;
560 } 534 }
561 535
562 ret = smb347_write(smb, CFG_TEMP_LIMIT, ret);
563 if (ret < 0)
564 return ret;
565
566 /* 536 /*
567 * If any of the temperature limits are set, we also enable the 537 * If any of the temperature limits are set, we also enable the
568 * thermistor monitoring. 538 * thermistor monitoring.
@@ -574,25 +544,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
574 * depending on the configuration. 544 * depending on the configuration.
575 */ 545 */
576 if (enable_therm_monitor) { 546 if (enable_therm_monitor) {
577 ret = smb347_read(smb, CFG_THERM); 547 ret = regmap_update_bits(smb->regmap, CFG_THERM,
578 if (ret < 0) 548 CFG_THERM_MONITOR_DISABLED, 0);
579 return ret;
580
581 ret &= ~CFG_THERM_MONITOR_DISABLED;
582
583 ret = smb347_write(smb, CFG_THERM, ret);
584 if (ret < 0) 549 if (ret < 0)
585 return ret; 550 return ret;
586 } 551 }
587 552
588 if (smb->pdata->suspend_on_hard_temp_limit) { 553 if (smb->pdata->suspend_on_hard_temp_limit) {
589 ret = smb347_read(smb, CFG_SYSOK); 554 ret = regmap_update_bits(smb->regmap, CFG_SYSOK,
590 if (ret < 0) 555 CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED, 0);
591 return ret;
592
593 ret &= ~CFG_SYSOK_SUSPEND_HARD_LIMIT_DISABLED;
594
595 ret = smb347_write(smb, CFG_SYSOK, ret);
596 if (ret < 0) 556 if (ret < 0)
597 return ret; 557 return ret;
598 } 558 }
@@ -601,17 +561,15 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
601 SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) { 561 SMB347_SOFT_TEMP_COMPENSATE_DEFAULT) {
602 val = smb->pdata->soft_temp_limit_compensation & 0x3; 562 val = smb->pdata->soft_temp_limit_compensation & 0x3;
603 563
604 ret = smb347_read(smb, CFG_THERM); 564 ret = regmap_update_bits(smb->regmap, CFG_THERM,
565 CFG_THERM_SOFT_HOT_COMPENSATION_MASK,
566 val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT);
605 if (ret < 0) 567 if (ret < 0)
606 return ret; 568 return ret;
607 569
608 ret &= ~CFG_THERM_SOFT_HOT_COMPENSATION_MASK; 570 ret = regmap_update_bits(smb->regmap, CFG_THERM,
609 ret |= val << CFG_THERM_SOFT_HOT_COMPENSATION_SHIFT; 571 CFG_THERM_SOFT_COLD_COMPENSATION_MASK,
610 572 val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT);
611 ret &= ~CFG_THERM_SOFT_COLD_COMPENSATION_MASK;
612 ret |= val << CFG_THERM_SOFT_COLD_COMPENSATION_SHIFT;
613
614 ret = smb347_write(smb, CFG_THERM, ret);
615 if (ret < 0) 573 if (ret < 0)
616 return ret; 574 return ret;
617 } 575 }
@@ -622,14 +580,9 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
622 if (val < 0) 580 if (val < 0)
623 return val; 581 return val;
624 582
625 ret = smb347_read(smb, CFG_OTG); 583 ret = regmap_update_bits(smb->regmap, CFG_OTG,
626 if (ret < 0) 584 CFG_OTG_CC_COMPENSATION_MASK,
627 return ret; 585 (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT);
628
629 ret &= ~CFG_OTG_CC_COMPENSATION_MASK;
630 ret |= (val & 0x3) << CFG_OTG_CC_COMPENSATION_SHIFT;
631
632 ret = smb347_write(smb, CFG_OTG, ret);
633 if (ret < 0) 586 if (ret < 0)
634 return ret; 587 return ret;
635 } 588 }
@@ -648,22 +601,13 @@ static int smb347_set_temp_limits(struct smb347_charger *smb)
648 */ 601 */
649static int smb347_set_writable(struct smb347_charger *smb, bool writable) 602static int smb347_set_writable(struct smb347_charger *smb, bool writable)
650{ 603{
651 int ret; 604 return regmap_update_bits(smb->regmap, CMD_A, CMD_A_ALLOW_WRITE,
652 605 writable ? CMD_A_ALLOW_WRITE : 0);
653 ret = smb347_read(smb, CMD_A);
654 if (ret < 0)
655 return ret;
656
657 if (writable)
658 ret |= CMD_A_ALLOW_WRITE;
659 else
660 ret &= ~CMD_A_ALLOW_WRITE;
661
662 return smb347_write(smb, CMD_A, ret);
663} 606}
664 607
665static int smb347_hw_init(struct smb347_charger *smb) 608static int smb347_hw_init(struct smb347_charger *smb)
666{ 609{
610 unsigned int val;
667 int ret; 611 int ret;
668 612
669 ret = smb347_set_writable(smb, true); 613 ret = smb347_set_writable(smb, true);
@@ -692,34 +636,19 @@ static int smb347_hw_init(struct smb347_charger *smb)
692 636
693 /* If USB charging is disabled we put the USB in suspend mode */ 637 /* If USB charging is disabled we put the USB in suspend mode */
694 if (!smb->pdata->use_usb) { 638 if (!smb->pdata->use_usb) {
695 ret = smb347_read(smb, CMD_A); 639 ret = regmap_update_bits(smb->regmap, CMD_A,
696 if (ret < 0) 640 CMD_A_SUSPEND_ENABLED,
697 goto fail; 641 CMD_A_SUSPEND_ENABLED);
698
699 ret |= CMD_A_SUSPEND_ENABLED;
700
701 ret = smb347_write(smb, CMD_A, ret);
702 if (ret < 0) 642 if (ret < 0)
703 goto fail; 643 goto fail;
704 } 644 }
705 645
706 ret = smb347_read(smb, CFG_OTHER);
707 if (ret < 0)
708 goto fail;
709
710 /* 646 /*
711 * If configured by platform data, we enable hardware Auto-OTG 647 * If configured by platform data, we enable hardware Auto-OTG
712 * support for driving VBUS. Otherwise we disable it. 648 * support for driving VBUS. Otherwise we disable it.
713 */ 649 */
714 ret &= ~CFG_OTHER_RID_MASK; 650 ret = regmap_update_bits(smb->regmap, CFG_OTHER, CFG_OTHER_RID_MASK,
715 if (smb->pdata->use_usb_otg) 651 smb->pdata->use_usb_otg ? CFG_OTHER_RID_ENABLED_AUTO_OTG : 0);
716 ret |= CFG_OTHER_RID_ENABLED_AUTO_OTG;
717
718 ret = smb347_write(smb, CFG_OTHER, ret);
719 if (ret < 0)
720 goto fail;
721
722 ret = smb347_read(smb, CFG_PIN);
723 if (ret < 0) 652 if (ret < 0)
724 goto fail; 653 goto fail;
725 654
@@ -728,32 +657,33 @@ static int smb347_hw_init(struct smb347_charger *smb)
728 * command register unless pin control is specified in the platform 657 * command register unless pin control is specified in the platform
729 * data. 658 * data.
730 */ 659 */
731 ret &= ~CFG_PIN_EN_CTRL_MASK;
732
733 switch (smb->pdata->enable_control) { 660 switch (smb->pdata->enable_control) {
734 case SMB347_CHG_ENABLE_SW:
735 /* Do nothing, 0 means i2c control */
736 break;
737 case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW: 661 case SMB347_CHG_ENABLE_PIN_ACTIVE_LOW:
738 ret |= CFG_PIN_EN_CTRL_ACTIVE_LOW; 662 val = CFG_PIN_EN_CTRL_ACTIVE_LOW;
739 break; 663 break;
740 case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH: 664 case SMB347_CHG_ENABLE_PIN_ACTIVE_HIGH:
741 ret |= CFG_PIN_EN_CTRL_ACTIVE_HIGH; 665 val = CFG_PIN_EN_CTRL_ACTIVE_HIGH;
666 break;
667 default:
668 val = 0;
742 break; 669 break;
743 } 670 }
744 671
745 /* Disable Automatic Power Source Detection (APSD) interrupt. */ 672 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CTRL_MASK,
746 ret &= ~CFG_PIN_EN_APSD_IRQ; 673 val);
674 if (ret < 0)
675 goto fail;
747 676
748 ret = smb347_write(smb, CFG_PIN, ret); 677 /* Disable Automatic Power Source Detection (APSD) interrupt. */
678 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_APSD_IRQ, 0);
749 if (ret < 0) 679 if (ret < 0)
750 goto fail; 680 goto fail;
751 681
752 ret = smb347_update_status(smb); 682 ret = smb347_update_ps_status(smb);
753 if (ret < 0) 683 if (ret < 0)
754 goto fail; 684 goto fail;
755 685
756 ret = smb347_update_online(smb); 686 ret = smb347_start_stop_charging(smb);
757 687
758fail: 688fail:
759 smb347_set_writable(smb, false); 689 smb347_set_writable(smb, false);
@@ -763,24 +693,25 @@ fail:
763static irqreturn_t smb347_interrupt(int irq, void *data) 693static irqreturn_t smb347_interrupt(int irq, void *data)
764{ 694{
765 struct smb347_charger *smb = data; 695 struct smb347_charger *smb = data;
766 int stat_c, irqstat_e, irqstat_c; 696 unsigned int stat_c, irqstat_e, irqstat_c;
767 irqreturn_t ret = IRQ_NONE; 697 bool handled = false;
698 int ret;
768 699
769 stat_c = smb347_read(smb, STAT_C); 700 ret = regmap_read(smb->regmap, STAT_C, &stat_c);
770 if (stat_c < 0) { 701 if (ret < 0) {
771 dev_warn(&smb->client->dev, "reading STAT_C failed\n"); 702 dev_warn(smb->dev, "reading STAT_C failed\n");
772 return IRQ_NONE; 703 return IRQ_NONE;
773 } 704 }
774 705
775 irqstat_c = smb347_read(smb, IRQSTAT_C); 706 ret = regmap_read(smb->regmap, IRQSTAT_C, &irqstat_c);
776 if (irqstat_c < 0) { 707 if (ret < 0) {
777 dev_warn(&smb->client->dev, "reading IRQSTAT_C failed\n"); 708 dev_warn(smb->dev, "reading IRQSTAT_C failed\n");
778 return IRQ_NONE; 709 return IRQ_NONE;
779 } 710 }
780 711
781 irqstat_e = smb347_read(smb, IRQSTAT_E); 712 ret = regmap_read(smb->regmap, IRQSTAT_E, &irqstat_e);
782 if (irqstat_e < 0) { 713 if (ret < 0) {
783 dev_warn(&smb->client->dev, "reading IRQSTAT_E failed\n"); 714 dev_warn(smb->dev, "reading IRQSTAT_E failed\n");
784 return IRQ_NONE; 715 return IRQ_NONE;
785 } 716 }
786 717
@@ -789,13 +720,11 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
789 * disable charging. 720 * disable charging.
790 */ 721 */
791 if (stat_c & STAT_C_CHARGER_ERROR) { 722 if (stat_c & STAT_C_CHARGER_ERROR) {
792 dev_err(&smb->client->dev, 723 dev_err(smb->dev, "error in charger, disabling charging\n");
793 "error in charger, disabling charging\n");
794 724
795 smb347_charging_disable(smb); 725 smb347_charging_disable(smb);
796 power_supply_changed(&smb->battery); 726 power_supply_changed(&smb->battery);
797 727 handled = true;
798 ret = IRQ_HANDLED;
799 } 728 }
800 729
801 /* 730 /*
@@ -806,7 +735,7 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
806 if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) { 735 if (irqstat_c & (IRQSTAT_C_TERMINATION_IRQ | IRQSTAT_C_TAPER_IRQ)) {
807 if (irqstat_c & IRQSTAT_C_TERMINATION_STAT) 736 if (irqstat_c & IRQSTAT_C_TERMINATION_STAT)
808 power_supply_changed(&smb->battery); 737 power_supply_changed(&smb->battery);
809 ret = IRQ_HANDLED; 738 handled = true;
810 } 739 }
811 740
812 /* 741 /*
@@ -814,15 +743,17 @@ static irqreturn_t smb347_interrupt(int irq, void *data)
814 * was connected or disconnected. 743 * was connected or disconnected.
815 */ 744 */
816 if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) { 745 if (irqstat_e & (IRQSTAT_E_USBIN_UV_IRQ | IRQSTAT_E_DCIN_UV_IRQ)) {
817 if (smb347_update_status(smb) > 0) { 746 if (smb347_update_ps_status(smb) > 0) {
818 smb347_update_online(smb); 747 smb347_start_stop_charging(smb);
819 power_supply_changed(&smb->mains); 748 if (smb->pdata->use_mains)
820 power_supply_changed(&smb->usb); 749 power_supply_changed(&smb->mains);
750 if (smb->pdata->use_usb)
751 power_supply_changed(&smb->usb);
821 } 752 }
822 ret = IRQ_HANDLED; 753 handled = true;
823 } 754 }
824 755
825 return ret; 756 return handled ? IRQ_HANDLED : IRQ_NONE;
826} 757}
827 758
828static int smb347_irq_set(struct smb347_charger *smb, bool enable) 759static int smb347_irq_set(struct smb347_charger *smb, bool enable)
@@ -839,41 +770,18 @@ static int smb347_irq_set(struct smb347_charger *smb, bool enable)
839 * - termination current reached 770 * - termination current reached
840 * - charger error 771 * - charger error
841 */ 772 */
842 if (enable) { 773 ret = regmap_update_bits(smb->regmap, CFG_FAULT_IRQ, 0xff,
843 ret = smb347_write(smb, CFG_FAULT_IRQ, CFG_FAULT_IRQ_DCIN_UV); 774 enable ? CFG_FAULT_IRQ_DCIN_UV : 0);
844 if (ret < 0) 775 if (ret < 0)
845 goto fail; 776 goto fail;
846
847 ret = smb347_write(smb, CFG_STATUS_IRQ,
848 CFG_STATUS_IRQ_TERMINATION_OR_TAPER);
849 if (ret < 0)
850 goto fail;
851
852 ret = smb347_read(smb, CFG_PIN);
853 if (ret < 0)
854 goto fail;
855
856 ret |= CFG_PIN_EN_CHARGER_ERROR;
857
858 ret = smb347_write(smb, CFG_PIN, ret);
859 } else {
860 ret = smb347_write(smb, CFG_FAULT_IRQ, 0);
861 if (ret < 0)
862 goto fail;
863
864 ret = smb347_write(smb, CFG_STATUS_IRQ, 0);
865 if (ret < 0)
866 goto fail;
867
868 ret = smb347_read(smb, CFG_PIN);
869 if (ret < 0)
870 goto fail;
871
872 ret &= ~CFG_PIN_EN_CHARGER_ERROR;
873 777
874 ret = smb347_write(smb, CFG_PIN, ret); 778 ret = regmap_update_bits(smb->regmap, CFG_STATUS_IRQ, 0xff,
875 } 779 enable ? CFG_STATUS_IRQ_TERMINATION_OR_TAPER : 0);
780 if (ret < 0)
781 goto fail;
876 782
783 ret = regmap_update_bits(smb->regmap, CFG_PIN, CFG_PIN_EN_CHARGER_ERROR,
784 enable ? CFG_PIN_EN_CHARGER_ERROR : 0);
877fail: 785fail:
878 smb347_set_writable(smb, false); 786 smb347_set_writable(smb, false);
879 return ret; 787 return ret;
@@ -889,18 +797,18 @@ static inline int smb347_irq_disable(struct smb347_charger *smb)
889 return smb347_irq_set(smb, false); 797 return smb347_irq_set(smb, false);
890} 798}
891 799
892static int smb347_irq_init(struct smb347_charger *smb) 800static int smb347_irq_init(struct smb347_charger *smb,
801 struct i2c_client *client)
893{ 802{
894 const struct smb347_charger_platform_data *pdata = smb->pdata; 803 const struct smb347_charger_platform_data *pdata = smb->pdata;
895 int ret, irq = gpio_to_irq(pdata->irq_gpio); 804 int ret, irq = gpio_to_irq(pdata->irq_gpio);
896 805
897 ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, smb->client->name); 806 ret = gpio_request_one(pdata->irq_gpio, GPIOF_IN, client->name);
898 if (ret < 0) 807 if (ret < 0)
899 goto fail; 808 goto fail;
900 809
901 ret = request_threaded_irq(irq, NULL, smb347_interrupt, 810 ret = request_threaded_irq(irq, NULL, smb347_interrupt,
902 IRQF_TRIGGER_FALLING, smb->client->name, 811 IRQF_TRIGGER_FALLING, client->name, smb);
903 smb);
904 if (ret < 0) 812 if (ret < 0)
905 goto fail_gpio; 813 goto fail_gpio;
906 814
@@ -912,23 +820,14 @@ static int smb347_irq_init(struct smb347_charger *smb)
912 * Configure the STAT output to be suitable for interrupts: disable 820 * Configure the STAT output to be suitable for interrupts: disable
913 * all other output (except interrupts) and make it active low. 821 * all other output (except interrupts) and make it active low.
914 */ 822 */
915 ret = smb347_read(smb, CFG_STAT); 823 ret = regmap_update_bits(smb->regmap, CFG_STAT,
916 if (ret < 0) 824 CFG_STAT_ACTIVE_HIGH | CFG_STAT_DISABLED,
917 goto fail_readonly; 825 CFG_STAT_DISABLED);
918
919 ret &= ~CFG_STAT_ACTIVE_HIGH;
920 ret |= CFG_STAT_DISABLED;
921
922 ret = smb347_write(smb, CFG_STAT, ret);
923 if (ret < 0)
924 goto fail_readonly;
925
926 ret = smb347_irq_enable(smb);
927 if (ret < 0) 826 if (ret < 0)
928 goto fail_readonly; 827 goto fail_readonly;
929 828
930 smb347_set_writable(smb, false); 829 smb347_set_writable(smb, false);
931 smb->client->irq = irq; 830 client->irq = irq;
932 return 0; 831 return 0;
933 832
934fail_readonly: 833fail_readonly:
@@ -938,7 +837,7 @@ fail_irq:
938fail_gpio: 837fail_gpio:
939 gpio_free(pdata->irq_gpio); 838 gpio_free(pdata->irq_gpio);
940fail: 839fail:
941 smb->client->irq = 0; 840 client->irq = 0;
942 return ret; 841 return ret;
943} 842}
944 843
@@ -987,13 +886,13 @@ static int smb347_battery_get_property(struct power_supply *psy,
987 const struct smb347_charger_platform_data *pdata = smb->pdata; 886 const struct smb347_charger_platform_data *pdata = smb->pdata;
988 int ret; 887 int ret;
989 888
990 ret = smb347_update_status(smb); 889 ret = smb347_update_ps_status(smb);
991 if (ret < 0) 890 if (ret < 0)
992 return ret; 891 return ret;
993 892
994 switch (prop) { 893 switch (prop) {
995 case POWER_SUPPLY_PROP_STATUS: 894 case POWER_SUPPLY_PROP_STATUS:
996 if (!smb347_is_online(smb)) { 895 if (!smb347_is_ps_online(smb)) {
997 val->intval = POWER_SUPPLY_STATUS_DISCHARGING; 896 val->intval = POWER_SUPPLY_STATUS_DISCHARGING;
998 break; 897 break;
999 } 898 }
@@ -1004,7 +903,7 @@ static int smb347_battery_get_property(struct power_supply *psy,
1004 break; 903 break;
1005 904
1006 case POWER_SUPPLY_PROP_CHARGE_TYPE: 905 case POWER_SUPPLY_PROP_CHARGE_TYPE:
1007 if (!smb347_is_online(smb)) 906 if (!smb347_is_ps_online(smb))
1008 return -ENODATA; 907 return -ENODATA;
1009 908
1010 /* 909 /*
@@ -1036,44 +935,6 @@ static int smb347_battery_get_property(struct power_supply *psy,
1036 val->intval = pdata->battery_info.voltage_max_design; 935 val->intval = pdata->battery_info.voltage_max_design;
1037 break; 936 break;
1038 937
1039 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
1040 if (!smb347_is_online(smb))
1041 return -ENODATA;
1042 ret = smb347_read(smb, STAT_A);
1043 if (ret < 0)
1044 return ret;
1045
1046 ret &= STAT_A_FLOAT_VOLTAGE_MASK;
1047 if (ret > 0x3d)
1048 ret = 0x3d;
1049
1050 val->intval = 3500000 + ret * 20000;
1051 break;
1052
1053 case POWER_SUPPLY_PROP_CURRENT_NOW:
1054 if (!smb347_is_online(smb))
1055 return -ENODATA;
1056
1057 ret = smb347_read(smb, STAT_B);
1058 if (ret < 0)
1059 return ret;
1060
1061 /*
1062 * The current value is composition of FCC and PCC values
1063 * and we can detect which table to use from bit 5.
1064 */
1065 if (ret & 0x20) {
1066 val->intval = hw_to_current(fcc_tbl,
1067 ARRAY_SIZE(fcc_tbl),
1068 ret & 7);
1069 } else {
1070 ret >>= 3;
1071 val->intval = hw_to_current(pcc_tbl,
1072 ARRAY_SIZE(pcc_tbl),
1073 ret & 7);
1074 }
1075 break;
1076
1077 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN: 938 case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
1078 val->intval = pdata->battery_info.charge_full_design; 939 val->intval = pdata->battery_info.charge_full_design;
1079 break; 940 break;
@@ -1095,64 +956,58 @@ static enum power_supply_property smb347_battery_properties[] = {
1095 POWER_SUPPLY_PROP_TECHNOLOGY, 956 POWER_SUPPLY_PROP_TECHNOLOGY,
1096 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 957 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
1097 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN, 958 POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
1098 POWER_SUPPLY_PROP_VOLTAGE_NOW,
1099 POWER_SUPPLY_PROP_CURRENT_NOW,
1100 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN, 959 POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN,
1101 POWER_SUPPLY_PROP_MODEL_NAME, 960 POWER_SUPPLY_PROP_MODEL_NAME,
1102}; 961};
1103 962
1104static int smb347_debugfs_show(struct seq_file *s, void *data) 963static bool smb347_volatile_reg(struct device *dev, unsigned int reg)
1105{ 964{
1106 struct smb347_charger *smb = s->private; 965 switch (reg) {
1107 int ret; 966 case IRQSTAT_A:
1108 u8 reg; 967 case IRQSTAT_C:
1109 968 case IRQSTAT_E:
1110 seq_printf(s, "Control registers:\n"); 969 case IRQSTAT_F:
1111 seq_printf(s, "==================\n"); 970 case STAT_A:
1112 for (reg = CFG_CHARGE_CURRENT; reg <= CFG_ADDRESS; reg++) { 971 case STAT_B:
1113 ret = smb347_read(smb, reg); 972 case STAT_C:
1114 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret); 973 case STAT_E:
1115 } 974 return true;
1116 seq_printf(s, "\n");
1117
1118 seq_printf(s, "Command registers:\n");
1119 seq_printf(s, "==================\n");
1120 ret = smb347_read(smb, CMD_A);
1121 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_A, ret);
1122 ret = smb347_read(smb, CMD_B);
1123 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_B, ret);
1124 ret = smb347_read(smb, CMD_C);
1125 seq_printf(s, "0x%02x:\t0x%02x\n", CMD_C, ret);
1126 seq_printf(s, "\n");
1127
1128 seq_printf(s, "Interrupt status registers:\n");
1129 seq_printf(s, "===========================\n");
1130 for (reg = IRQSTAT_A; reg <= IRQSTAT_F; reg++) {
1131 ret = smb347_read(smb, reg);
1132 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1133 }
1134 seq_printf(s, "\n");
1135
1136 seq_printf(s, "Status registers:\n");
1137 seq_printf(s, "=================\n");
1138 for (reg = STAT_A; reg <= STAT_E; reg++) {
1139 ret = smb347_read(smb, reg);
1140 seq_printf(s, "0x%02x:\t0x%02x\n", reg, ret);
1141 } 975 }
1142 976
1143 return 0; 977 return false;
1144} 978}
1145 979
1146static int smb347_debugfs_open(struct inode *inode, struct file *file) 980static bool smb347_readable_reg(struct device *dev, unsigned int reg)
1147{ 981{
1148 return single_open(file, smb347_debugfs_show, inode->i_private); 982 switch (reg) {
983 case CFG_CHARGE_CURRENT:
984 case CFG_CURRENT_LIMIT:
985 case CFG_FLOAT_VOLTAGE:
986 case CFG_STAT:
987 case CFG_PIN:
988 case CFG_THERM:
989 case CFG_SYSOK:
990 case CFG_OTHER:
991 case CFG_OTG:
992 case CFG_TEMP_LIMIT:
993 case CFG_FAULT_IRQ:
994 case CFG_STATUS_IRQ:
995 case CFG_ADDRESS:
996 case CMD_A:
997 case CMD_B:
998 case CMD_C:
999 return true;
1000 }
1001
1002 return smb347_volatile_reg(dev, reg);
1149} 1003}
1150 1004
1151static const struct file_operations smb347_debugfs_fops = { 1005static const struct regmap_config smb347_regmap = {
1152 .open = smb347_debugfs_open, 1006 .reg_bits = 8,
1153 .read = seq_read, 1007 .val_bits = 8,
1154 .llseek = seq_lseek, 1008 .max_register = SMB347_MAX_REGISTER,
1155 .release = single_release, 1009 .volatile_reg = smb347_volatile_reg,
1010 .readable_reg = smb347_readable_reg,
1156}; 1011};
1157 1012
1158static int smb347_probe(struct i2c_client *client, 1013static int smb347_probe(struct i2c_client *client,
@@ -1178,28 +1033,45 @@ static int smb347_probe(struct i2c_client *client,
1178 i2c_set_clientdata(client, smb); 1033 i2c_set_clientdata(client, smb);
1179 1034
1180 mutex_init(&smb->lock); 1035 mutex_init(&smb->lock);
1181 smb->client = client; 1036 smb->dev = &client->dev;
1182 smb->pdata = pdata; 1037 smb->pdata = pdata;
1183 1038
1039 smb->regmap = devm_regmap_init_i2c(client, &smb347_regmap);
1040 if (IS_ERR(smb->regmap))
1041 return PTR_ERR(smb->regmap);
1042
1184 ret = smb347_hw_init(smb); 1043 ret = smb347_hw_init(smb);
1185 if (ret < 0) 1044 if (ret < 0)
1186 return ret; 1045 return ret;
1187 1046
1188 smb->mains.name = "smb347-mains"; 1047 if (smb->pdata->use_mains) {
1189 smb->mains.type = POWER_SUPPLY_TYPE_MAINS; 1048 smb->mains.name = "smb347-mains";
1190 smb->mains.get_property = smb347_mains_get_property; 1049 smb->mains.type = POWER_SUPPLY_TYPE_MAINS;
1191 smb->mains.properties = smb347_mains_properties; 1050 smb->mains.get_property = smb347_mains_get_property;
1192 smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties); 1051 smb->mains.properties = smb347_mains_properties;
1193 smb->mains.supplied_to = battery; 1052 smb->mains.num_properties = ARRAY_SIZE(smb347_mains_properties);
1194 smb->mains.num_supplicants = ARRAY_SIZE(battery); 1053 smb->mains.supplied_to = battery;
1195 1054 smb->mains.num_supplicants = ARRAY_SIZE(battery);
1196 smb->usb.name = "smb347-usb"; 1055 ret = power_supply_register(dev, &smb->mains);
1197 smb->usb.type = POWER_SUPPLY_TYPE_USB; 1056 if (ret < 0)
1198 smb->usb.get_property = smb347_usb_get_property; 1057 return ret;
1199 smb->usb.properties = smb347_usb_properties; 1058 }
1200 smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties); 1059
1201 smb->usb.supplied_to = battery; 1060 if (smb->pdata->use_usb) {
1202 smb->usb.num_supplicants = ARRAY_SIZE(battery); 1061 smb->usb.name = "smb347-usb";
1062 smb->usb.type = POWER_SUPPLY_TYPE_USB;
1063 smb->usb.get_property = smb347_usb_get_property;
1064 smb->usb.properties = smb347_usb_properties;
1065 smb->usb.num_properties = ARRAY_SIZE(smb347_usb_properties);
1066 smb->usb.supplied_to = battery;
1067 smb->usb.num_supplicants = ARRAY_SIZE(battery);
1068 ret = power_supply_register(dev, &smb->usb);
1069 if (ret < 0) {
1070 if (smb->pdata->use_mains)
1071 power_supply_unregister(&smb->mains);
1072 return ret;
1073 }
1074 }
1203 1075
1204 smb->battery.name = "smb347-battery"; 1076 smb->battery.name = "smb347-battery";
1205 smb->battery.type = POWER_SUPPLY_TYPE_BATTERY; 1077 smb->battery.type = POWER_SUPPLY_TYPE_BATTERY;
@@ -1207,20 +1079,13 @@ static int smb347_probe(struct i2c_client *client,
1207 smb->battery.properties = smb347_battery_properties; 1079 smb->battery.properties = smb347_battery_properties;
1208 smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties); 1080 smb->battery.num_properties = ARRAY_SIZE(smb347_battery_properties);
1209 1081
1210 ret = power_supply_register(dev, &smb->mains);
1211 if (ret < 0)
1212 return ret;
1213
1214 ret = power_supply_register(dev, &smb->usb);
1215 if (ret < 0) {
1216 power_supply_unregister(&smb->mains);
1217 return ret;
1218 }
1219 1082
1220 ret = power_supply_register(dev, &smb->battery); 1083 ret = power_supply_register(dev, &smb->battery);
1221 if (ret < 0) { 1084 if (ret < 0) {
1222 power_supply_unregister(&smb->usb); 1085 if (smb->pdata->use_usb)
1223 power_supply_unregister(&smb->mains); 1086 power_supply_unregister(&smb->usb);
1087 if (smb->pdata->use_mains)
1088 power_supply_unregister(&smb->mains);
1224 return ret; 1089 return ret;
1225 } 1090 }
1226 1091
@@ -1229,15 +1094,15 @@ static int smb347_probe(struct i2c_client *client,
1229 * interrupt support here. 1094 * interrupt support here.
1230 */ 1095 */
1231 if (pdata->irq_gpio >= 0) { 1096 if (pdata->irq_gpio >= 0) {
1232 ret = smb347_irq_init(smb); 1097 ret = smb347_irq_init(smb, client);
1233 if (ret < 0) { 1098 if (ret < 0) {
1234 dev_warn(dev, "failed to initialize IRQ: %d\n", ret); 1099 dev_warn(dev, "failed to initialize IRQ: %d\n", ret);
1235 dev_warn(dev, "disabling IRQ support\n"); 1100 dev_warn(dev, "disabling IRQ support\n");
1101 } else {
1102 smb347_irq_enable(smb);
1236 } 1103 }
1237 } 1104 }
1238 1105
1239 smb->dentry = debugfs_create_file("smb347-regs", S_IRUSR, NULL, smb,
1240 &smb347_debugfs_fops);
1241 return 0; 1106 return 0;
1242} 1107}
1243 1108
@@ -1245,9 +1110,6 @@ static int smb347_remove(struct i2c_client *client)
1245{ 1110{
1246 struct smb347_charger *smb = i2c_get_clientdata(client); 1111 struct smb347_charger *smb = i2c_get_clientdata(client);
1247 1112
1248 if (!IS_ERR_OR_NULL(smb->dentry))
1249 debugfs_remove(smb->dentry);
1250
1251 if (client->irq) { 1113 if (client->irq) {
1252 smb347_irq_disable(smb); 1114 smb347_irq_disable(smb);
1253 free_irq(client->irq, smb); 1115 free_irq(client->irq, smb);
@@ -1255,8 +1117,10 @@ static int smb347_remove(struct i2c_client *client)
1255 } 1117 }
1256 1118
1257 power_supply_unregister(&smb->battery); 1119 power_supply_unregister(&smb->battery);
1258 power_supply_unregister(&smb->usb); 1120 if (smb->pdata->use_usb)
1259 power_supply_unregister(&smb->mains); 1121 power_supply_unregister(&smb->usb);
1122 if (smb->pdata->use_mains)
1123 power_supply_unregister(&smb->mains);
1260 return 0; 1124 return 0;
1261} 1125}
1262 1126
@@ -1275,17 +1139,7 @@ static struct i2c_driver smb347_driver = {
1275 .id_table = smb347_id, 1139 .id_table = smb347_id,
1276}; 1140};
1277 1141
1278static int __init smb347_init(void) 1142module_i2c_driver(smb347_driver);
1279{
1280 return i2c_add_driver(&smb347_driver);
1281}
1282module_init(smb347_init);
1283
1284static void __exit smb347_exit(void)
1285{
1286 i2c_del_driver(&smb347_driver);
1287}
1288module_exit(smb347_exit);
1289 1143
1290MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>"); 1144MODULE_AUTHOR("Bruce E. Robertson <bruce.e.robertson@intel.com>");
1291MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>"); 1145MODULE_AUTHOR("Mika Westerberg <mika.westerberg@linux.intel.com>");
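Editor's note on the smb347 hunks above: the conversion replaces the driver's open-coded I2C read/modify/write sequences with the regmap API, where regmap_update_bits() performs the read, the masking and the conditional write-back in one call. As an illustrative sketch only (not part of the patch), the pattern looks like this; the register and field macros are the driver's own, while the wrapper function is hypothetical:

static int example_set_fcc(struct smb347_charger *smb, unsigned int hw_val)
{
	/*
	 * Reads CFG_CHARGE_CURRENT, clears the fast-charge-current field,
	 * ORs in the new value and writes the register back only if the
	 * value actually changed.
	 */
	return regmap_update_bits(smb->regmap, CFG_CHARGE_CURRENT,
				  CFG_CHARGE_CURRENT_FCC_MASK,
				  hw_val << CFG_CHARGE_CURRENT_FCC_SHIFT);
}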
diff --git a/drivers/rapidio/Kconfig b/drivers/rapidio/Kconfig
index bc8719238793..6194d35ebb97 100644
--- a/drivers/rapidio/Kconfig
+++ b/drivers/rapidio/Kconfig
@@ -22,6 +22,20 @@ config RAPIDIO_ENABLE_RX_TX_PORTS
22 ports for Input/Output direction to allow other traffic 22 ports for Input/Output direction to allow other traffic
23 than Maintenance transfers. 23 than Maintenance transfers.
24 24
25config RAPIDIO_DMA_ENGINE
26 bool "DMA Engine support for RapidIO"
27 depends on RAPIDIO
28 select DMADEVICES
29 select DMA_ENGINE
30 help
31 Say Y here if you want to use the DMA Engine framework for RapidIO data
32 transfers to/from target RIO devices. RapidIO uses NREAD and
33 NWRITE (NWRITE_R, SWRITE) requests to transfer data between local
34 memory and memory on a remote target device. You need a DMA controller
35 capable of performing data transfers to/from RapidIO.
36
37 If you are unsure about this, say Y here.
38
25config RAPIDIO_DEBUG 39config RAPIDIO_DEBUG
26 bool "RapidIO subsystem debug messages" 40 bool "RapidIO subsystem debug messages"
27 depends on RAPIDIO 41 depends on RAPIDIO
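The new RAPIDIO_DMA_ENGINE option hooks RapidIO data transfers into the generic dmaengine framework. As an illustrative sketch (not necessarily how the subsystem wires it up), a client could obtain such a channel through the standard dmaengine calls below; the filter function and its matching criterion are assumptions, only the capability-mask helpers and dma_request_channel() are stock dmaengine API:

#include <linux/dmaengine.h>

static bool example_rio_chan_filter(struct dma_chan *chan, void *arg)
{
	/* Hypothetical match: pick a channel provided by the given device. */
	return chan->device->dev == arg;
}

static struct dma_chan *example_get_rio_dma_chan(struct device *dmadev)
{
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_SLAVE, mask);
	/* Ask dmaengine for any DMA_SLAVE-capable channel passing the filter. */
	return dma_request_channel(mask, example_rio_chan_filter, dmadev);
}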
diff --git a/drivers/rapidio/devices/Makefile b/drivers/rapidio/devices/Makefile
index 3b7b4e2dff7c..7b62860f34f8 100644
--- a/drivers/rapidio/devices/Makefile
+++ b/drivers/rapidio/devices/Makefile
@@ -3,3 +3,6 @@
3# 3#
4 4
5obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o 5obj-$(CONFIG_RAPIDIO_TSI721) += tsi721.o
6ifeq ($(CONFIG_RAPIDIO_DMA_ENGINE),y)
7obj-$(CONFIG_RAPIDIO_TSI721) += tsi721_dma.o
8endif
diff --git a/drivers/rapidio/devices/tsi721.c b/drivers/rapidio/devices/tsi721.c
index 30d2072f480b..722246cf20ab 100644
--- a/drivers/rapidio/devices/tsi721.c
+++ b/drivers/rapidio/devices/tsi721.c
@@ -108,6 +108,7 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
108 u16 destid, u8 hopcount, u32 offset, int len, 108 u16 destid, u8 hopcount, u32 offset, int len,
109 u32 *data, int do_wr) 109 u32 *data, int do_wr)
110{ 110{
111 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(priv->mdma.ch_id);
111 struct tsi721_dma_desc *bd_ptr; 112 struct tsi721_dma_desc *bd_ptr;
112 u32 rd_count, swr_ptr, ch_stat; 113 u32 rd_count, swr_ptr, ch_stat;
113 int i, err = 0; 114 int i, err = 0;
@@ -116,10 +117,9 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
116 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32))) 117 if (offset > (RIO_MAINT_SPACE_SZ - len) || (len != sizeof(u32)))
117 return -EINVAL; 118 return -EINVAL;
118 119
119 bd_ptr = priv->bdma[TSI721_DMACH_MAINT].bd_base; 120 bd_ptr = priv->mdma.bd_base;
120 121
121 rd_count = ioread32( 122 rd_count = ioread32(regs + TSI721_DMAC_DRDCNT);
122 priv->regs + TSI721_DMAC_DRDCNT(TSI721_DMACH_MAINT));
123 123
124 /* Initialize DMA descriptor */ 124 /* Initialize DMA descriptor */
125 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid); 125 bd_ptr[0].type_id = cpu_to_le32((DTYPE2 << 29) | (op << 19) | destid);
@@ -134,19 +134,18 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
134 mb(); 134 mb();
135 135
136 /* Start DMA operation */ 136 /* Start DMA operation */
137 iowrite32(rd_count + 2, 137 iowrite32(rd_count + 2, regs + TSI721_DMAC_DWRCNT);
138 priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT)); 138 ioread32(regs + TSI721_DMAC_DWRCNT);
139 ioread32(priv->regs + TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
140 i = 0; 139 i = 0;
141 140
142 /* Wait until DMA transfer is finished */ 141 /* Wait until DMA transfer is finished */
143 while ((ch_stat = ioread32(priv->regs + 142 while ((ch_stat = ioread32(regs + TSI721_DMAC_STS))
144 TSI721_DMAC_STS(TSI721_DMACH_MAINT))) & TSI721_DMAC_STS_RUN) { 143 & TSI721_DMAC_STS_RUN) {
145 udelay(1); 144 udelay(1);
146 if (++i >= 5000000) { 145 if (++i >= 5000000) {
147 dev_dbg(&priv->pdev->dev, 146 dev_dbg(&priv->pdev->dev,
148 "%s : DMA[%d] read timeout ch_status=%x\n", 147 "%s : DMA[%d] read timeout ch_status=%x\n",
149 __func__, TSI721_DMACH_MAINT, ch_stat); 148 __func__, priv->mdma.ch_id, ch_stat);
150 if (!do_wr) 149 if (!do_wr)
151 *data = 0xffffffff; 150 *data = 0xffffffff;
152 err = -EIO; 151 err = -EIO;
@@ -162,13 +161,10 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
162 __func__, ch_stat); 161 __func__, ch_stat);
163 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n", 162 dev_dbg(&priv->pdev->dev, "OP=%d : destid=%x hc=%x off=%x\n",
164 do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset); 163 do_wr ? MAINT_WR : MAINT_RD, destid, hopcount, offset);
165 iowrite32(TSI721_DMAC_INT_ALL, 164 iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
166 priv->regs + TSI721_DMAC_INT(TSI721_DMACH_MAINT)); 165 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
167 iowrite32(TSI721_DMAC_CTL_INIT,
168 priv->regs + TSI721_DMAC_CTL(TSI721_DMACH_MAINT));
169 udelay(10); 166 udelay(10);
170 iowrite32(0, priv->regs + 167 iowrite32(0, regs + TSI721_DMAC_DWRCNT);
171 TSI721_DMAC_DWRCNT(TSI721_DMACH_MAINT));
172 udelay(1); 168 udelay(1);
173 if (!do_wr) 169 if (!do_wr)
174 *data = 0xffffffff; 170 *data = 0xffffffff;
@@ -184,8 +180,8 @@ static int tsi721_maint_dma(struct tsi721_device *priv, u32 sys_size,
184 * NOTE: Skipping check and clear FIFO entries because we are waiting 180 * NOTE: Skipping check and clear FIFO entries because we are waiting
185 * for transfer to be completed. 181 * for transfer to be completed.
186 */ 182 */
187 swr_ptr = ioread32(priv->regs + TSI721_DMAC_DSWP(TSI721_DMACH_MAINT)); 183 swr_ptr = ioread32(regs + TSI721_DMAC_DSWP);
188 iowrite32(swr_ptr, priv->regs + TSI721_DMAC_DSRP(TSI721_DMACH_MAINT)); 184 iowrite32(swr_ptr, regs + TSI721_DMAC_DSRP);
189err_out: 185err_out:
190 186
191 return err; 187 return err;
@@ -541,6 +537,22 @@ static irqreturn_t tsi721_irqhandler(int irq, void *ptr)
541 tsi721_pw_handler(mport); 537 tsi721_pw_handler(mport);
542 } 538 }
543 539
540#ifdef CONFIG_RAPIDIO_DMA_ENGINE
541 if (dev_int & TSI721_DEV_INT_BDMA_CH) {
542 int ch;
543
544 if (dev_ch_int & TSI721_INT_BDMA_CHAN_M) {
545 dev_dbg(&priv->pdev->dev,
546 "IRQ from DMA channel 0x%08x\n", dev_ch_int);
547
548 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) {
549 if (!(dev_ch_int & TSI721_INT_BDMA_CHAN(ch)))
550 continue;
551 tsi721_bdma_handler(&priv->bdma[ch]);
552 }
553 }
554 }
555#endif
544 return IRQ_HANDLED; 556 return IRQ_HANDLED;
545} 557}
546 558
@@ -553,18 +565,26 @@ static void tsi721_interrupts_init(struct tsi721_device *priv)
553 priv->regs + TSI721_SR_CHINT(IDB_QUEUE)); 565 priv->regs + TSI721_SR_CHINT(IDB_QUEUE));
554 iowrite32(TSI721_SR_CHINT_IDBQRCV, 566 iowrite32(TSI721_SR_CHINT_IDBQRCV,
555 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE)); 567 priv->regs + TSI721_SR_CHINTE(IDB_QUEUE));
556 iowrite32(TSI721_INT_SR2PC_CHAN(IDB_QUEUE),
557 priv->regs + TSI721_DEV_CHAN_INTE);
558 568
559 /* Enable SRIO MAC interrupts */ 569 /* Enable SRIO MAC interrupts */
560 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT, 570 iowrite32(TSI721_RIO_EM_DEV_INT_EN_INT,
561 priv->regs + TSI721_RIO_EM_DEV_INT_EN); 571 priv->regs + TSI721_RIO_EM_DEV_INT_EN);
562 572
573 /* Enable interrupts from channels in use */
574#ifdef CONFIG_RAPIDIO_DMA_ENGINE
575 intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE) |
576 (TSI721_INT_BDMA_CHAN_M &
577 ~TSI721_INT_BDMA_CHAN(TSI721_DMACH_MAINT));
578#else
579 intr = TSI721_INT_SR2PC_CHAN(IDB_QUEUE);
580#endif
581 iowrite32(intr, priv->regs + TSI721_DEV_CHAN_INTE);
582
563 if (priv->flags & TSI721_USING_MSIX) 583 if (priv->flags & TSI721_USING_MSIX)
564 intr = TSI721_DEV_INT_SRIO; 584 intr = TSI721_DEV_INT_SRIO;
565 else 585 else
566 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO | 586 intr = TSI721_DEV_INT_SR2PC_CH | TSI721_DEV_INT_SRIO |
567 TSI721_DEV_INT_SMSG_CH; 587 TSI721_DEV_INT_SMSG_CH | TSI721_DEV_INT_BDMA_CH;
568 588
569 iowrite32(intr, priv->regs + TSI721_DEV_INTE); 589 iowrite32(intr, priv->regs + TSI721_DEV_INTE);
570 ioread32(priv->regs + TSI721_DEV_INTE); 590 ioread32(priv->regs + TSI721_DEV_INTE);
@@ -715,12 +735,29 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
715 TSI721_MSIX_OMSG_INT(i); 735 TSI721_MSIX_OMSG_INT(i);
716 } 736 }
717 737
738#ifdef CONFIG_RAPIDIO_DMA_ENGINE
739 /*
740 * Initialize MSI-X entries for Block DMA Engine:
741 * this driver supports XXX DMA channels
742 * (one is reserved for SRIO maintenance transactions)
743 */
744 for (i = 0; i < TSI721_DMA_CHNUM; i++) {
745 entries[TSI721_VECT_DMA0_DONE + i].entry =
746 TSI721_MSIX_DMACH_DONE(i);
747 entries[TSI721_VECT_DMA0_INT + i].entry =
748 TSI721_MSIX_DMACH_INT(i);
749 }
750#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
751
718 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries)); 752 err = pci_enable_msix(priv->pdev, entries, ARRAY_SIZE(entries));
719 if (err) { 753 if (err) {
720 if (err > 0) 754 if (err > 0)
721 dev_info(&priv->pdev->dev, 755 dev_info(&priv->pdev->dev,
722 "Only %d MSI-X vectors available, " 756 "Only %d MSI-X vectors available, "
723 "not using MSI-X\n", err); 757 "not using MSI-X\n", err);
758 else
759 dev_err(&priv->pdev->dev,
760 "Failed to enable MSI-X (err=%d)\n", err);
724 return err; 761 return err;
725 } 762 }
726 763
@@ -760,6 +797,22 @@ static int tsi721_enable_msix(struct tsi721_device *priv)
760 i, pci_name(priv->pdev)); 797 i, pci_name(priv->pdev));
761 } 798 }
762 799
800#ifdef CONFIG_RAPIDIO_DMA_ENGINE
801 for (i = 0; i < TSI721_DMA_CHNUM; i++) {
802 priv->msix[TSI721_VECT_DMA0_DONE + i].vector =
803 entries[TSI721_VECT_DMA0_DONE + i].vector;
804 snprintf(priv->msix[TSI721_VECT_DMA0_DONE + i].irq_name,
805 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmad%d@pci:%s",
806 i, pci_name(priv->pdev));
807
808 priv->msix[TSI721_VECT_DMA0_INT + i].vector =
809 entries[TSI721_VECT_DMA0_INT + i].vector;
810 snprintf(priv->msix[TSI721_VECT_DMA0_INT + i].irq_name,
811 IRQ_DEVICE_NAME_MAX, DRV_NAME "-dmai%d@pci:%s",
812 i, pci_name(priv->pdev));
813 }
814#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
815
763 return 0; 816 return 0;
764} 817}
765#endif /* CONFIG_PCI_MSI */ 818#endif /* CONFIG_PCI_MSI */
@@ -888,20 +941,34 @@ static void tsi721_doorbell_free(struct tsi721_device *priv)
888 priv->idb_base = NULL; 941 priv->idb_base = NULL;
889} 942}
890 943
891static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum) 944/**
945 * tsi721_bdma_maint_init - Initialize maintenance request BDMA channel.
946 * @priv: pointer to tsi721 private data
947 *
948 * Initialize BDMA channel allocated for RapidIO maintenance read/write
949 * request generation
950 * Returns %0 on success or %-ENOMEM on failure.
951 */
952static int tsi721_bdma_maint_init(struct tsi721_device *priv)
892{ 953{
893 struct tsi721_dma_desc *bd_ptr; 954 struct tsi721_dma_desc *bd_ptr;
894 u64 *sts_ptr; 955 u64 *sts_ptr;
895 dma_addr_t bd_phys, sts_phys; 956 dma_addr_t bd_phys, sts_phys;
896 int sts_size; 957 int sts_size;
897 int bd_num = priv->bdma[chnum].bd_num; 958 int bd_num = 2;
959 void __iomem *regs;
898 960
899 dev_dbg(&priv->pdev->dev, "Init Block DMA Engine, CH%d\n", chnum); 961 dev_dbg(&priv->pdev->dev,
962 "Init Block DMA Engine for Maintenance requests, CH%d\n",
963 TSI721_DMACH_MAINT);
900 964
901 /* 965 /*
902 * Initialize DMA channel for maintenance requests 966 * Initialize DMA channel for maintenance requests
903 */ 967 */
904 968
969 priv->mdma.ch_id = TSI721_DMACH_MAINT;
970 regs = priv->regs + TSI721_DMAC_BASE(TSI721_DMACH_MAINT);
971
905 /* Allocate space for DMA descriptors */ 972 /* Allocate space for DMA descriptors */
906 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev, 973 bd_ptr = dma_zalloc_coherent(&priv->pdev->dev,
907 bd_num * sizeof(struct tsi721_dma_desc), 974 bd_num * sizeof(struct tsi721_dma_desc),
@@ -909,8 +976,9 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
909 if (!bd_ptr) 976 if (!bd_ptr)
910 return -ENOMEM; 977 return -ENOMEM;
911 978
912 priv->bdma[chnum].bd_phys = bd_phys; 979 priv->mdma.bd_num = bd_num;
913 priv->bdma[chnum].bd_base = bd_ptr; 980 priv->mdma.bd_phys = bd_phys;
981 priv->mdma.bd_base = bd_ptr;
914 982
915 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n", 983 dev_dbg(&priv->pdev->dev, "DMA descriptors @ %p (phys = %llx)\n",
916 bd_ptr, (unsigned long long)bd_phys); 984 bd_ptr, (unsigned long long)bd_phys);
@@ -927,13 +995,13 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
927 dma_free_coherent(&priv->pdev->dev, 995 dma_free_coherent(&priv->pdev->dev,
928 bd_num * sizeof(struct tsi721_dma_desc), 996 bd_num * sizeof(struct tsi721_dma_desc),
929 bd_ptr, bd_phys); 997 bd_ptr, bd_phys);
930 priv->bdma[chnum].bd_base = NULL; 998 priv->mdma.bd_base = NULL;
931 return -ENOMEM; 999 return -ENOMEM;
932 } 1000 }
933 1001
934 priv->bdma[chnum].sts_phys = sts_phys; 1002 priv->mdma.sts_phys = sts_phys;
935 priv->bdma[chnum].sts_base = sts_ptr; 1003 priv->mdma.sts_base = sts_ptr;
936 priv->bdma[chnum].sts_size = sts_size; 1004 priv->mdma.sts_size = sts_size;
937 1005
938 dev_dbg(&priv->pdev->dev, 1006 dev_dbg(&priv->pdev->dev,
939 "desc status FIFO @ %p (phys = %llx) size=0x%x\n", 1007 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
@@ -946,83 +1014,61 @@ static int tsi721_bdma_ch_init(struct tsi721_device *priv, int chnum)
946 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32); 1014 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
947 1015
948 /* Setup DMA descriptor pointers */ 1016 /* Setup DMA descriptor pointers */
949 iowrite32(((u64)bd_phys >> 32), 1017 iowrite32(((u64)bd_phys >> 32), regs + TSI721_DMAC_DPTRH);
950 priv->regs + TSI721_DMAC_DPTRH(chnum));
951 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK), 1018 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
952 priv->regs + TSI721_DMAC_DPTRL(chnum)); 1019 regs + TSI721_DMAC_DPTRL);
953 1020
954 /* Setup descriptor status FIFO */ 1021 /* Setup descriptor status FIFO */
955 iowrite32(((u64)sts_phys >> 32), 1022 iowrite32(((u64)sts_phys >> 32), regs + TSI721_DMAC_DSBH);
956 priv->regs + TSI721_DMAC_DSBH(chnum));
957 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK), 1023 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
958 priv->regs + TSI721_DMAC_DSBL(chnum)); 1024 regs + TSI721_DMAC_DSBL);
959 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size), 1025 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
960 priv->regs + TSI721_DMAC_DSSZ(chnum)); 1026 regs + TSI721_DMAC_DSSZ);
961 1027
962 /* Clear interrupt bits */ 1028 /* Clear interrupt bits */
963 iowrite32(TSI721_DMAC_INT_ALL, 1029 iowrite32(TSI721_DMAC_INT_ALL, regs + TSI721_DMAC_INT);
964 priv->regs + TSI721_DMAC_INT(chnum));
965 1030
966 ioread32(priv->regs + TSI721_DMAC_INT(chnum)); 1031 ioread32(regs + TSI721_DMAC_INT);
967 1032
968 /* Toggle DMA channel initialization */ 1033 /* Toggle DMA channel initialization */
969 iowrite32(TSI721_DMAC_CTL_INIT, priv->regs + TSI721_DMAC_CTL(chnum)); 1034 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
970 ioread32(priv->regs + TSI721_DMAC_CTL(chnum)); 1035 ioread32(regs + TSI721_DMAC_CTL);
971 udelay(10); 1036 udelay(10);
972 1037
973 return 0; 1038 return 0;
974} 1039}
975 1040
976static int tsi721_bdma_ch_free(struct tsi721_device *priv, int chnum) 1041static int tsi721_bdma_maint_free(struct tsi721_device *priv)
977{ 1042{
978 u32 ch_stat; 1043 u32 ch_stat;
1044 struct tsi721_bdma_maint *mdma = &priv->mdma;
1045 void __iomem *regs = priv->regs + TSI721_DMAC_BASE(mdma->ch_id);
979 1046
980 if (priv->bdma[chnum].bd_base == NULL) 1047 if (mdma->bd_base == NULL)
981 return 0; 1048 return 0;
982 1049
983 /* Check if DMA channel still running */ 1050 /* Check if DMA channel still running */
984 ch_stat = ioread32(priv->regs + TSI721_DMAC_STS(chnum)); 1051 ch_stat = ioread32(regs + TSI721_DMAC_STS);
985 if (ch_stat & TSI721_DMAC_STS_RUN) 1052 if (ch_stat & TSI721_DMAC_STS_RUN)
986 return -EFAULT; 1053 return -EFAULT;
987 1054
988 /* Put DMA channel into init state */ 1055 /* Put DMA channel into init state */
989 iowrite32(TSI721_DMAC_CTL_INIT, 1056 iowrite32(TSI721_DMAC_CTL_INIT, regs + TSI721_DMAC_CTL);
990 priv->regs + TSI721_DMAC_CTL(chnum));
991 1057
992 /* Free space allocated for DMA descriptors */ 1058 /* Free space allocated for DMA descriptors */
993 dma_free_coherent(&priv->pdev->dev, 1059 dma_free_coherent(&priv->pdev->dev,
994 priv->bdma[chnum].bd_num * sizeof(struct tsi721_dma_desc), 1060 mdma->bd_num * sizeof(struct tsi721_dma_desc),
995 priv->bdma[chnum].bd_base, priv->bdma[chnum].bd_phys); 1061 mdma->bd_base, mdma->bd_phys);
996 priv->bdma[chnum].bd_base = NULL; 1062 mdma->bd_base = NULL;
997 1063
998 /* Free space allocated for status FIFO */ 1064 /* Free space allocated for status FIFO */
999 dma_free_coherent(&priv->pdev->dev, 1065 dma_free_coherent(&priv->pdev->dev,
1000 priv->bdma[chnum].sts_size * sizeof(struct tsi721_dma_sts), 1066 mdma->sts_size * sizeof(struct tsi721_dma_sts),
1001 priv->bdma[chnum].sts_base, priv->bdma[chnum].sts_phys); 1067 mdma->sts_base, mdma->sts_phys);
1002 priv->bdma[chnum].sts_base = NULL; 1068 mdma->sts_base = NULL;
1003 return 0;
1004}
1005
1006static int tsi721_bdma_init(struct tsi721_device *priv)
1007{
1008 /* Initialize BDMA channel allocated for RapidIO maintenance read/write
1009 * request generation
1010 */
1011 priv->bdma[TSI721_DMACH_MAINT].bd_num = 2;
1012 if (tsi721_bdma_ch_init(priv, TSI721_DMACH_MAINT)) {
1013 dev_err(&priv->pdev->dev, "Unable to initialize maintenance DMA"
1014 " channel %d, aborting\n", TSI721_DMACH_MAINT);
1015 return -ENOMEM;
1016 }
1017
1018 return 0; 1069 return 0;
1019} 1070}
1020 1071
1021static void tsi721_bdma_free(struct tsi721_device *priv)
1022{
1023 tsi721_bdma_ch_free(priv, TSI721_DMACH_MAINT);
1024}
1025
1026/* Enable Inbound Messaging Interrupts */ 1072/* Enable Inbound Messaging Interrupts */
1027static void 1073static void
1028tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch, 1074tsi721_imsg_interrupt_enable(struct tsi721_device *priv, int ch,
@@ -2035,7 +2081,8 @@ static void tsi721_disable_ints(struct tsi721_device *priv)
2035 2081
2036 /* Disable all BDMA Channel interrupts */ 2082 /* Disable all BDMA Channel interrupts */
2037 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++) 2083 for (ch = 0; ch < TSI721_DMA_MAXCH; ch++)
2038 iowrite32(0, priv->regs + TSI721_DMAC_INTE(ch)); 2084 iowrite32(0,
2085 priv->regs + TSI721_DMAC_BASE(ch) + TSI721_DMAC_INTE);
2039 2086
2040 /* Disable all general BDMA interrupts */ 2087 /* Disable all general BDMA interrupts */
2041 iowrite32(0, priv->regs + TSI721_BDMA_INTE); 2088 iowrite32(0, priv->regs + TSI721_BDMA_INTE);
@@ -2104,6 +2151,7 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2104 mport->phy_type = RIO_PHY_SERIAL; 2151 mport->phy_type = RIO_PHY_SERIAL;
2105 mport->priv = (void *)priv; 2152 mport->priv = (void *)priv;
2106 mport->phys_efptr = 0x100; 2153 mport->phys_efptr = 0x100;
2154 priv->mport = mport;
2107 2155
2108 INIT_LIST_HEAD(&mport->dbells); 2156 INIT_LIST_HEAD(&mport->dbells);
2109 2157
@@ -2129,17 +2177,21 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2129 if (!err) { 2177 if (!err) {
2130 tsi721_interrupts_init(priv); 2178 tsi721_interrupts_init(priv);
2131 ops->pwenable = tsi721_pw_enable; 2179 ops->pwenable = tsi721_pw_enable;
2132 } else 2180 } else {
2133 dev_err(&pdev->dev, "Unable to get assigned PCI IRQ " 2181 dev_err(&pdev->dev, "Unable to get assigned PCI IRQ "
2134 "vector %02X err=0x%x\n", pdev->irq, err); 2182 "vector %02X err=0x%x\n", pdev->irq, err);
2183 goto err_exit;
2184 }
2135 2185
2186#ifdef CONFIG_RAPIDIO_DMA_ENGINE
2187 tsi721_register_dma(priv);
2188#endif
2136 /* Enable SRIO link */ 2189 /* Enable SRIO link */
2137 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) | 2190 iowrite32(ioread32(priv->regs + TSI721_DEVCTL) |
2138 TSI721_DEVCTL_SRBOOT_CMPL, 2191 TSI721_DEVCTL_SRBOOT_CMPL,
2139 priv->regs + TSI721_DEVCTL); 2192 priv->regs + TSI721_DEVCTL);
2140 2193
2141 rio_register_mport(mport); 2194 rio_register_mport(mport);
2142 priv->mport = mport;
2143 2195
2144 if (mport->host_deviceid >= 0) 2196 if (mport->host_deviceid >= 0)
2145 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER | 2197 iowrite32(RIO_PORT_GEN_HOST | RIO_PORT_GEN_MASTER |
@@ -2149,6 +2201,11 @@ static int __devinit tsi721_setup_mport(struct tsi721_device *priv)
2149 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR)); 2201 iowrite32(0, priv->regs + (0x100 + RIO_PORT_GEN_CTL_CSR));
2150 2202
2151 return 0; 2203 return 0;
2204
2205err_exit:
2206 kfree(mport);
2207 kfree(ops);
2208 return err;
2152} 2209}
2153 2210
2154static int __devinit tsi721_probe(struct pci_dev *pdev, 2211static int __devinit tsi721_probe(struct pci_dev *pdev,
@@ -2294,7 +2351,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2294 tsi721_init_pc2sr_mapping(priv); 2351 tsi721_init_pc2sr_mapping(priv);
2295 tsi721_init_sr2pc_mapping(priv); 2352 tsi721_init_sr2pc_mapping(priv);
2296 2353
2297 if (tsi721_bdma_init(priv)) { 2354 if (tsi721_bdma_maint_init(priv)) {
2298 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n"); 2355 dev_err(&pdev->dev, "BDMA initialization failed, aborting\n");
2299 err = -ENOMEM; 2356 err = -ENOMEM;
2300 goto err_unmap_bars; 2357 goto err_unmap_bars;
@@ -2319,7 +2376,7 @@ static int __devinit tsi721_probe(struct pci_dev *pdev,
2319err_free_consistent: 2376err_free_consistent:
2320 tsi721_doorbell_free(priv); 2377 tsi721_doorbell_free(priv);
2321err_free_bdma: 2378err_free_bdma:
2322 tsi721_bdma_free(priv); 2379 tsi721_bdma_maint_free(priv);
2323err_unmap_bars: 2380err_unmap_bars:
2324 if (priv->regs) 2381 if (priv->regs)
2325 iounmap(priv->regs); 2382 iounmap(priv->regs);
diff --git a/drivers/rapidio/devices/tsi721.h b/drivers/rapidio/devices/tsi721.h
index 1c226b31af13..59de9d7be346 100644
--- a/drivers/rapidio/devices/tsi721.h
+++ b/drivers/rapidio/devices/tsi721.h
@@ -167,6 +167,8 @@
167#define TSI721_DEV_INTE 0x29840 167#define TSI721_DEV_INTE 0x29840
168#define TSI721_DEV_INT 0x29844 168#define TSI721_DEV_INT 0x29844
169#define TSI721_DEV_INTSET 0x29848 169#define TSI721_DEV_INTSET 0x29848
170#define TSI721_DEV_INT_BDMA_CH 0x00002000
171#define TSI721_DEV_INT_BDMA_NCH 0x00001000
170#define TSI721_DEV_INT_SMSG_CH 0x00000800 172#define TSI721_DEV_INT_SMSG_CH 0x00000800
171#define TSI721_DEV_INT_SMSG_NCH 0x00000400 173#define TSI721_DEV_INT_SMSG_NCH 0x00000400
172#define TSI721_DEV_INT_SR2PC_CH 0x00000200 174#define TSI721_DEV_INT_SR2PC_CH 0x00000200
@@ -181,6 +183,8 @@
181#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x))) 183#define TSI721_INT_IMSG_CHAN(x) (1 << (16 + (x)))
182#define TSI721_INT_OMSG_CHAN_M 0x0000ff00 184#define TSI721_INT_OMSG_CHAN_M 0x0000ff00
183#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x))) 185#define TSI721_INT_OMSG_CHAN(x) (1 << (8 + (x)))
186#define TSI721_INT_BDMA_CHAN_M 0x000000ff
187#define TSI721_INT_BDMA_CHAN(x) (1 << (x))
184 188
185/* 189/*
186 * PC2SR block registers 190 * PC2SR block registers
@@ -235,14 +239,16 @@
235 * x = 0..7 239 * x = 0..7
236 */ 240 */
237 241
238#define TSI721_DMAC_DWRCNT(x) (0x51000 + (x) * 0x1000) 242#define TSI721_DMAC_BASE(x) (0x51000 + (x) * 0x1000)
239#define TSI721_DMAC_DRDCNT(x) (0x51004 + (x) * 0x1000)
240 243
241#define TSI721_DMAC_CTL(x) (0x51008 + (x) * 0x1000) 244#define TSI721_DMAC_DWRCNT 0x000
245#define TSI721_DMAC_DRDCNT 0x004
246
247#define TSI721_DMAC_CTL 0x008
242#define TSI721_DMAC_CTL_SUSP 0x00000002 248#define TSI721_DMAC_CTL_SUSP 0x00000002
243#define TSI721_DMAC_CTL_INIT 0x00000001 249#define TSI721_DMAC_CTL_INIT 0x00000001
244 250
245#define TSI721_DMAC_INT(x) (0x5100c + (x) * 0x1000) 251#define TSI721_DMAC_INT 0x00c
246#define TSI721_DMAC_INT_STFULL 0x00000010 252#define TSI721_DMAC_INT_STFULL 0x00000010
247#define TSI721_DMAC_INT_DONE 0x00000008 253#define TSI721_DMAC_INT_DONE 0x00000008
248#define TSI721_DMAC_INT_SUSP 0x00000004 254#define TSI721_DMAC_INT_SUSP 0x00000004
@@ -250,34 +256,33 @@
250#define TSI721_DMAC_INT_IOFDONE 0x00000001 256#define TSI721_DMAC_INT_IOFDONE 0x00000001
251#define TSI721_DMAC_INT_ALL 0x0000001f 257#define TSI721_DMAC_INT_ALL 0x0000001f
252 258
253#define TSI721_DMAC_INTSET(x) (0x51010 + (x) * 0x1000) 259#define TSI721_DMAC_INTSET 0x010
254 260
255#define TSI721_DMAC_STS(x) (0x51014 + (x) * 0x1000) 261#define TSI721_DMAC_STS 0x014
256#define TSI721_DMAC_STS_ABORT 0x00400000 262#define TSI721_DMAC_STS_ABORT 0x00400000
257#define TSI721_DMAC_STS_RUN 0x00200000 263#define TSI721_DMAC_STS_RUN 0x00200000
258#define TSI721_DMAC_STS_CS 0x001f0000 264#define TSI721_DMAC_STS_CS 0x001f0000
259 265
260#define TSI721_DMAC_INTE(x) (0x51018 + (x) * 0x1000) 266#define TSI721_DMAC_INTE 0x018
261 267
262#define TSI721_DMAC_DPTRL(x) (0x51024 + (x) * 0x1000) 268#define TSI721_DMAC_DPTRL 0x024
263#define TSI721_DMAC_DPTRL_MASK 0xffffffe0 269#define TSI721_DMAC_DPTRL_MASK 0xffffffe0
264 270
265#define TSI721_DMAC_DPTRH(x) (0x51028 + (x) * 0x1000) 271#define TSI721_DMAC_DPTRH 0x028
266 272
267#define TSI721_DMAC_DSBL(x) (0x5102c + (x) * 0x1000) 273#define TSI721_DMAC_DSBL 0x02c
268#define TSI721_DMAC_DSBL_MASK 0xffffffc0 274#define TSI721_DMAC_DSBL_MASK 0xffffffc0
269 275
270#define TSI721_DMAC_DSBH(x) (0x51030 + (x) * 0x1000) 276#define TSI721_DMAC_DSBH 0x030
271 277
272#define TSI721_DMAC_DSSZ(x) (0x51034 + (x) * 0x1000) 278#define TSI721_DMAC_DSSZ 0x034
273#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f 279#define TSI721_DMAC_DSSZ_SIZE_M 0x0000000f
274#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4) 280#define TSI721_DMAC_DSSZ_SIZE(size) (__fls(size) - 4)
275 281
276 282#define TSI721_DMAC_DSRP 0x038
277#define TSI721_DMAC_DSRP(x) (0x51038 + (x) * 0x1000)
278#define TSI721_DMAC_DSRP_MASK 0x0007ffff 283#define TSI721_DMAC_DSRP_MASK 0x0007ffff
279 284
280#define TSI721_DMAC_DSWP(x) (0x5103c + (x) * 0x1000) 285#define TSI721_DMAC_DSWP 0x03c
281#define TSI721_DMAC_DSWP_MASK 0x0007ffff 286#define TSI721_DMAC_DSWP_MASK 0x0007ffff
282 287
283#define TSI721_BDMA_INTE 0x5f000 288#define TSI721_BDMA_INTE 0x5f000
@@ -612,6 +617,8 @@ enum dma_rtype {
612#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */ 617#define TSI721_DMACH_MAINT 0 /* DMA channel for maint requests */
613#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */ 618#define TSI721_DMACH_MAINT_NBD 32 /* Number of BDs for maint requests */
614 619
620#define TSI721_DMACH_DMA 1 /* DMA channel for data transfers */
621
615#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0) 622#define MSG_DMA_ENTRY_INX_TO_SIZE(x) ((0x10 << (x)) & 0xFFFF0)
616 623
617enum tsi721_smsg_int_flag { 624enum tsi721_smsg_int_flag {
@@ -626,7 +633,48 @@ enum tsi721_smsg_int_flag {
626 633
627/* Structures */ 634/* Structures */
628 635
636#ifdef CONFIG_RAPIDIO_DMA_ENGINE
637
638struct tsi721_tx_desc {
639 struct dma_async_tx_descriptor txd;
640 struct tsi721_dma_desc *hw_desc;
641 u16 destid;
642 /* low 64-bits of 66-bit RIO address */
643 u64 rio_addr;
644 /* upper 2-bits of 66-bit RIO address */
645 u8 rio_addr_u;
646 bool interrupt;
647 struct list_head desc_node;
648 struct list_head tx_list;
649};
650
629struct tsi721_bdma_chan { 651struct tsi721_bdma_chan {
652 int id;
653 void __iomem *regs;
654 int bd_num; /* number of buffer descriptors */
655 void *bd_base; /* start of DMA descriptors */
656 dma_addr_t bd_phys;
657 void *sts_base; /* start of DMA BD status FIFO */
658 dma_addr_t sts_phys;
659 int sts_size;
660 u32 sts_rdptr;
661 u32 wr_count;
662 u32 wr_count_next;
663
664 struct dma_chan dchan;
665 struct tsi721_tx_desc *tx_desc;
666 spinlock_t lock;
667 struct list_head active_list;
668 struct list_head queue;
669 struct list_head free_list;
670 dma_cookie_t completed_cookie;
671 struct tasklet_struct tasklet;
672};
673
674#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
675
676struct tsi721_bdma_maint {
677 int ch_id; /* BDMA channel number */
630 int bd_num; /* number of buffer descriptors */ 678 int bd_num; /* number of buffer descriptors */
631 void *bd_base; /* start of DMA descriptors */ 679 void *bd_base; /* start of DMA descriptors */
632 dma_addr_t bd_phys; 680 dma_addr_t bd_phys;
@@ -721,6 +769,24 @@ enum tsi721_msix_vect {
721 TSI721_VECT_IMB1_INT, 769 TSI721_VECT_IMB1_INT,
722 TSI721_VECT_IMB2_INT, 770 TSI721_VECT_IMB2_INT,
723 TSI721_VECT_IMB3_INT, 771 TSI721_VECT_IMB3_INT,
772#ifdef CONFIG_RAPIDIO_DMA_ENGINE
773 TSI721_VECT_DMA0_DONE,
774 TSI721_VECT_DMA1_DONE,
775 TSI721_VECT_DMA2_DONE,
776 TSI721_VECT_DMA3_DONE,
777 TSI721_VECT_DMA4_DONE,
778 TSI721_VECT_DMA5_DONE,
779 TSI721_VECT_DMA6_DONE,
780 TSI721_VECT_DMA7_DONE,
781 TSI721_VECT_DMA0_INT,
782 TSI721_VECT_DMA1_INT,
783 TSI721_VECT_DMA2_INT,
784 TSI721_VECT_DMA3_INT,
785 TSI721_VECT_DMA4_INT,
786 TSI721_VECT_DMA5_INT,
787 TSI721_VECT_DMA6_INT,
788 TSI721_VECT_DMA7_INT,
789#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
724 TSI721_VECT_MAX 790 TSI721_VECT_MAX
725}; 791};
726 792
@@ -754,7 +820,11 @@ struct tsi721_device {
754 u32 pw_discard_count; 820 u32 pw_discard_count;
755 821
756 /* BDMA Engine */ 822 /* BDMA Engine */
823 struct tsi721_bdma_maint mdma; /* Maintenance rd/wr request channel */
824
825#ifdef CONFIG_RAPIDIO_DMA_ENGINE
757 struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM]; 826 struct tsi721_bdma_chan bdma[TSI721_DMA_CHNUM];
827#endif
758 828
759 /* Inbound Messaging */ 829 /* Inbound Messaging */
760 int imsg_init[TSI721_IMSG_CHNUM]; 830 int imsg_init[TSI721_IMSG_CHNUM];
@@ -765,4 +835,9 @@ struct tsi721_device {
765 struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM]; 835 struct tsi721_omsg_ring omsg_ring[TSI721_OMSG_CHNUM];
766}; 836};
767 837
838#ifdef CONFIG_RAPIDIO_DMA_ENGINE
839extern void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan);
840extern int __devinit tsi721_register_dma(struct tsi721_device *priv);
841#endif
842
768#endif 843#endif
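
The tsi721.h rework above replaces the absolute per-channel register macros (TSI721_DMAC_CTL(x), TSI721_DMAC_STS(x), ...) with a single TSI721_DMAC_BASE(x) plus fixed per-channel offsets, so each BDMA channel can carry its own register base pointer. A minimal sketch of the resulting addressing, using only names that appear in this diff (the ch variable is illustrative; the real assignment happens in tsi721_register_dma() in the new file below):

    /* Sketch only: per-channel register access after the rework.
     * bdma_chan->regs is set once to priv->regs + TSI721_DMAC_BASE(ch);
     * all later accesses add the fixed offsets instead of recomputing
     * absolute addresses per channel.
     */
    void __iomem *chan_regs = priv->regs + TSI721_DMAC_BASE(ch);   /* ch = 0..7 */
    u32 sts = ioread32(chan_regs + TSI721_DMAC_STS);               /* was TSI721_DMAC_STS(ch) */

    iowrite32(TSI721_DMAC_CTL_INIT, chan_regs + TSI721_DMAC_CTL);  /* was TSI721_DMAC_CTL(ch) */
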
diff --git a/drivers/rapidio/devices/tsi721_dma.c b/drivers/rapidio/devices/tsi721_dma.c
new file mode 100644
index 000000000000..92e06a5c62ec
--- /dev/null
+++ b/drivers/rapidio/devices/tsi721_dma.c
@@ -0,0 +1,823 @@
1/*
 2 * DMA Engine support for Tsi721 PCI Express-to-SRIO bridge
3 *
4 * Copyright 2011 Integrated Device Technology, Inc.
5 * Alexandre Bounine <alexandre.bounine@idt.com>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License as published by the Free
9 * Software Foundation; either version 2 of the License, or (at your option)
10 * any later version.
11 *
12 * This program is distributed in the hope that it will be useful, but WITHOUT
13 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 * more details.
16 *
17 * You should have received a copy of the GNU General Public License along with
18 * this program; if not, write to the Free Software Foundation, Inc., 59
19 * Temple Place - Suite 330, Boston, MA 02111-1307, USA.
20 */
21
22#include <linux/io.h>
23#include <linux/errno.h>
24#include <linux/init.h>
25#include <linux/ioport.h>
26#include <linux/kernel.h>
27#include <linux/module.h>
28#include <linux/pci.h>
29#include <linux/rio.h>
30#include <linux/rio_drv.h>
31#include <linux/dma-mapping.h>
32#include <linux/interrupt.h>
33#include <linux/kfifo.h>
34#include <linux/delay.h>
35
36#include "tsi721.h"
37
38static inline struct tsi721_bdma_chan *to_tsi721_chan(struct dma_chan *chan)
39{
40 return container_of(chan, struct tsi721_bdma_chan, dchan);
41}
42
43static inline struct tsi721_device *to_tsi721(struct dma_device *ddev)
44{
45 return container_of(ddev, struct rio_mport, dma)->priv;
46}
47
48static inline
49struct tsi721_tx_desc *to_tsi721_desc(struct dma_async_tx_descriptor *txd)
50{
51 return container_of(txd, struct tsi721_tx_desc, txd);
52}
53
54static inline
55struct tsi721_tx_desc *tsi721_dma_first_active(
56 struct tsi721_bdma_chan *bdma_chan)
57{
58 return list_first_entry(&bdma_chan->active_list,
59 struct tsi721_tx_desc, desc_node);
60}
61
62static int tsi721_bdma_ch_init(struct tsi721_bdma_chan *bdma_chan)
63{
64 struct tsi721_dma_desc *bd_ptr;
65 struct device *dev = bdma_chan->dchan.device->dev;
66 u64 *sts_ptr;
67 dma_addr_t bd_phys;
68 dma_addr_t sts_phys;
69 int sts_size;
70 int bd_num = bdma_chan->bd_num;
71
72 dev_dbg(dev, "Init Block DMA Engine, CH%d\n", bdma_chan->id);
73
74 /* Allocate space for DMA descriptors */
75 bd_ptr = dma_zalloc_coherent(dev,
76 bd_num * sizeof(struct tsi721_dma_desc),
77 &bd_phys, GFP_KERNEL);
78 if (!bd_ptr)
79 return -ENOMEM;
80
81 bdma_chan->bd_phys = bd_phys;
82 bdma_chan->bd_base = bd_ptr;
83
84 dev_dbg(dev, "DMA descriptors @ %p (phys = %llx)\n",
85 bd_ptr, (unsigned long long)bd_phys);
86
87 /* Allocate space for descriptor status FIFO */
88 sts_size = (bd_num >= TSI721_DMA_MINSTSSZ) ?
89 bd_num : TSI721_DMA_MINSTSSZ;
90 sts_size = roundup_pow_of_two(sts_size);
91 sts_ptr = dma_zalloc_coherent(dev,
92 sts_size * sizeof(struct tsi721_dma_sts),
93 &sts_phys, GFP_KERNEL);
94 if (!sts_ptr) {
95 /* Free space allocated for DMA descriptors */
96 dma_free_coherent(dev,
97 bd_num * sizeof(struct tsi721_dma_desc),
98 bd_ptr, bd_phys);
99 bdma_chan->bd_base = NULL;
100 return -ENOMEM;
101 }
102
103 bdma_chan->sts_phys = sts_phys;
104 bdma_chan->sts_base = sts_ptr;
105 bdma_chan->sts_size = sts_size;
106
107 dev_dbg(dev,
108 "desc status FIFO @ %p (phys = %llx) size=0x%x\n",
109 sts_ptr, (unsigned long long)sts_phys, sts_size);
110
111 /* Initialize DMA descriptors ring */
112 bd_ptr[bd_num - 1].type_id = cpu_to_le32(DTYPE3 << 29);
113 bd_ptr[bd_num - 1].next_lo = cpu_to_le32((u64)bd_phys &
114 TSI721_DMAC_DPTRL_MASK);
115 bd_ptr[bd_num - 1].next_hi = cpu_to_le32((u64)bd_phys >> 32);
116
117 /* Setup DMA descriptor pointers */
118 iowrite32(((u64)bd_phys >> 32),
119 bdma_chan->regs + TSI721_DMAC_DPTRH);
120 iowrite32(((u64)bd_phys & TSI721_DMAC_DPTRL_MASK),
121 bdma_chan->regs + TSI721_DMAC_DPTRL);
122
123 /* Setup descriptor status FIFO */
124 iowrite32(((u64)sts_phys >> 32),
125 bdma_chan->regs + TSI721_DMAC_DSBH);
126 iowrite32(((u64)sts_phys & TSI721_DMAC_DSBL_MASK),
127 bdma_chan->regs + TSI721_DMAC_DSBL);
128 iowrite32(TSI721_DMAC_DSSZ_SIZE(sts_size),
129 bdma_chan->regs + TSI721_DMAC_DSSZ);
130
131 /* Clear interrupt bits */
132 iowrite32(TSI721_DMAC_INT_ALL,
133 bdma_chan->regs + TSI721_DMAC_INT);
134
135 ioread32(bdma_chan->regs + TSI721_DMAC_INT);
136
137 /* Toggle DMA channel initialization */
138 iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
139 ioread32(bdma_chan->regs + TSI721_DMAC_CTL);
140 bdma_chan->wr_count = bdma_chan->wr_count_next = 0;
141 bdma_chan->sts_rdptr = 0;
142 udelay(10);
143
144 return 0;
145}
146
147static int tsi721_bdma_ch_free(struct tsi721_bdma_chan *bdma_chan)
148{
149 u32 ch_stat;
150
151 if (bdma_chan->bd_base == NULL)
152 return 0;
153
154 /* Check if DMA channel still running */
155 ch_stat = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
156 if (ch_stat & TSI721_DMAC_STS_RUN)
157 return -EFAULT;
158
159 /* Put DMA channel into init state */
160 iowrite32(TSI721_DMAC_CTL_INIT, bdma_chan->regs + TSI721_DMAC_CTL);
161
162 /* Free space allocated for DMA descriptors */
163 dma_free_coherent(bdma_chan->dchan.device->dev,
164 bdma_chan->bd_num * sizeof(struct tsi721_dma_desc),
165 bdma_chan->bd_base, bdma_chan->bd_phys);
166 bdma_chan->bd_base = NULL;
167
168 /* Free space allocated for status FIFO */
169 dma_free_coherent(bdma_chan->dchan.device->dev,
170 bdma_chan->sts_size * sizeof(struct tsi721_dma_sts),
171 bdma_chan->sts_base, bdma_chan->sts_phys);
172 bdma_chan->sts_base = NULL;
173 return 0;
174}
175
176static void
177tsi721_bdma_interrupt_enable(struct tsi721_bdma_chan *bdma_chan, int enable)
178{
179 if (enable) {
180 /* Clear pending BDMA channel interrupts */
181 iowrite32(TSI721_DMAC_INT_ALL,
182 bdma_chan->regs + TSI721_DMAC_INT);
183 ioread32(bdma_chan->regs + TSI721_DMAC_INT);
184 /* Enable BDMA channel interrupts */
185 iowrite32(TSI721_DMAC_INT_ALL,
186 bdma_chan->regs + TSI721_DMAC_INTE);
187 } else {
188 /* Disable BDMA channel interrupts */
189 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
190 /* Clear pending BDMA channel interrupts */
191 iowrite32(TSI721_DMAC_INT_ALL,
192 bdma_chan->regs + TSI721_DMAC_INT);
193 }
194
195}
196
197static bool tsi721_dma_is_idle(struct tsi721_bdma_chan *bdma_chan)
198{
199 u32 sts;
200
201 sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
202 return ((sts & TSI721_DMAC_STS_RUN) == 0);
203}
204
205void tsi721_bdma_handler(struct tsi721_bdma_chan *bdma_chan)
206{
207 /* Disable BDMA channel interrupts */
208 iowrite32(0, bdma_chan->regs + TSI721_DMAC_INTE);
209
210 tasklet_schedule(&bdma_chan->tasklet);
211}
212
213#ifdef CONFIG_PCI_MSI
214/**
 215 * tsi721_bdma_msix - MSI-X interrupt handler for BDMA channels
216 * @irq: Linux interrupt number
217 * @ptr: Pointer to interrupt-specific data (BDMA channel structure)
218 *
219 * Handles BDMA channel interrupts signaled using MSI-X.
220 */
221static irqreturn_t tsi721_bdma_msix(int irq, void *ptr)
222{
223 struct tsi721_bdma_chan *bdma_chan = ptr;
224
225 tsi721_bdma_handler(bdma_chan);
226 return IRQ_HANDLED;
227}
228#endif /* CONFIG_PCI_MSI */
229
230/* Must be called with the spinlock held */
231static void tsi721_start_dma(struct tsi721_bdma_chan *bdma_chan)
232{
233 if (!tsi721_dma_is_idle(bdma_chan)) {
234 dev_err(bdma_chan->dchan.device->dev,
235 "BUG: Attempt to start non-idle channel\n");
236 return;
237 }
238
239 if (bdma_chan->wr_count == bdma_chan->wr_count_next) {
240 dev_err(bdma_chan->dchan.device->dev,
241 "BUG: Attempt to start DMA with no BDs ready\n");
242 return;
243 }
244
245 dev_dbg(bdma_chan->dchan.device->dev,
246 "tx_chan: %p, chan: %d, regs: %p\n",
247 bdma_chan, bdma_chan->dchan.chan_id, bdma_chan->regs);
248
249 iowrite32(bdma_chan->wr_count_next,
250 bdma_chan->regs + TSI721_DMAC_DWRCNT);
251 ioread32(bdma_chan->regs + TSI721_DMAC_DWRCNT);
252
253 bdma_chan->wr_count = bdma_chan->wr_count_next;
254}
255
256static void tsi721_desc_put(struct tsi721_bdma_chan *bdma_chan,
257 struct tsi721_tx_desc *desc)
258{
259 dev_dbg(bdma_chan->dchan.device->dev,
260 "Put desc: %p into free list\n", desc);
261
262 if (desc) {
263 spin_lock_bh(&bdma_chan->lock);
264 list_splice_init(&desc->tx_list, &bdma_chan->free_list);
265 list_add(&desc->desc_node, &bdma_chan->free_list);
266 bdma_chan->wr_count_next = bdma_chan->wr_count;
267 spin_unlock_bh(&bdma_chan->lock);
268 }
269}
270
271static
272struct tsi721_tx_desc *tsi721_desc_get(struct tsi721_bdma_chan *bdma_chan)
273{
274 struct tsi721_tx_desc *tx_desc, *_tx_desc;
275 struct tsi721_tx_desc *ret = NULL;
276 int i;
277
278 spin_lock_bh(&bdma_chan->lock);
279 list_for_each_entry_safe(tx_desc, _tx_desc,
280 &bdma_chan->free_list, desc_node) {
281 if (async_tx_test_ack(&tx_desc->txd)) {
282 list_del(&tx_desc->desc_node);
283 ret = tx_desc;
284 break;
285 }
286 dev_dbg(bdma_chan->dchan.device->dev,
287 "desc %p not ACKed\n", tx_desc);
288 }
289
 290 if (ret) {
 291 i = bdma_chan->wr_count_next % bdma_chan->bd_num;
 292 if (i == bdma_chan->bd_num - 1) {
 293 i = 0;
 294 bdma_chan->wr_count_next++; /* skip link descriptor */
 295 }
 296 bdma_chan->wr_count_next++;
 297 ret->txd.phys = bdma_chan->bd_phys + i * sizeof(struct tsi721_dma_desc);
 298 ret->hw_desc = &((struct tsi721_dma_desc *)bdma_chan->bd_base)[i];
 299 }
300
301 spin_unlock_bh(&bdma_chan->lock);
302
303 return ret;
304}
305
306static int
307tsi721_fill_desc(struct tsi721_bdma_chan *bdma_chan,
308 struct tsi721_tx_desc *desc, struct scatterlist *sg,
309 enum dma_rtype rtype, u32 sys_size)
310{
311 struct tsi721_dma_desc *bd_ptr = desc->hw_desc;
312 u64 rio_addr;
313
314 if (sg_dma_len(sg) > TSI721_DMAD_BCOUNT1 + 1) {
315 dev_err(bdma_chan->dchan.device->dev,
316 "SG element is too large\n");
317 return -EINVAL;
318 }
319
320 dev_dbg(bdma_chan->dchan.device->dev,
321 "desc: 0x%llx, addr: 0x%llx len: 0x%x\n",
322 (u64)desc->txd.phys, (unsigned long long)sg_dma_address(sg),
323 sg_dma_len(sg));
324
325 dev_dbg(bdma_chan->dchan.device->dev,
326 "bd_ptr = %p did=%d raddr=0x%llx\n",
327 bd_ptr, desc->destid, desc->rio_addr);
328
329 /* Initialize DMA descriptor */
330 bd_ptr->type_id = cpu_to_le32((DTYPE1 << 29) |
331 (rtype << 19) | desc->destid);
332 if (desc->interrupt)
333 bd_ptr->type_id |= cpu_to_le32(TSI721_DMAD_IOF);
334 bd_ptr->bcount = cpu_to_le32(((desc->rio_addr & 0x3) << 30) |
335 (sys_size << 26) | sg_dma_len(sg));
336 rio_addr = (desc->rio_addr >> 2) |
337 ((u64)(desc->rio_addr_u & 0x3) << 62);
338 bd_ptr->raddr_lo = cpu_to_le32(rio_addr & 0xffffffff);
339 bd_ptr->raddr_hi = cpu_to_le32(rio_addr >> 32);
340 bd_ptr->t1.bufptr_lo = cpu_to_le32(
341 (u64)sg_dma_address(sg) & 0xffffffff);
342 bd_ptr->t1.bufptr_hi = cpu_to_le32((u64)sg_dma_address(sg) >> 32);
343 bd_ptr->t1.s_dist = 0;
344 bd_ptr->t1.s_size = 0;
345
346 return 0;
347}
348
349static void tsi721_dma_chain_complete(struct tsi721_bdma_chan *bdma_chan,
350 struct tsi721_tx_desc *desc)
351{
352 struct dma_async_tx_descriptor *txd = &desc->txd;
353 dma_async_tx_callback callback = txd->callback;
354 void *param = txd->callback_param;
355
356 list_splice_init(&desc->tx_list, &bdma_chan->free_list);
357 list_move(&desc->desc_node, &bdma_chan->free_list);
358 bdma_chan->completed_cookie = txd->cookie;
359
360 if (callback)
361 callback(param);
362}
363
364static void tsi721_dma_complete_all(struct tsi721_bdma_chan *bdma_chan)
365{
366 struct tsi721_tx_desc *desc, *_d;
367 LIST_HEAD(list);
368
369 BUG_ON(!tsi721_dma_is_idle(bdma_chan));
370
371 if (!list_empty(&bdma_chan->queue))
372 tsi721_start_dma(bdma_chan);
373
374 list_splice_init(&bdma_chan->active_list, &list);
375 list_splice_init(&bdma_chan->queue, &bdma_chan->active_list);
376
377 list_for_each_entry_safe(desc, _d, &list, desc_node)
378 tsi721_dma_chain_complete(bdma_chan, desc);
379}
380
381static void tsi721_clr_stat(struct tsi721_bdma_chan *bdma_chan)
382{
383 u32 srd_ptr;
384 u64 *sts_ptr;
385 int i, j;
386
387 /* Check and clear descriptor status FIFO entries */
388 srd_ptr = bdma_chan->sts_rdptr;
389 sts_ptr = bdma_chan->sts_base;
390 j = srd_ptr * 8;
391 while (sts_ptr[j]) {
392 for (i = 0; i < 8 && sts_ptr[j]; i++, j++)
393 sts_ptr[j] = 0;
394
395 ++srd_ptr;
396 srd_ptr %= bdma_chan->sts_size;
397 j = srd_ptr * 8;
398 }
399
400 iowrite32(srd_ptr, bdma_chan->regs + TSI721_DMAC_DSRP);
401 bdma_chan->sts_rdptr = srd_ptr;
402}
403
404static void tsi721_advance_work(struct tsi721_bdma_chan *bdma_chan)
405{
406 if (list_empty(&bdma_chan->active_list) ||
407 list_is_singular(&bdma_chan->active_list)) {
408 dev_dbg(bdma_chan->dchan.device->dev,
409 "%s: Active_list empty\n", __func__);
410 tsi721_dma_complete_all(bdma_chan);
411 } else {
412 dev_dbg(bdma_chan->dchan.device->dev,
413 "%s: Active_list NOT empty\n", __func__);
414 tsi721_dma_chain_complete(bdma_chan,
415 tsi721_dma_first_active(bdma_chan));
416 tsi721_start_dma(bdma_chan);
417 }
418}
419
420static void tsi721_dma_tasklet(unsigned long data)
421{
422 struct tsi721_bdma_chan *bdma_chan = (struct tsi721_bdma_chan *)data;
423 u32 dmac_int, dmac_sts;
424
425 dmac_int = ioread32(bdma_chan->regs + TSI721_DMAC_INT);
426 dev_dbg(bdma_chan->dchan.device->dev, "%s: DMAC%d_INT = 0x%x\n",
427 __func__, bdma_chan->id, dmac_int);
428 /* Clear channel interrupts */
429 iowrite32(dmac_int, bdma_chan->regs + TSI721_DMAC_INT);
430
431 if (dmac_int & TSI721_DMAC_INT_ERR) {
432 dmac_sts = ioread32(bdma_chan->regs + TSI721_DMAC_STS);
433 dev_err(bdma_chan->dchan.device->dev,
434 "%s: DMA ERROR - DMAC%d_STS = 0x%x\n",
435 __func__, bdma_chan->id, dmac_sts);
436 }
437
438 if (dmac_int & TSI721_DMAC_INT_STFULL) {
439 dev_err(bdma_chan->dchan.device->dev,
440 "%s: DMAC%d descriptor status FIFO is full\n",
441 __func__, bdma_chan->id);
442 }
443
444 if (dmac_int & (TSI721_DMAC_INT_DONE | TSI721_DMAC_INT_IOFDONE)) {
445 tsi721_clr_stat(bdma_chan);
446 spin_lock(&bdma_chan->lock);
447 tsi721_advance_work(bdma_chan);
448 spin_unlock(&bdma_chan->lock);
449 }
450
451 /* Re-Enable BDMA channel interrupts */
452 iowrite32(TSI721_DMAC_INT_ALL, bdma_chan->regs + TSI721_DMAC_INTE);
453}
454
455static dma_cookie_t tsi721_tx_submit(struct dma_async_tx_descriptor *txd)
456{
457 struct tsi721_tx_desc *desc = to_tsi721_desc(txd);
458 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(txd->chan);
459 dma_cookie_t cookie;
460
461 spin_lock_bh(&bdma_chan->lock);
462
463 cookie = txd->chan->cookie;
464 if (++cookie < 0)
465 cookie = 1;
466 txd->chan->cookie = cookie;
467 txd->cookie = cookie;
468
469 if (list_empty(&bdma_chan->active_list)) {
470 list_add_tail(&desc->desc_node, &bdma_chan->active_list);
471 tsi721_start_dma(bdma_chan);
472 } else {
473 list_add_tail(&desc->desc_node, &bdma_chan->queue);
474 }
475
476 spin_unlock_bh(&bdma_chan->lock);
477 return cookie;
478}
479
480static int tsi721_alloc_chan_resources(struct dma_chan *dchan)
481{
482 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
483#ifdef CONFIG_PCI_MSI
484 struct tsi721_device *priv = to_tsi721(dchan->device);
485#endif
486 struct tsi721_tx_desc *desc = NULL;
487 LIST_HEAD(tmp_list);
488 int i;
489 int rc;
490
491 if (bdma_chan->bd_base)
492 return bdma_chan->bd_num - 1;
493
494 /* Initialize BDMA channel */
495 if (tsi721_bdma_ch_init(bdma_chan)) {
496 dev_err(dchan->device->dev, "Unable to initialize data DMA"
497 " channel %d, aborting\n", bdma_chan->id);
498 return -ENOMEM;
499 }
500
 501 /* Allocate a matching number of logical descriptors */
502 desc = kcalloc((bdma_chan->bd_num - 1), sizeof(struct tsi721_tx_desc),
503 GFP_KERNEL);
504 if (!desc) {
505 dev_err(dchan->device->dev,
506 "Failed to allocate logical descriptors\n");
507 rc = -ENOMEM;
508 goto err_out;
509 }
510
511 bdma_chan->tx_desc = desc;
512
513 for (i = 0; i < bdma_chan->bd_num - 1; i++) {
514 dma_async_tx_descriptor_init(&desc[i].txd, dchan);
515 desc[i].txd.tx_submit = tsi721_tx_submit;
516 desc[i].txd.flags = DMA_CTRL_ACK;
517 INIT_LIST_HEAD(&desc[i].tx_list);
518 list_add_tail(&desc[i].desc_node, &tmp_list);
519 }
520
521 spin_lock_bh(&bdma_chan->lock);
522 list_splice(&tmp_list, &bdma_chan->free_list);
523 bdma_chan->completed_cookie = dchan->cookie = 1;
524 spin_unlock_bh(&bdma_chan->lock);
525
526#ifdef CONFIG_PCI_MSI
527 if (priv->flags & TSI721_USING_MSIX) {
528 /* Request interrupt service if we are in MSI-X mode */
529 rc = request_irq(
530 priv->msix[TSI721_VECT_DMA0_DONE +
531 bdma_chan->id].vector,
532 tsi721_bdma_msix, 0,
533 priv->msix[TSI721_VECT_DMA0_DONE +
534 bdma_chan->id].irq_name,
535 (void *)bdma_chan);
536
537 if (rc) {
538 dev_dbg(dchan->device->dev,
539 "Unable to allocate MSI-X interrupt for "
540 "BDMA%d-DONE\n", bdma_chan->id);
541 goto err_out;
542 }
543
544 rc = request_irq(priv->msix[TSI721_VECT_DMA0_INT +
545 bdma_chan->id].vector,
546 tsi721_bdma_msix, 0,
547 priv->msix[TSI721_VECT_DMA0_INT +
548 bdma_chan->id].irq_name,
549 (void *)bdma_chan);
550
551 if (rc) {
552 dev_dbg(dchan->device->dev,
553 "Unable to allocate MSI-X interrupt for "
554 "BDMA%d-INT\n", bdma_chan->id);
555 free_irq(
556 priv->msix[TSI721_VECT_DMA0_DONE +
557 bdma_chan->id].vector,
558 (void *)bdma_chan);
559 rc = -EIO;
560 goto err_out;
561 }
562 }
563#endif /* CONFIG_PCI_MSI */
564
565 tasklet_enable(&bdma_chan->tasklet);
566 tsi721_bdma_interrupt_enable(bdma_chan, 1);
567
568 return bdma_chan->bd_num - 1;
569
570err_out:
571 kfree(desc);
572 tsi721_bdma_ch_free(bdma_chan);
573 return rc;
574}
575
576static void tsi721_free_chan_resources(struct dma_chan *dchan)
577{
578 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
579#ifdef CONFIG_PCI_MSI
580 struct tsi721_device *priv = to_tsi721(dchan->device);
581#endif
582 LIST_HEAD(list);
583
584 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
585
586 if (bdma_chan->bd_base == NULL)
587 return;
588
589 BUG_ON(!list_empty(&bdma_chan->active_list));
590 BUG_ON(!list_empty(&bdma_chan->queue));
591
592 tasklet_disable(&bdma_chan->tasklet);
593
594 spin_lock_bh(&bdma_chan->lock);
595 list_splice_init(&bdma_chan->free_list, &list);
596 spin_unlock_bh(&bdma_chan->lock);
597
598 tsi721_bdma_interrupt_enable(bdma_chan, 0);
599
600#ifdef CONFIG_PCI_MSI
601 if (priv->flags & TSI721_USING_MSIX) {
602 free_irq(priv->msix[TSI721_VECT_DMA0_DONE +
603 bdma_chan->id].vector, (void *)bdma_chan);
604 free_irq(priv->msix[TSI721_VECT_DMA0_INT +
605 bdma_chan->id].vector, (void *)bdma_chan);
606 }
607#endif /* CONFIG_PCI_MSI */
608
609 tsi721_bdma_ch_free(bdma_chan);
610 kfree(bdma_chan->tx_desc);
611}
612
613static
614enum dma_status tsi721_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
615 struct dma_tx_state *txstate)
616{
617 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
618 dma_cookie_t last_used;
619 dma_cookie_t last_completed;
620 int ret;
621
622 spin_lock_bh(&bdma_chan->lock);
623 last_completed = bdma_chan->completed_cookie;
624 last_used = dchan->cookie;
625 spin_unlock_bh(&bdma_chan->lock);
626
627 ret = dma_async_is_complete(cookie, last_completed, last_used);
628
629 dma_set_tx_state(txstate, last_completed, last_used, 0);
630
631 dev_dbg(dchan->device->dev,
632 "%s: exit, ret: %d, last_completed: %d, last_used: %d\n",
633 __func__, ret, last_completed, last_used);
634
635 return ret;
636}
637
638static void tsi721_issue_pending(struct dma_chan *dchan)
639{
640 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
641
642 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
643
644 if (tsi721_dma_is_idle(bdma_chan)) {
645 spin_lock_bh(&bdma_chan->lock);
646 tsi721_advance_work(bdma_chan);
647 spin_unlock_bh(&bdma_chan->lock);
648 } else
649 dev_dbg(dchan->device->dev,
650 "%s: DMA channel still busy\n", __func__);
651}
652
653static
654struct dma_async_tx_descriptor *tsi721_prep_rio_sg(struct dma_chan *dchan,
655 struct scatterlist *sgl, unsigned int sg_len,
656 enum dma_transfer_direction dir, unsigned long flags,
657 void *tinfo)
658{
659 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
660 struct tsi721_tx_desc *desc = NULL;
661 struct tsi721_tx_desc *first = NULL;
662 struct scatterlist *sg;
663 struct rio_dma_ext *rext = tinfo;
664 u64 rio_addr = rext->rio_addr; /* limited to 64-bit rio_addr for now */
665 unsigned int i;
666 u32 sys_size = dma_to_mport(dchan->device)->sys_size;
667 enum dma_rtype rtype;
668
669 if (!sgl || !sg_len) {
670 dev_err(dchan->device->dev, "%s: No SG list\n", __func__);
671 return NULL;
672 }
673
674 if (dir == DMA_DEV_TO_MEM)
675 rtype = NREAD;
676 else if (dir == DMA_MEM_TO_DEV) {
677 switch (rext->wr_type) {
678 case RDW_ALL_NWRITE:
679 rtype = ALL_NWRITE;
680 break;
681 case RDW_ALL_NWRITE_R:
682 rtype = ALL_NWRITE_R;
683 break;
684 case RDW_LAST_NWRITE_R:
685 default:
686 rtype = LAST_NWRITE_R;
687 break;
688 }
689 } else {
690 dev_err(dchan->device->dev,
691 "%s: Unsupported DMA direction option\n", __func__);
692 return NULL;
693 }
694
695 for_each_sg(sgl, sg, sg_len, i) {
696 int err;
697
698 dev_dbg(dchan->device->dev, "%s: sg #%d\n", __func__, i);
699 desc = tsi721_desc_get(bdma_chan);
700 if (!desc) {
701 dev_err(dchan->device->dev,
702 "Not enough descriptors available\n");
703 goto err_desc_get;
704 }
705
706 if (sg_is_last(sg))
707 desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0;
708 else
709 desc->interrupt = false;
710
711 desc->destid = rext->destid;
712 desc->rio_addr = rio_addr;
713 desc->rio_addr_u = 0;
714
715 err = tsi721_fill_desc(bdma_chan, desc, sg, rtype, sys_size);
716 if (err) {
717 dev_err(dchan->device->dev,
718 "Failed to build desc: %d\n", err);
719 goto err_desc_get;
720 }
721
722 rio_addr += sg_dma_len(sg);
723
724 if (!first)
725 first = desc;
726 else
727 list_add_tail(&desc->desc_node, &first->tx_list);
728 }
729
730 first->txd.cookie = -EBUSY;
731 desc->txd.flags = flags;
732
733 return &first->txd;
734
735err_desc_get:
736 tsi721_desc_put(bdma_chan, first);
737 return NULL;
738}
739
740static int tsi721_device_control(struct dma_chan *dchan, enum dma_ctrl_cmd cmd,
741 unsigned long arg)
742{
743 struct tsi721_bdma_chan *bdma_chan = to_tsi721_chan(dchan);
744 struct tsi721_tx_desc *desc, *_d;
745 LIST_HEAD(list);
746
747 dev_dbg(dchan->device->dev, "%s: Entry\n", __func__);
748
749 if (cmd != DMA_TERMINATE_ALL)
750 return -ENXIO;
751
752 spin_lock_bh(&bdma_chan->lock);
753
754 /* make sure to stop the transfer */
755 iowrite32(TSI721_DMAC_CTL_SUSP, bdma_chan->regs + TSI721_DMAC_CTL);
756
757 list_splice_init(&bdma_chan->active_list, &list);
758 list_splice_init(&bdma_chan->queue, &list);
759
760 list_for_each_entry_safe(desc, _d, &list, desc_node)
761 tsi721_dma_chain_complete(bdma_chan, desc);
762
763 spin_unlock_bh(&bdma_chan->lock);
764
765 return 0;
766}
767
768int __devinit tsi721_register_dma(struct tsi721_device *priv)
769{
770 int i;
771 int nr_channels = TSI721_DMA_MAXCH;
772 int err;
773 struct rio_mport *mport = priv->mport;
774
775 mport->dma.dev = &priv->pdev->dev;
776 mport->dma.chancnt = nr_channels;
777
778 INIT_LIST_HEAD(&mport->dma.channels);
779
780 for (i = 0; i < nr_channels; i++) {
781 struct tsi721_bdma_chan *bdma_chan = &priv->bdma[i];
782
783 if (i == TSI721_DMACH_MAINT)
784 continue;
785
786 bdma_chan->bd_num = 64;
787 bdma_chan->regs = priv->regs + TSI721_DMAC_BASE(i);
788
789 bdma_chan->dchan.device = &mport->dma;
790 bdma_chan->dchan.cookie = 1;
791 bdma_chan->dchan.chan_id = i;
792 bdma_chan->id = i;
793
794 spin_lock_init(&bdma_chan->lock);
795
796 INIT_LIST_HEAD(&bdma_chan->active_list);
797 INIT_LIST_HEAD(&bdma_chan->queue);
798 INIT_LIST_HEAD(&bdma_chan->free_list);
799
800 tasklet_init(&bdma_chan->tasklet, tsi721_dma_tasklet,
801 (unsigned long)bdma_chan);
802 tasklet_disable(&bdma_chan->tasklet);
803 list_add_tail(&bdma_chan->dchan.device_node,
804 &mport->dma.channels);
805 }
806
807 dma_cap_zero(mport->dma.cap_mask);
808 dma_cap_set(DMA_PRIVATE, mport->dma.cap_mask);
809 dma_cap_set(DMA_SLAVE, mport->dma.cap_mask);
810
811 mport->dma.device_alloc_chan_resources = tsi721_alloc_chan_resources;
812 mport->dma.device_free_chan_resources = tsi721_free_chan_resources;
813 mport->dma.device_tx_status = tsi721_tx_status;
814 mport->dma.device_issue_pending = tsi721_issue_pending;
815 mport->dma.device_prep_slave_sg = tsi721_prep_rio_sg;
816 mport->dma.device_control = tsi721_device_control;
817
818 err = dma_async_device_register(&mport->dma);
819 if (err)
820 dev_err(&priv->pdev->dev, "Failed to register DMA device\n");
821
822 return err;
823}
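
tsi721_bdma_handler() and tsi721_register_dma() are the only symbols exported from this new file (see the externs added to tsi721.h above). In MSI-X mode each channel gets its own DONE/INT vectors; in INTx/MSI mode the bridge's top-level handler in tsi721.c is expected to test the new TSI721_DEV_INT_BDMA_CH / TSI721_INT_BDMA_CHAN() bits and call tsi721_bdma_handler() for each flagged channel. A hedged sketch of that dispatch (dev_int and dev_ch_int are assumed local variables; the real hookup lives in tsi721.c, outside this excerpt):

    /* Illustrative only: BDMA channel dispatch in the INTx/MSI interrupt path. */
    if (dev_int & TSI721_DEV_INT_BDMA_CH) {
            int ch;

            for (ch = 0; ch < TSI721_DMA_CHNUM; ch++) {
                    if (ch == TSI721_DMACH_MAINT)
                            continue;       /* maint requests use tsi721_bdma_maint instead */
                    if (dev_ch_int & TSI721_INT_BDMA_CHAN(ch))
                            tsi721_bdma_handler(&priv->bdma[ch]);
            }
    }
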
diff --git a/drivers/rapidio/rio.c b/drivers/rapidio/rio.c
index 86c9a091a2ff..c40665a4fa33 100644
--- a/drivers/rapidio/rio.c
+++ b/drivers/rapidio/rio.c
@@ -1121,6 +1121,87 @@ int rio_std_route_clr_table(struct rio_mport *mport, u16 destid, u8 hopcount,
1121 return 0; 1121 return 0;
1122} 1122}
1123 1123
1124#ifdef CONFIG_RAPIDIO_DMA_ENGINE
1125
1126static bool rio_chan_filter(struct dma_chan *chan, void *arg)
1127{
1128 struct rio_dev *rdev = arg;
1129
1130 /* Check that DMA device belongs to the right MPORT */
1131 return (rdev->net->hport ==
1132 container_of(chan->device, struct rio_mport, dma));
1133}
1134
1135/**
1136 * rio_request_dma - request RapidIO capable DMA channel that supports
1137 * specified target RapidIO device.
1138 * @rdev: RIO device control structure
1139 *
1140 * Returns pointer to allocated DMA channel or NULL if failed.
1141 */
1142struct dma_chan *rio_request_dma(struct rio_dev *rdev)
1143{
1144 dma_cap_mask_t mask;
1145 struct dma_chan *dchan;
1146
1147 dma_cap_zero(mask);
1148 dma_cap_set(DMA_SLAVE, mask);
1149 dchan = dma_request_channel(mask, rio_chan_filter, rdev);
1150
1151 return dchan;
1152}
1153EXPORT_SYMBOL_GPL(rio_request_dma);
1154
1155/**
1156 * rio_release_dma - release specified DMA channel
1157 * @dchan: DMA channel to release
1158 */
1159void rio_release_dma(struct dma_chan *dchan)
1160{
1161 dma_release_channel(dchan);
1162}
1163EXPORT_SYMBOL_GPL(rio_release_dma);
1164
1165/**
1166 * rio_dma_prep_slave_sg - RapidIO specific wrapper
1167 * for device_prep_slave_sg callback defined by DMAENGINE.
1168 * @rdev: RIO device control structure
1169 * @dchan: DMA channel to configure
1170 * @data: RIO specific data descriptor
1171 * @direction: DMA data transfer direction (TO or FROM the device)
1172 * @flags: dmaengine defined flags
1173 *
1174 * Initializes RapidIO capable DMA channel for the specified data transfer.
1175 * Uses DMA channel private extension to pass information related to remote
1176 * target RIO device.
1177 * Returns pointer to DMA transaction descriptor or NULL if failed.
1178 */
1179struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(struct rio_dev *rdev,
1180 struct dma_chan *dchan, struct rio_dma_data *data,
1181 enum dma_transfer_direction direction, unsigned long flags)
1182{
1183 struct dma_async_tx_descriptor *txd = NULL;
1184 struct rio_dma_ext rio_ext;
1185
1186 if (dchan->device->device_prep_slave_sg == NULL) {
1187 pr_err("%s: prep_rio_sg == NULL\n", __func__);
1188 return NULL;
1189 }
1190
1191 rio_ext.destid = rdev->destid;
1192 rio_ext.rio_addr_u = data->rio_addr_u;
1193 rio_ext.rio_addr = data->rio_addr;
1194 rio_ext.wr_type = data->wr_type;
1195
1196 txd = dmaengine_prep_rio_sg(dchan, data->sg, data->sg_len,
1197 direction, flags, &rio_ext);
1198
1199 return txd;
1200}
1201EXPORT_SYMBOL_GPL(rio_dma_prep_slave_sg);
1202
1203#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
1204
1124static void rio_fixup_device(struct rio_dev *dev) 1205static void rio_fixup_device(struct rio_dev *dev)
1125{ 1206{
1126} 1207}
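
rio_request_dma() and rio_dma_prep_slave_sg() added above are thin wrappers over the generic dmaengine API, with struct rio_dma_ext carrying the target destid and 66-bit RIO address down to the driver's prep callback (tsi721_prep_rio_sg() in this series). A hedged usage sketch from a RapidIO device driver's point of view (sgl, nents and the target address are placeholder values; error handling is trimmed):

    /* Sketch: writing a caller-mapped scatterlist to a remote RIO device. */
    struct dma_chan *dchan = rio_request_dma(rdev);
    struct dma_async_tx_descriptor *txd;
    struct rio_dma_data data = {
            .sg         = sgl,              /* placeholder scatterlist */
            .sg_len     = nents,
            .rio_addr   = 0x10000000,       /* example target RIO address */
            .rio_addr_u = 0,
            .wr_type    = RDW_ALL_NWRITE_R,
    };

    if (!dchan)
            return -ENODEV;

    txd = rio_dma_prep_slave_sg(rdev, dchan, &data, DMA_MEM_TO_DEV,
                                DMA_PREP_INTERRUPT);
    if (txd) {
            dma_cookie_t cookie = dmaengine_submit(txd);    /* -> tsi721_tx_submit() */

            dma_async_issue_pending(dchan);
            /* ... poll dma_async_is_tx_complete(dchan, cookie, NULL, NULL) ... */
    }
    rio_release_dma(dchan);
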
diff --git a/drivers/regulator/ab8500.c b/drivers/regulator/ab8500.c
index e1b8c54ace5a..a739f5ca936a 100644
--- a/drivers/regulator/ab8500.c
+++ b/drivers/regulator/ab8500.c
@@ -794,17 +794,17 @@ static __devinit int ab8500_regulator_register(struct platform_device *pdev,
794} 794}
795 795
796static struct of_regulator_match ab8500_regulator_matches[] = { 796static struct of_regulator_match ab8500_regulator_matches[] = {
797 { .name = "LDO-AUX1", .driver_data = (void *) AB8500_LDO_AUX1, }, 797 { .name = "ab8500_ldo_aux1", .driver_data = (void *) AB8500_LDO_AUX1, },
798 { .name = "LDO-AUX2", .driver_data = (void *) AB8500_LDO_AUX2, }, 798 { .name = "ab8500_ldo_aux2", .driver_data = (void *) AB8500_LDO_AUX2, },
799 { .name = "LDO-AUX3", .driver_data = (void *) AB8500_LDO_AUX3, }, 799 { .name = "ab8500_ldo_aux3", .driver_data = (void *) AB8500_LDO_AUX3, },
800 { .name = "LDO-INTCORE", .driver_data = (void *) AB8500_LDO_INTCORE, }, 800 { .name = "ab8500_ldo_intcore", .driver_data = (void *) AB8500_LDO_INTCORE, },
801 { .name = "LDO-TVOUT", .driver_data = (void *) AB8500_LDO_TVOUT, }, 801 { .name = "ab8500_ldo_tvout", .driver_data = (void *) AB8500_LDO_TVOUT, },
802 { .name = "LDO-USB", .driver_data = (void *) AB8500_LDO_USB, }, 802 { .name = "ab8500_ldo_usb", .driver_data = (void *) AB8500_LDO_USB, },
803 { .name = "LDO-AUDIO", .driver_data = (void *) AB8500_LDO_AUDIO, }, 803 { .name = "ab8500_ldo_audio", .driver_data = (void *) AB8500_LDO_AUDIO, },
804 { .name = "LDO-ANAMIC1", .driver_data = (void *) AB8500_LDO_ANAMIC1, }, 804 { .name = "ab8500_ldo_anamic1", .driver_data = (void *) AB8500_LDO_ANAMIC1, },
805 { .name = "LDO-ANAMIC2", .driver_data = (void *) AB8500_LDO_ANAMIC2, }, 805 { .name = "ab8500_ldo_amamic2", .driver_data = (void *) AB8500_LDO_ANAMIC2, },
806 { .name = "LDO-DMIC", .driver_data = (void *) AB8500_LDO_DMIC, }, 806 { .name = "ab8500_ldo_dmic", .driver_data = (void *) AB8500_LDO_DMIC, },
807 { .name = "LDO-ANA", .driver_data = (void *) AB8500_LDO_ANA, }, 807 { .name = "ab8500_ldo_ana", .driver_data = (void *) AB8500_LDO_ANA, },
808}; 808};
809 809
810static __devinit int 810static __devinit int
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 3660bace123c..e82e7eaac0f1 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -224,7 +224,7 @@ static struct platform_driver anatop_regulator_driver = {
224 .of_match_table = of_anatop_regulator_match_tbl, 224 .of_match_table = of_anatop_regulator_match_tbl,
225 }, 225 },
226 .probe = anatop_regulator_probe, 226 .probe = anatop_regulator_probe,
227 .remove = anatop_regulator_remove, 227 .remove = __devexit_p(anatop_regulator_remove),
228}; 228};
229 229
230static int __init anatop_regulator_init(void) 230static int __init anatop_regulator_init(void)
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 7584a74eec8a..09a737c868b5 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -2050,6 +2050,9 @@ int regulator_map_voltage_linear(struct regulator_dev *rdev,
2050 return -EINVAL; 2050 return -EINVAL;
2051 } 2051 }
2052 2052
2053 if (min_uV < rdev->desc->min_uV)
2054 min_uV = rdev->desc->min_uV;
2055
2053 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step); 2056 ret = DIV_ROUND_UP(min_uV - rdev->desc->min_uV, rdev->desc->uV_step);
2054 if (ret < 0) 2057 if (ret < 0)
2055 return ret; 2058 return ret;
diff --git a/drivers/regulator/db8500-prcmu.c b/drivers/regulator/db8500-prcmu.c
index 968f97f3cb3d..9dbb491b6efa 100644
--- a/drivers/regulator/db8500-prcmu.c
+++ b/drivers/regulator/db8500-prcmu.c
@@ -452,26 +452,26 @@ static __devinit int db8500_regulator_register(struct platform_device *pdev,
452} 452}
453 453
454static struct of_regulator_match db8500_regulator_matches[] = { 454static struct of_regulator_match db8500_regulator_matches[] = {
455 { .name = "db8500-vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, }, 455 { .name = "db8500_vape", .driver_data = (void *) DB8500_REGULATOR_VAPE, },
456 { .name = "db8500-varm", .driver_data = (void *) DB8500_REGULATOR_VARM, }, 456 { .name = "db8500_varm", .driver_data = (void *) DB8500_REGULATOR_VARM, },
457 { .name = "db8500-vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, }, 457 { .name = "db8500_vmodem", .driver_data = (void *) DB8500_REGULATOR_VMODEM, },
458 { .name = "db8500-vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, }, 458 { .name = "db8500_vpll", .driver_data = (void *) DB8500_REGULATOR_VPLL, },
459 { .name = "db8500-vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, }, 459 { .name = "db8500_vsmps1", .driver_data = (void *) DB8500_REGULATOR_VSMPS1, },
460 { .name = "db8500-vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, }, 460 { .name = "db8500_vsmps2", .driver_data = (void *) DB8500_REGULATOR_VSMPS2, },
461 { .name = "db8500-vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, }, 461 { .name = "db8500_vsmps3", .driver_data = (void *) DB8500_REGULATOR_VSMPS3, },
462 { .name = "db8500-vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, }, 462 { .name = "db8500_vrf1", .driver_data = (void *) DB8500_REGULATOR_VRF1, },
463 { .name = "db8500-sva-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, }, 463 { .name = "db8500_sva_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSP, },
464 { .name = "db8500-sva-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, }, 464 { .name = "db8500_sva_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAMMDSPRET, },
465 { .name = "db8500-sva-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, }, 465 { .name = "db8500_sva_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SVAPIPE, },
466 { .name = "db8500-sia-mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, }, 466 { .name = "db8500_sia_mmdsp", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSP, },
467 { .name = "db8500-sia-mmdsp-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, }, 467 { .name = "db8500_sia_mmdsp_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAMMDSPRET, },
468 { .name = "db8500-sia-pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, }, 468 { .name = "db8500_sia_pipe", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SIAPIPE, },
469 { .name = "db8500-sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, }, 469 { .name = "db8500_sga", .driver_data = (void *) DB8500_REGULATOR_SWITCH_SGA, },
470 { .name = "db8500-b2r2-mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, }, 470 { .name = "db8500_b2r2_mcde", .driver_data = (void *) DB8500_REGULATOR_SWITCH_B2R2_MCDE, },
471 { .name = "db8500-esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, }, 471 { .name = "db8500_esram12", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12, },
472 { .name = "db8500-esram12-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, }, 472 { .name = "db8500_esram12_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM12RET, },
473 { .name = "db8500-esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, }, 473 { .name = "db8500_esram34", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34, },
474 { .name = "db8500-esram34-ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, }, 474 { .name = "db8500_esram34_ret", .driver_data = (void *) DB8500_REGULATOR_SWITCH_ESRAM34RET, },
475}; 475};
476 476
477static __devinit int 477static __devinit int
diff --git a/drivers/regulator/gpio-regulator.c b/drivers/regulator/gpio-regulator.c
index 9997d7aaca84..242851a4c1a6 100644
--- a/drivers/regulator/gpio-regulator.c
+++ b/drivers/regulator/gpio-regulator.c
@@ -101,16 +101,20 @@ static int gpio_regulator_get_value(struct regulator_dev *dev)
101} 101}
102 102
103static int gpio_regulator_set_value(struct regulator_dev *dev, 103static int gpio_regulator_set_value(struct regulator_dev *dev,
104 int min, int max) 104 int min, int max, unsigned *selector)
105{ 105{
106 struct gpio_regulator_data *data = rdev_get_drvdata(dev); 106 struct gpio_regulator_data *data = rdev_get_drvdata(dev);
107 int ptr, target, state, best_val = INT_MAX; 107 int ptr, target = 0, state, best_val = INT_MAX;
108 108
109 for (ptr = 0; ptr < data->nr_states; ptr++) 109 for (ptr = 0; ptr < data->nr_states; ptr++)
110 if (data->states[ptr].value < best_val && 110 if (data->states[ptr].value < best_val &&
111 data->states[ptr].value >= min && 111 data->states[ptr].value >= min &&
112 data->states[ptr].value <= max) 112 data->states[ptr].value <= max) {
113 target = data->states[ptr].gpios; 113 target = data->states[ptr].gpios;
114 best_val = data->states[ptr].value;
115 if (selector)
116 *selector = ptr;
117 }
114 118
115 if (best_val == INT_MAX) 119 if (best_val == INT_MAX)
116 return -EINVAL; 120 return -EINVAL;
@@ -128,7 +132,7 @@ static int gpio_regulator_set_voltage(struct regulator_dev *dev,
128 int min_uV, int max_uV, 132 int min_uV, int max_uV,
129 unsigned *selector) 133 unsigned *selector)
130{ 134{
131 return gpio_regulator_set_value(dev, min_uV, max_uV); 135 return gpio_regulator_set_value(dev, min_uV, max_uV, selector);
132} 136}
133 137
134static int gpio_regulator_list_voltage(struct regulator_dev *dev, 138static int gpio_regulator_list_voltage(struct regulator_dev *dev,
@@ -145,7 +149,7 @@ static int gpio_regulator_list_voltage(struct regulator_dev *dev,
145static int gpio_regulator_set_current_limit(struct regulator_dev *dev, 149static int gpio_regulator_set_current_limit(struct regulator_dev *dev,
146 int min_uA, int max_uA) 150 int min_uA, int max_uA)
147{ 151{
148 return gpio_regulator_set_value(dev, min_uA, max_uA); 152 return gpio_regulator_set_value(dev, min_uA, max_uA, NULL);
149} 153}
150 154
151static struct regulator_ops gpio_regulator_voltage_ops = { 155static struct regulator_ops gpio_regulator_voltage_ops = {
@@ -286,7 +290,7 @@ static int __devinit gpio_regulator_probe(struct platform_device *pdev)
286 290
287 cfg.dev = &pdev->dev; 291 cfg.dev = &pdev->dev;
288 cfg.init_data = config->init_data; 292 cfg.init_data = config->init_data;
289 cfg.driver_data = &drvdata; 293 cfg.driver_data = drvdata;
290 294
291 drvdata->dev = regulator_register(&drvdata->desc, &cfg); 295 drvdata->dev = regulator_register(&drvdata->desc, &cfg);
292 if (IS_ERR(drvdata->dev)) { 296 if (IS_ERR(drvdata->dev)) {
diff --git a/drivers/regulator/max8649.c b/drivers/regulator/max8649.c
index 1f4bb80457b3..9d540cd02dab 100644
--- a/drivers/regulator/max8649.c
+++ b/drivers/regulator/max8649.c
@@ -259,6 +259,7 @@ static int __devinit max8649_regulator_probe(struct i2c_client *client,
259 config.dev = &client->dev; 259 config.dev = &client->dev;
260 config.init_data = pdata->regulator; 260 config.init_data = pdata->regulator;
261 config.driver_data = info; 261 config.driver_data = info;
262 config.regmap = info->regmap;
262 263
263 info->regulator = regulator_register(&dcdc_desc, &config); 264 info->regulator = regulator_register(&dcdc_desc, &config);
264 if (IS_ERR(info->regulator)) { 265 if (IS_ERR(info->regulator)) {
diff --git a/drivers/regulator/palmas-regulator.c b/drivers/regulator/palmas-regulator.c
index c4435f608df7..9b7ca90057d5 100644
--- a/drivers/regulator/palmas-regulator.c
+++ b/drivers/regulator/palmas-regulator.c
@@ -775,9 +775,6 @@ static __devinit int palmas_probe(struct platform_device *pdev)
775err_unregister_regulator: 775err_unregister_regulator:
776 while (--id >= 0) 776 while (--id >= 0)
777 regulator_unregister(pmic->rdev[id]); 777 regulator_unregister(pmic->rdev[id]);
778 kfree(pmic->rdev);
779 kfree(pmic->desc);
780 kfree(pmic);
781 return ret; 778 return ret;
782} 779}
783 780
@@ -788,10 +785,6 @@ static int __devexit palmas_remove(struct platform_device *pdev)
788 785
789 for (id = 0; id < PALMAS_NUM_REGS; id++) 786 for (id = 0; id < PALMAS_NUM_REGS; id++)
790 regulator_unregister(pmic->rdev[id]); 787 regulator_unregister(pmic->rdev[id]);
791
792 kfree(pmic->rdev);
793 kfree(pmic->desc);
794 kfree(pmic);
795 return 0; 788 return 0;
796} 789}
797 790
diff --git a/drivers/regulator/s5m8767.c b/drivers/regulator/s5m8767.c
index 290d6fc01029..9caadb482178 100644
--- a/drivers/regulator/s5m8767.c
+++ b/drivers/regulator/s5m8767.c
@@ -451,7 +451,7 @@ static int s5m8767_set_voltage_time_sel(struct regulator_dev *rdev,
451 451
452 desc = reg_voltage_map[reg_id]; 452 desc = reg_voltage_map[reg_id];
453 453
454 if (old_sel < new_sel) 454 if ((old_sel < new_sel) && s5m8767->ramp_delay)
455 return DIV_ROUND_UP(desc->step * (new_sel - old_sel), 455 return DIV_ROUND_UP(desc->step * (new_sel - old_sel),
456 s5m8767->ramp_delay * 1000); 456 s5m8767->ramp_delay * 1000);
457 return 0; 457 return 0;
diff --git a/drivers/remoteproc/omap_remoteproc.c b/drivers/remoteproc/omap_remoteproc.c
index 69425c4e86f3..de138e30d3e6 100644
--- a/drivers/remoteproc/omap_remoteproc.c
+++ b/drivers/remoteproc/omap_remoteproc.c
@@ -182,7 +182,7 @@ static int __devinit omap_rproc_probe(struct platform_device *pdev)
182 182
183 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); 183 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
184 if (ret) { 184 if (ret) {
185 dev_err(pdev->dev.parent, "dma_set_coherent_mask: %d\n", ret); 185 dev_err(&pdev->dev, "dma_set_coherent_mask: %d\n", ret);
186 return ret; 186 return ret;
187 } 187 }
188 188
diff --git a/drivers/remoteproc/remoteproc_core.c b/drivers/remoteproc/remoteproc_core.c
index 8ea7bccc7100..66324ee4678f 100644
--- a/drivers/remoteproc/remoteproc_core.c
+++ b/drivers/remoteproc/remoteproc_core.c
@@ -247,7 +247,7 @@ rproc_load_segments(struct rproc *rproc, const u8 *elf_data, size_t len)
247 } 247 }
248 248
249 if (offset + filesz > len) { 249 if (offset + filesz > len) {
250 dev_err(dev, "truncated fw: need 0x%x avail 0x%x\n", 250 dev_err(dev, "truncated fw: need 0x%x avail 0x%zx\n",
251 offset + filesz, len); 251 offset + filesz, len);
252 ret = -EINVAL; 252 ret = -EINVAL;
253 break; 253 break;
@@ -934,7 +934,7 @@ static void rproc_resource_cleanup(struct rproc *rproc)
934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len); 934 unmapped = iommu_unmap(rproc->domain, entry->da, entry->len);
935 if (unmapped != entry->len) { 935 if (unmapped != entry->len) {
936 /* nothing much to do besides complaining */ 936 /* nothing much to do besides complaining */
937 dev_err(dev, "failed to unmap %u/%u\n", entry->len, 937 dev_err(dev, "failed to unmap %u/%zu\n", entry->len,
938 unmapped); 938 unmapped);
939 } 939 }
940 940
@@ -1020,7 +1020,7 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
1020 1020
1021 ehdr = (struct elf32_hdr *)fw->data; 1021 ehdr = (struct elf32_hdr *)fw->data;
1022 1022
1023 dev_info(dev, "Booting fw image %s, size %d\n", name, fw->size); 1023 dev_info(dev, "Booting fw image %s, size %zd\n", name, fw->size);
1024 1024
1025 /* 1025 /*
1026 * if enabling an IOMMU isn't relevant for this rproc, this is 1026 * if enabling an IOMMU isn't relevant for this rproc, this is
@@ -1041,8 +1041,10 @@ static int rproc_fw_boot(struct rproc *rproc, const struct firmware *fw)
1041 1041
1042 /* look for the resource table */ 1042 /* look for the resource table */
1043 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz); 1043 table = rproc_find_rsc_table(rproc, fw->data, fw->size, &tablesz);
1044 if (!table) 1044 if (!table) {
1045 ret = -EINVAL;
1045 goto clean_up; 1046 goto clean_up;
1047 }
1046 1048
1047 /* handle fw resources which are required to boot rproc */ 1049 /* handle fw resources which are required to boot rproc */
1048 ret = rproc_handle_boot_rsc(rproc, table, tablesz); 1050 ret = rproc_handle_boot_rsc(rproc, table, tablesz);
diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
index 7d5f56edb8ef..4267789ca995 100644
--- a/drivers/rtc/rtc-cmos.c
+++ b/drivers/rtc/rtc-cmos.c
@@ -910,14 +910,17 @@ static inline int cmos_poweroff(struct device *dev)
910 910
911static u32 rtc_handler(void *context) 911static u32 rtc_handler(void *context)
912{ 912{
913 struct device *dev = context;
914
915 pm_wakeup_event(dev, 0);
913 acpi_clear_event(ACPI_EVENT_RTC); 916 acpi_clear_event(ACPI_EVENT_RTC);
914 acpi_disable_event(ACPI_EVENT_RTC, 0); 917 acpi_disable_event(ACPI_EVENT_RTC, 0);
915 return ACPI_INTERRUPT_HANDLED; 918 return ACPI_INTERRUPT_HANDLED;
916} 919}
917 920
918static inline void rtc_wake_setup(void) 921static inline void rtc_wake_setup(struct device *dev)
919{ 922{
920 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, NULL); 923 acpi_install_fixed_event_handler(ACPI_EVENT_RTC, rtc_handler, dev);
921 /* 924 /*
922 * After the RTC handler is installed, the Fixed_RTC event should 925 * After the RTC handler is installed, the Fixed_RTC event should
923 * be disabled. Only when the RTC alarm is set will it be enabled. 926 * be disabled. Only when the RTC alarm is set will it be enabled.
@@ -950,7 +953,7 @@ cmos_wake_setup(struct device *dev)
950 if (acpi_disabled) 953 if (acpi_disabled)
951 return; 954 return;
952 955
953 rtc_wake_setup(); 956 rtc_wake_setup(dev);
954 acpi_rtc_info.wake_on = rtc_wake_on; 957 acpi_rtc_info.wake_on = rtc_wake_on;
955 acpi_rtc_info.wake_off = rtc_wake_off; 958 acpi_rtc_info.wake_off = rtc_wake_off;
956 959
diff --git a/drivers/s390/block/dasd_int.h b/drivers/s390/block/dasd_int.h
index 33a6743ddc55..c05da00583f0 100644
--- a/drivers/s390/block/dasd_int.h
+++ b/drivers/s390/block/dasd_int.h
@@ -10,8 +10,6 @@
10#ifndef DASD_INT_H 10#ifndef DASD_INT_H
11#define DASD_INT_H 11#define DASD_INT_H
12 12
13#ifdef __KERNEL__
14
15/* we keep old device allocation scheme; IOW, minors are still in 0..255 */ 13/* we keep old device allocation scheme; IOW, minors are still in 0..255 */
16#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS)) 14#define DASD_PER_MAJOR (1U << (MINORBITS - DASD_PARTN_BITS))
17#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1) 15#define DASD_PARTN_MASK ((1 << DASD_PARTN_BITS) - 1)
@@ -791,6 +789,4 @@ static inline int dasd_eer_enabled(struct dasd_device *device)
791#define dasd_eer_enabled(d) (0) 789#define dasd_eer_enabled(d) (0)
792#endif /* CONFIG_DASD_ERR */ 790#endif /* CONFIG_DASD_ERR */
793 791
794#endif /* __KERNEL__ */
795
796#endif /* DASD_H */ 792#endif /* DASD_H */
diff --git a/drivers/s390/char/sclp_sdias.c b/drivers/s390/char/sclp_sdias.c
index 69e6c50d4cfb..50f7115990ff 100644
--- a/drivers/s390/char/sclp_sdias.c
+++ b/drivers/s390/char/sclp_sdias.c
@@ -211,7 +211,7 @@ int sclp_sdias_copy(void *dest, int start_blk, int nr_blks)
211 sccb.evbuf.event_qual = EQ_STORE_DATA; 211 sccb.evbuf.event_qual = EQ_STORE_DATA;
212 sccb.evbuf.data_id = DI_FCP_DUMP; 212 sccb.evbuf.data_id = DI_FCP_DUMP;
213 sccb.evbuf.event_id = 4712; 213 sccb.evbuf.event_id = 4712;
214#ifdef __s390x__ 214#ifdef CONFIG_64BIT
215 sccb.evbuf.asa_size = ASA_SIZE_64; 215 sccb.evbuf.asa_size = ASA_SIZE_64;
216#else 216#else
217 sccb.evbuf.asa_size = ASA_SIZE_32; 217 sccb.evbuf.asa_size = ASA_SIZE_32;
diff --git a/drivers/scsi/be2iscsi/be_mgmt.c b/drivers/scsi/be2iscsi/be_mgmt.c
index 01bb04cd9e75..2a096795b9aa 100644
--- a/drivers/scsi/be2iscsi/be_mgmt.c
+++ b/drivers/scsi/be2iscsi/be_mgmt.c
@@ -571,13 +571,12 @@ free_cmd:
571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd, 571static int mgmt_alloc_cmd_data(struct beiscsi_hba *phba, struct be_dma_mem *cmd,
572 int iscsi_cmd, int size) 572 int iscsi_cmd, int size)
573{ 573{
574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, sizeof(size), 574 cmd->va = pci_alloc_consistent(phba->ctrl.pdev, size, &cmd->dma);
575 &cmd->dma);
576 if (!cmd->va) { 575 if (!cmd->va) {
577 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n"); 576 SE_DEBUG(DBG_LVL_1, "Failed to allocate memory for if info\n");
578 return -ENOMEM; 577 return -ENOMEM;
579 } 578 }
580 memset(cmd->va, 0, sizeof(size)); 579 memset(cmd->va, 0, size);
581 cmd->size = size; 580 cmd->size = size;
582 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size); 581 be_cmd_hdr_prepare(cmd->va, CMD_SUBSYSTEM_ISCSI, iscsi_cmd, size);
583 return 0; 582 return 0;
diff --git a/drivers/scsi/bfa/bfad_attr.c b/drivers/scsi/bfa/bfad_attr.c
index 8b6c6bf7837e..b83927440171 100644
--- a/drivers/scsi/bfa/bfad_attr.c
+++ b/drivers/scsi/bfa/bfad_attr.c
@@ -426,6 +426,23 @@ bfad_im_vport_create(struct fc_vport *fc_vport, bool disable)
426 vshost = vport->drv_port.im_port->shost; 426 vshost = vport->drv_port.im_port->shost;
427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn); 427 fc_host_node_name(vshost) = wwn_to_u64((u8 *)&port_cfg.nwwn);
428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn); 428 fc_host_port_name(vshost) = wwn_to_u64((u8 *)&port_cfg.pwwn);
429 fc_host_supported_classes(vshost) = FC_COS_CLASS3;
430
431 memset(fc_host_supported_fc4s(vshost), 0,
432 sizeof(fc_host_supported_fc4s(vshost)));
433
434 /* For FCP type 0x08 */
435 if (supported_fc4s & BFA_LPORT_ROLE_FCP_IM)
436 fc_host_supported_fc4s(vshost)[2] = 1;
437
438 /* For fibre channel services type 0x20 */
439 fc_host_supported_fc4s(vshost)[7] = 1;
440
441 fc_host_supported_speeds(vshost) =
442 bfad_im_supported_speeds(&bfad->bfa);
443 fc_host_maxframe_size(vshost) =
444 bfa_fcport_get_maxfrsize(&bfad->bfa);
445
429 fc_vport->dd_data = vport; 446 fc_vport->dd_data = vport;
430 vport->drv_port.im_port->fc_vport = fc_vport; 447 vport->drv_port.im_port->fc_vport = fc_vport;
431 } else if (rc == BFA_STATUS_INVALID_WWN) 448 } else if (rc == BFA_STATUS_INVALID_WWN)
diff --git a/drivers/scsi/bfa/bfad_im.c b/drivers/scsi/bfa/bfad_im.c
index 3153923f5b60..1ac09afe35ee 100644
--- a/drivers/scsi/bfa/bfad_im.c
+++ b/drivers/scsi/bfa/bfad_im.c
@@ -987,7 +987,7 @@ done:
987 return 0; 987 return 0;
988} 988}
989 989
990static u32 990u32
991bfad_im_supported_speeds(struct bfa_s *bfa) 991bfad_im_supported_speeds(struct bfa_s *bfa)
992{ 992{
993 struct bfa_ioc_attr_s *ioc_attr; 993 struct bfa_ioc_attr_s *ioc_attr;
diff --git a/drivers/scsi/bfa/bfad_im.h b/drivers/scsi/bfa/bfad_im.h
index 0814367ef101..f6c1023e502a 100644
--- a/drivers/scsi/bfa/bfad_im.h
+++ b/drivers/scsi/bfa/bfad_im.h
@@ -37,6 +37,7 @@ int bfad_im_scsi_host_alloc(struct bfad_s *bfad,
37 struct bfad_im_port_s *im_port, struct device *dev); 37 struct bfad_im_port_s *im_port, struct device *dev);
38void bfad_im_scsi_host_free(struct bfad_s *bfad, 38void bfad_im_scsi_host_free(struct bfad_s *bfad,
39 struct bfad_im_port_s *im_port); 39 struct bfad_im_port_s *im_port);
40u32 bfad_im_supported_speeds(struct bfa_s *bfa);
40 41
41#define MAX_FCP_TARGET 1024 42#define MAX_FCP_TARGET 1024
42#define MAX_FCP_LUN 16384 43#define MAX_FCP_LUN 16384
diff --git a/drivers/scsi/bnx2fc/bnx2fc.h b/drivers/scsi/bnx2fc/bnx2fc.h
index a4953ef9e53a..0578fa0dc14b 100644
--- a/drivers/scsi/bnx2fc/bnx2fc.h
+++ b/drivers/scsi/bnx2fc/bnx2fc.h
@@ -62,7 +62,7 @@
62#include "bnx2fc_constants.h" 62#include "bnx2fc_constants.h"
63 63
64#define BNX2FC_NAME "bnx2fc" 64#define BNX2FC_NAME "bnx2fc"
65#define BNX2FC_VERSION "1.0.10" 65#define BNX2FC_VERSION "1.0.11"
66 66
67#define PFX "bnx2fc: " 67#define PFX "bnx2fc: "
68 68
@@ -228,13 +228,16 @@ struct bnx2fc_interface {
228 struct packet_type fip_packet_type; 228 struct packet_type fip_packet_type;
229 struct workqueue_struct *timer_work_queue; 229 struct workqueue_struct *timer_work_queue;
230 struct kref kref; 230 struct kref kref;
231 struct fcoe_ctlr ctlr;
232 u8 vlan_enabled; 231 u8 vlan_enabled;
233 int vlan_id; 232 int vlan_id;
234 bool enabled; 233 bool enabled;
235}; 234};
236 235
237#define bnx2fc_from_ctlr(fip) container_of(fip, struct bnx2fc_interface, ctlr) 236#define bnx2fc_from_ctlr(x) \
237 ((struct bnx2fc_interface *)((x) + 1))
238
239#define bnx2fc_to_ctlr(x) \
240 ((struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1))
238 241
239struct bnx2fc_lport { 242struct bnx2fc_lport {
240 struct list_head list; 243 struct list_head list;
diff --git a/drivers/scsi/bnx2fc/bnx2fc_els.c b/drivers/scsi/bnx2fc/bnx2fc_els.c
index ce0ce3e32f33..bdbbb13b8534 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_els.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_els.c
@@ -854,7 +854,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
854 struct fc_exch *exch = fc_seq_exch(seq); 854 struct fc_exch *exch = fc_seq_exch(seq);
855 struct fc_lport *lport = exch->lp; 855 struct fc_lport *lport = exch->lp;
856 u8 *mac; 856 u8 *mac;
857 struct fc_frame_header *fh;
858 u8 op; 857 u8 op;
859 858
860 if (IS_ERR(fp)) 859 if (IS_ERR(fp))
@@ -862,13 +861,6 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
862 861
863 mac = fr_cb(fp)->granted_mac; 862 mac = fr_cb(fp)->granted_mac;
864 if (is_zero_ether_addr(mac)) { 863 if (is_zero_ether_addr(mac)) {
865 fh = fc_frame_header_get(fp);
866 if (fh->fh_type != FC_TYPE_ELS) {
867 printk(KERN_ERR PFX "bnx2fc_flogi_resp:"
868 "fh_type != FC_TYPE_ELS\n");
869 fc_frame_free(fp);
870 return;
871 }
872 op = fc_frame_payload_op(fp); 864 op = fc_frame_payload_op(fp);
873 if (lport->vport) { 865 if (lport->vport) {
874 if (op == ELS_LS_RJT) { 866 if (op == ELS_LS_RJT) {
@@ -878,12 +870,10 @@ static void bnx2fc_flogi_resp(struct fc_seq *seq, struct fc_frame *fp,
878 return; 870 return;
879 } 871 }
880 } 872 }
881 if (fcoe_ctlr_recv_flogi(fip, lport, fp)) { 873 fcoe_ctlr_recv_flogi(fip, lport, fp);
882 fc_frame_free(fp);
883 return;
884 }
885 } 874 }
886 fip->update_mac(lport, mac); 875 if (!is_zero_ether_addr(mac))
876 fip->update_mac(lport, mac);
887done: 877done:
888 fc_lport_flogi_resp(seq, fp, lport); 878 fc_lport_flogi_resp(seq, fp, lport);
889} 879}
@@ -910,7 +900,7 @@ struct fc_seq *bnx2fc_elsct_send(struct fc_lport *lport, u32 did,
910{ 900{
911 struct fcoe_port *port = lport_priv(lport); 901 struct fcoe_port *port = lport_priv(lport);
912 struct bnx2fc_interface *interface = port->priv; 902 struct bnx2fc_interface *interface = port->priv;
913 struct fcoe_ctlr *fip = &interface->ctlr; 903 struct fcoe_ctlr *fip = bnx2fc_to_ctlr(interface);
914 struct fc_frame_header *fh = fc_frame_header_get(fp); 904 struct fc_frame_header *fh = fc_frame_header_get(fp);
915 905
916 switch (op) { 906 switch (op) {
diff --git a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
index c1c6a92a0b98..f52f668fd247 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_fcoe.c
@@ -22,7 +22,7 @@ DEFINE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
22 22
23#define DRV_MODULE_NAME "bnx2fc" 23#define DRV_MODULE_NAME "bnx2fc"
24#define DRV_MODULE_VERSION BNX2FC_VERSION 24#define DRV_MODULE_VERSION BNX2FC_VERSION
25#define DRV_MODULE_RELDATE "Jan 22, 2011" 25#define DRV_MODULE_RELDATE "Apr 24, 2012"
26 26
27 27
28static char version[] __devinitdata = 28static char version[] __devinitdata =
@@ -54,6 +54,7 @@ static struct cnic_ulp_ops bnx2fc_cnic_cb;
54static struct libfc_function_template bnx2fc_libfc_fcn_templ; 54static struct libfc_function_template bnx2fc_libfc_fcn_templ;
55static struct scsi_host_template bnx2fc_shost_template; 55static struct scsi_host_template bnx2fc_shost_template;
56static struct fc_function_template bnx2fc_transport_function; 56static struct fc_function_template bnx2fc_transport_function;
57static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ;
57static struct fc_function_template bnx2fc_vport_xport_function; 58static struct fc_function_template bnx2fc_vport_xport_function;
58static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode); 59static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode);
59static void __bnx2fc_destroy(struct bnx2fc_interface *interface); 60static void __bnx2fc_destroy(struct bnx2fc_interface *interface);
@@ -88,6 +89,7 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport);
88static void bnx2fc_stop(struct bnx2fc_interface *interface); 89static void bnx2fc_stop(struct bnx2fc_interface *interface);
89static int __init bnx2fc_mod_init(void); 90static int __init bnx2fc_mod_init(void);
90static void __exit bnx2fc_mod_exit(void); 91static void __exit bnx2fc_mod_exit(void);
92static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev);
91 93
92unsigned int bnx2fc_debug_level; 94unsigned int bnx2fc_debug_level;
93module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR); 95module_param_named(debug_logging, bnx2fc_debug_level, int, S_IRUGO|S_IWUSR);
@@ -118,6 +120,41 @@ static void bnx2fc_get_lesb(struct fc_lport *lport,
118 __fcoe_get_lesb(lport, fc_lesb, netdev); 120 __fcoe_get_lesb(lport, fc_lesb, netdev);
119} 121}
120 122
123static void bnx2fc_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
124{
125 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
126 struct net_device *netdev = bnx2fc_netdev(fip->lp);
127 struct fcoe_fc_els_lesb *fcoe_lesb;
128 struct fc_els_lesb fc_lesb;
129
130 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
131 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
132
133 ctlr_dev->lesb.lesb_link_fail =
134 ntohl(fcoe_lesb->lesb_link_fail);
135 ctlr_dev->lesb.lesb_vlink_fail =
136 ntohl(fcoe_lesb->lesb_vlink_fail);
137 ctlr_dev->lesb.lesb_miss_fka =
138 ntohl(fcoe_lesb->lesb_miss_fka);
139 ctlr_dev->lesb.lesb_symb_err =
140 ntohl(fcoe_lesb->lesb_symb_err);
141 ctlr_dev->lesb.lesb_err_block =
142 ntohl(fcoe_lesb->lesb_err_block);
143 ctlr_dev->lesb.lesb_fcs_error =
144 ntohl(fcoe_lesb->lesb_fcs_error);
145}
146EXPORT_SYMBOL(bnx2fc_ctlr_get_lesb);
147
148static void bnx2fc_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
149{
150 struct fcoe_ctlr_device *ctlr_dev =
151 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
152 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
153 struct bnx2fc_interface *fcoe = fcoe_ctlr_priv(ctlr);
154
155 fcf_dev->vlan_id = fcoe->vlan_id;
156}
157
121static void bnx2fc_clean_rx_queue(struct fc_lport *lp) 158static void bnx2fc_clean_rx_queue(struct fc_lport *lp)
122{ 159{
123 struct fcoe_percpu_s *bg; 160 struct fcoe_percpu_s *bg;
@@ -244,6 +281,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
244 struct sk_buff *skb; 281 struct sk_buff *skb;
245 struct fc_frame_header *fh; 282 struct fc_frame_header *fh;
246 struct bnx2fc_interface *interface; 283 struct bnx2fc_interface *interface;
284 struct fcoe_ctlr *ctlr;
247 struct bnx2fc_hba *hba; 285 struct bnx2fc_hba *hba;
248 struct fcoe_port *port; 286 struct fcoe_port *port;
249 struct fcoe_hdr *hp; 287 struct fcoe_hdr *hp;
@@ -256,6 +294,7 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
256 294
257 port = (struct fcoe_port *)lport_priv(lport); 295 port = (struct fcoe_port *)lport_priv(lport);
258 interface = port->priv; 296 interface = port->priv;
297 ctlr = bnx2fc_to_ctlr(interface);
259 hba = interface->hba; 298 hba = interface->hba;
260 299
261 fh = fc_frame_header_get(fp); 300 fh = fc_frame_header_get(fp);
@@ -268,12 +307,12 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
268 } 307 }
269 308
270 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) { 309 if (unlikely(fh->fh_r_ctl == FC_RCTL_ELS_REQ)) {
271 if (!interface->ctlr.sel_fcf) { 310 if (!ctlr->sel_fcf) {
272 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n"); 311 BNX2FC_HBA_DBG(lport, "FCF not selected yet!\n");
273 kfree_skb(skb); 312 kfree_skb(skb);
274 return -EINVAL; 313 return -EINVAL;
275 } 314 }
276 if (fcoe_ctlr_els_send(&interface->ctlr, lport, skb)) 315 if (fcoe_ctlr_els_send(ctlr, lport, skb))
277 return 0; 316 return 0;
278 } 317 }
279 318
@@ -346,14 +385,14 @@ static int bnx2fc_xmit(struct fc_lport *lport, struct fc_frame *fp)
346 /* fill up mac and fcoe headers */ 385 /* fill up mac and fcoe headers */
347 eh = eth_hdr(skb); 386 eh = eth_hdr(skb);
348 eh->h_proto = htons(ETH_P_FCOE); 387 eh->h_proto = htons(ETH_P_FCOE);
349 if (interface->ctlr.map_dest) 388 if (ctlr->map_dest)
350 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id); 389 fc_fcoe_set_mac(eh->h_dest, fh->fh_d_id);
351 else 390 else
352 /* insert GW address */ 391 /* insert GW address */
353 memcpy(eh->h_dest, interface->ctlr.dest_addr, ETH_ALEN); 392 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
354 393
355 if (unlikely(interface->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 394 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
356 memcpy(eh->h_source, interface->ctlr.ctl_src_addr, ETH_ALEN); 395 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
357 else 396 else
358 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 397 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
359 398
@@ -403,6 +442,7 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
403{ 442{
404 struct fc_lport *lport; 443 struct fc_lport *lport;
405 struct bnx2fc_interface *interface; 444 struct bnx2fc_interface *interface;
445 struct fcoe_ctlr *ctlr;
406 struct fc_frame_header *fh; 446 struct fc_frame_header *fh;
407 struct fcoe_rcv_info *fr; 447 struct fcoe_rcv_info *fr;
408 struct fcoe_percpu_s *bg; 448 struct fcoe_percpu_s *bg;
@@ -410,7 +450,8 @@ static int bnx2fc_rcv(struct sk_buff *skb, struct net_device *dev,
410 450
411 interface = container_of(ptype, struct bnx2fc_interface, 451 interface = container_of(ptype, struct bnx2fc_interface,
412 fcoe_packet_type); 452 fcoe_packet_type);
413 lport = interface->ctlr.lp; 453 ctlr = bnx2fc_to_ctlr(interface);
454 lport = ctlr->lp;
414 455
415 if (unlikely(lport == NULL)) { 456 if (unlikely(lport == NULL)) {
416 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n"); 457 printk(KERN_ERR PFX "bnx2fc_rcv: lport is NULL\n");
@@ -758,11 +799,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
758{ 799{
759 struct bnx2fc_hba *hba; 800 struct bnx2fc_hba *hba;
760 struct bnx2fc_interface *interface; 801 struct bnx2fc_interface *interface;
802 struct fcoe_ctlr *ctlr;
761 struct fcoe_port *port; 803 struct fcoe_port *port;
762 u64 wwnn, wwpn; 804 u64 wwnn, wwpn;
763 805
764 port = lport_priv(lport); 806 port = lport_priv(lport);
765 interface = port->priv; 807 interface = port->priv;
808 ctlr = bnx2fc_to_ctlr(interface);
766 hba = interface->hba; 809 hba = interface->hba;
767 810
768 /* require support for get_pauseparam ethtool op. */ 811 /* require support for get_pauseparam ethtool op. */
@@ -781,13 +824,13 @@ static int bnx2fc_net_config(struct fc_lport *lport, struct net_device *netdev)
781 824
782 if (!lport->vport) { 825 if (!lport->vport) {
783 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 826 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
784 wwnn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 827 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
785 1, 0); 828 1, 0);
786 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn); 829 BNX2FC_HBA_DBG(lport, "WWNN = 0x%llx\n", wwnn);
787 fc_set_wwnn(lport, wwnn); 830 fc_set_wwnn(lport, wwnn);
788 831
789 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 832 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
790 wwpn = fcoe_wwn_from_mac(interface->ctlr.ctl_src_addr, 833 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
791 2, 0); 834 2, 0);
792 835
793 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn); 836 BNX2FC_HBA_DBG(lport, "WWPN = 0x%llx\n", wwpn);
@@ -824,6 +867,7 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
824 struct fc_lport *lport; 867 struct fc_lport *lport;
825 struct fc_lport *vport; 868 struct fc_lport *vport;
826 struct bnx2fc_interface *interface, *tmp; 869 struct bnx2fc_interface *interface, *tmp;
870 struct fcoe_ctlr *ctlr;
827 int wait_for_upload = 0; 871 int wait_for_upload = 0;
828 u32 link_possible = 1; 872 u32 link_possible = 1;
829 873
@@ -874,7 +918,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
874 if (interface->hba != hba) 918 if (interface->hba != hba)
875 continue; 919 continue;
876 920
877 lport = interface->ctlr.lp; 921 ctlr = bnx2fc_to_ctlr(interface);
922 lport = ctlr->lp;
878 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n", 923 BNX2FC_HBA_DBG(lport, "netevent handler - event=%s %ld\n",
879 interface->netdev->name, event); 924 interface->netdev->name, event);
880 925
@@ -889,8 +934,8 @@ static void bnx2fc_indicate_netevent(void *context, unsigned long event,
889 * on a stale vlan 934 * on a stale vlan
890 */ 935 */
891 if (interface->enabled) 936 if (interface->enabled)
892 fcoe_ctlr_link_up(&interface->ctlr); 937 fcoe_ctlr_link_up(ctlr);
893 } else if (fcoe_ctlr_link_down(&interface->ctlr)) { 938 } else if (fcoe_ctlr_link_down(ctlr)) {
894 mutex_lock(&lport->lp_mutex); 939 mutex_lock(&lport->lp_mutex);
895 list_for_each_entry(vport, &lport->vports, list) 940 list_for_each_entry(vport, &lport->vports, list)
896 fc_host_port_type(vport->host) = 941 fc_host_port_type(vport->host) =
@@ -995,9 +1040,11 @@ static int bnx2fc_fip_recv(struct sk_buff *skb, struct net_device *dev,
995 struct net_device *orig_dev) 1040 struct net_device *orig_dev)
996{ 1041{
997 struct bnx2fc_interface *interface; 1042 struct bnx2fc_interface *interface;
1043 struct fcoe_ctlr *ctlr;
998 interface = container_of(ptype, struct bnx2fc_interface, 1044 interface = container_of(ptype, struct bnx2fc_interface,
999 fip_packet_type); 1045 fip_packet_type);
1000 fcoe_ctlr_recv(&interface->ctlr, skb); 1046 ctlr = bnx2fc_to_ctlr(interface);
1047 fcoe_ctlr_recv(ctlr, skb);
1001 return 0; 1048 return 0;
1002} 1049}
1003 1050
@@ -1155,6 +1202,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1155{ 1202{
1156 struct net_device *netdev = interface->netdev; 1203 struct net_device *netdev = interface->netdev;
1157 struct net_device *physdev = interface->hba->phys_dev; 1204 struct net_device *physdev = interface->hba->phys_dev;
1205 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1158 struct netdev_hw_addr *ha; 1206 struct netdev_hw_addr *ha;
1159 int sel_san_mac = 0; 1207 int sel_san_mac = 0;
1160 1208
@@ -1169,7 +1217,7 @@ static int bnx2fc_interface_setup(struct bnx2fc_interface *interface)
1169 1217
1170 if ((ha->type == NETDEV_HW_ADDR_T_SAN) && 1218 if ((ha->type == NETDEV_HW_ADDR_T_SAN) &&
1171 (is_valid_ether_addr(ha->addr))) { 1219 (is_valid_ether_addr(ha->addr))) {
1172 memcpy(interface->ctlr.ctl_src_addr, ha->addr, 1220 memcpy(ctlr->ctl_src_addr, ha->addr,
1173 ETH_ALEN); 1221 ETH_ALEN);
1174 sel_san_mac = 1; 1222 sel_san_mac = 1;
1175 BNX2FC_MISC_DBG("Found SAN MAC\n"); 1223 BNX2FC_MISC_DBG("Found SAN MAC\n");
@@ -1224,19 +1272,23 @@ static void bnx2fc_release_transport(void)
1224 1272
1225static void bnx2fc_interface_release(struct kref *kref) 1273static void bnx2fc_interface_release(struct kref *kref)
1226{ 1274{
1275 struct fcoe_ctlr_device *ctlr_dev;
1227 struct bnx2fc_interface *interface; 1276 struct bnx2fc_interface *interface;
1277 struct fcoe_ctlr *ctlr;
1228 struct net_device *netdev; 1278 struct net_device *netdev;
1229 1279
1230 interface = container_of(kref, struct bnx2fc_interface, kref); 1280 interface = container_of(kref, struct bnx2fc_interface, kref);
1231 BNX2FC_MISC_DBG("Interface is being released\n"); 1281 BNX2FC_MISC_DBG("Interface is being released\n");
1232 1282
1283 ctlr = bnx2fc_to_ctlr(interface);
1284 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
1233 netdev = interface->netdev; 1285 netdev = interface->netdev;
1234 1286
1235 /* tear-down FIP controller */ 1287 /* tear-down FIP controller */
1236 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags)) 1288 if (test_and_clear_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags))
1237 fcoe_ctlr_destroy(&interface->ctlr); 1289 fcoe_ctlr_destroy(ctlr);
1238 1290
1239 kfree(interface); 1291 fcoe_ctlr_device_delete(ctlr_dev);
1240 1292
1241 dev_put(netdev); 1293 dev_put(netdev);
1242 module_put(THIS_MODULE); 1294 module_put(THIS_MODULE);
@@ -1329,33 +1381,40 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1329 struct net_device *netdev, 1381 struct net_device *netdev,
1330 enum fip_state fip_mode) 1382 enum fip_state fip_mode)
1331{ 1383{
1384 struct fcoe_ctlr_device *ctlr_dev;
1332 struct bnx2fc_interface *interface; 1385 struct bnx2fc_interface *interface;
1386 struct fcoe_ctlr *ctlr;
1387 int size;
1333 int rc = 0; 1388 int rc = 0;
1334 1389
1335 interface = kzalloc(sizeof(*interface), GFP_KERNEL); 1390 size = (sizeof(*interface) + sizeof(struct fcoe_ctlr));
1336 if (!interface) { 1391 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &bnx2fc_fcoe_sysfs_templ,
1392 size);
1393 if (!ctlr_dev) {
1337 printk(KERN_ERR PFX "Unable to allocate interface structure\n"); 1394 printk(KERN_ERR PFX "Unable to allocate interface structure\n");
1338 return NULL; 1395 return NULL;
1339 } 1396 }
1397 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
1398 interface = fcoe_ctlr_priv(ctlr);
1340 dev_hold(netdev); 1399 dev_hold(netdev);
1341 kref_init(&interface->kref); 1400 kref_init(&interface->kref);
1342 interface->hba = hba; 1401 interface->hba = hba;
1343 interface->netdev = netdev; 1402 interface->netdev = netdev;
1344 1403
1345 /* Initialize FIP */ 1404 /* Initialize FIP */
1346 fcoe_ctlr_init(&interface->ctlr, fip_mode); 1405 fcoe_ctlr_init(ctlr, fip_mode);
1347 interface->ctlr.send = bnx2fc_fip_send; 1406 ctlr->send = bnx2fc_fip_send;
1348 interface->ctlr.update_mac = bnx2fc_update_src_mac; 1407 ctlr->update_mac = bnx2fc_update_src_mac;
1349 interface->ctlr.get_src_addr = bnx2fc_get_src_mac; 1408 ctlr->get_src_addr = bnx2fc_get_src_mac;
1350 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags); 1409 set_bit(BNX2FC_CTLR_INIT_DONE, &interface->if_flags);
1351 1410
1352 rc = bnx2fc_interface_setup(interface); 1411 rc = bnx2fc_interface_setup(interface);
1353 if (!rc) 1412 if (!rc)
1354 return interface; 1413 return interface;
1355 1414
1356 fcoe_ctlr_destroy(&interface->ctlr); 1415 fcoe_ctlr_destroy(ctlr);
1357 dev_put(netdev); 1416 dev_put(netdev);
1358 kfree(interface); 1417 fcoe_ctlr_device_delete(ctlr_dev);
1359 return NULL; 1418 return NULL;
1360} 1419}
1361 1420
@@ -1373,6 +1432,7 @@ struct bnx2fc_interface *bnx2fc_interface_create(struct bnx2fc_hba *hba,
1373static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface, 1432static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1374 struct device *parent, int npiv) 1433 struct device *parent, int npiv)
1375{ 1434{
1435 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1376 struct fc_lport *lport, *n_port; 1436 struct fc_lport *lport, *n_port;
1377 struct fcoe_port *port; 1437 struct fcoe_port *port;
1378 struct Scsi_Host *shost; 1438 struct Scsi_Host *shost;
@@ -1383,7 +1443,7 @@ static struct fc_lport *bnx2fc_if_create(struct bnx2fc_interface *interface,
1383 1443
1384 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL); 1444 blport = kzalloc(sizeof(struct bnx2fc_lport), GFP_KERNEL);
1385 if (!blport) { 1445 if (!blport) {
1386 BNX2FC_HBA_DBG(interface->ctlr.lp, "Unable to alloc blport\n"); 1446 BNX2FC_HBA_DBG(ctlr->lp, "Unable to alloc blport\n");
1387 return NULL; 1447 return NULL;
1388 } 1448 }
1389 1449
@@ -1479,7 +1539,8 @@ static void bnx2fc_net_cleanup(struct bnx2fc_interface *interface)
1479 1539
1480static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface) 1540static void bnx2fc_interface_cleanup(struct bnx2fc_interface *interface)
1481{ 1541{
1482 struct fc_lport *lport = interface->ctlr.lp; 1542 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1543 struct fc_lport *lport = ctlr->lp;
1483 struct fcoe_port *port = lport_priv(lport); 1544 struct fcoe_port *port = lport_priv(lport);
1484 struct bnx2fc_hba *hba = interface->hba; 1545 struct bnx2fc_hba *hba = interface->hba;
1485 1546
@@ -1519,7 +1580,8 @@ static void bnx2fc_if_destroy(struct fc_lport *lport)
1519 1580
1520static void __bnx2fc_destroy(struct bnx2fc_interface *interface) 1581static void __bnx2fc_destroy(struct bnx2fc_interface *interface)
1521{ 1582{
1522 struct fc_lport *lport = interface->ctlr.lp; 1583 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1584 struct fc_lport *lport = ctlr->lp;
1523 struct fcoe_port *port = lport_priv(lport); 1585 struct fcoe_port *port = lport_priv(lport);
1524 1586
1525 bnx2fc_interface_cleanup(interface); 1587 bnx2fc_interface_cleanup(interface);
@@ -1543,13 +1605,15 @@ static int bnx2fc_destroy(struct net_device *netdev)
1543{ 1605{
1544 struct bnx2fc_interface *interface = NULL; 1606 struct bnx2fc_interface *interface = NULL;
1545 struct workqueue_struct *timer_work_queue; 1607 struct workqueue_struct *timer_work_queue;
1608 struct fcoe_ctlr *ctlr;
1546 int rc = 0; 1609 int rc = 0;
1547 1610
1548 rtnl_lock(); 1611 rtnl_lock();
1549 mutex_lock(&bnx2fc_dev_lock); 1612 mutex_lock(&bnx2fc_dev_lock);
1550 1613
1551 interface = bnx2fc_interface_lookup(netdev); 1614 interface = bnx2fc_interface_lookup(netdev);
1552 if (!interface || !interface->ctlr.lp) { 1615 ctlr = bnx2fc_to_ctlr(interface);
1616 if (!interface || !ctlr->lp) {
1553 rc = -ENODEV; 1617 rc = -ENODEV;
1554 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n"); 1618 printk(KERN_ERR PFX "bnx2fc_destroy: interface or lport not found\n");
1555 goto netdev_err; 1619 goto netdev_err;
@@ -1646,6 +1710,7 @@ static void bnx2fc_ulp_start(void *handle)
1646{ 1710{
1647 struct bnx2fc_hba *hba = handle; 1711 struct bnx2fc_hba *hba = handle;
1648 struct bnx2fc_interface *interface; 1712 struct bnx2fc_interface *interface;
1713 struct fcoe_ctlr *ctlr;
1649 struct fc_lport *lport; 1714 struct fc_lport *lport;
1650 1715
1651 mutex_lock(&bnx2fc_dev_lock); 1716 mutex_lock(&bnx2fc_dev_lock);
@@ -1657,7 +1722,8 @@ static void bnx2fc_ulp_start(void *handle)
1657 1722
1658 list_for_each_entry(interface, &if_list, list) { 1723 list_for_each_entry(interface, &if_list, list) {
1659 if (interface->hba == hba) { 1724 if (interface->hba == hba) {
1660 lport = interface->ctlr.lp; 1725 ctlr = bnx2fc_to_ctlr(interface);
1726 lport = ctlr->lp;
1661 /* Kick off Fabric discovery*/ 1727 /* Kick off Fabric discovery*/
1662 printk(KERN_ERR PFX "ulp_init: start discovery\n"); 1728 printk(KERN_ERR PFX "ulp_init: start discovery\n");
1663 lport->tt.frame_send = bnx2fc_xmit; 1729 lport->tt.frame_send = bnx2fc_xmit;
@@ -1677,13 +1743,14 @@ static void bnx2fc_port_shutdown(struct fc_lport *lport)
1677 1743
1678static void bnx2fc_stop(struct bnx2fc_interface *interface) 1744static void bnx2fc_stop(struct bnx2fc_interface *interface)
1679{ 1745{
1746 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1680 struct fc_lport *lport; 1747 struct fc_lport *lport;
1681 struct fc_lport *vport; 1748 struct fc_lport *vport;
1682 1749
1683 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags)) 1750 if (!test_bit(BNX2FC_FLAG_FW_INIT_DONE, &interface->hba->flags))
1684 return; 1751 return;
1685 1752
1686 lport = interface->ctlr.lp; 1753 lport = ctlr->lp;
1687 bnx2fc_port_shutdown(lport); 1754 bnx2fc_port_shutdown(lport);
1688 1755
1689 mutex_lock(&lport->lp_mutex); 1756 mutex_lock(&lport->lp_mutex);
@@ -1692,7 +1759,7 @@ static void bnx2fc_stop(struct bnx2fc_interface *interface)
1692 FC_PORTTYPE_UNKNOWN; 1759 FC_PORTTYPE_UNKNOWN;
1693 mutex_unlock(&lport->lp_mutex); 1760 mutex_unlock(&lport->lp_mutex);
1694 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN; 1761 fc_host_port_type(lport->host) = FC_PORTTYPE_UNKNOWN;
1695 fcoe_ctlr_link_down(&interface->ctlr); 1762 fcoe_ctlr_link_down(ctlr);
1696 fcoe_clean_pending_queue(lport); 1763 fcoe_clean_pending_queue(lport);
1697} 1764}
1698 1765
@@ -1804,6 +1871,7 @@ exit:
1804 1871
1805static void bnx2fc_start_disc(struct bnx2fc_interface *interface) 1872static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1806{ 1873{
1874 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
1807 struct fc_lport *lport; 1875 struct fc_lport *lport;
1808 int wait_cnt = 0; 1876 int wait_cnt = 0;
1809 1877
@@ -1814,18 +1882,18 @@ static void bnx2fc_start_disc(struct bnx2fc_interface *interface)
1814 return; 1882 return;
1815 } 1883 }
1816 1884
1817 lport = interface->ctlr.lp; 1885 lport = ctlr->lp;
1818 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n"); 1886 BNX2FC_HBA_DBG(lport, "calling fc_fabric_login\n");
1819 1887
1820 if (!bnx2fc_link_ok(lport) && interface->enabled) { 1888 if (!bnx2fc_link_ok(lport) && interface->enabled) {
1821 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n"); 1889 BNX2FC_HBA_DBG(lport, "ctlr_link_up\n");
1822 fcoe_ctlr_link_up(&interface->ctlr); 1890 fcoe_ctlr_link_up(ctlr);
1823 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 1891 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
1824 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 1892 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
1825 } 1893 }
1826 1894
1827 /* wait for the FCF to be selected before issuing FLOGI */ 1895 /* wait for the FCF to be selected before issuing FLOGI */
1828 while (!interface->ctlr.sel_fcf) { 1896 while (!ctlr->sel_fcf) {
1829 msleep(250); 1897 msleep(250);
1830 /* give up after 3 secs */ 1898 /* give up after 3 secs */
1831 if (++wait_cnt > 12) 1899 if (++wait_cnt > 12)
@@ -1889,19 +1957,21 @@ static void bnx2fc_ulp_init(struct cnic_dev *dev)
1889static int bnx2fc_disable(struct net_device *netdev) 1957static int bnx2fc_disable(struct net_device *netdev)
1890{ 1958{
1891 struct bnx2fc_interface *interface; 1959 struct bnx2fc_interface *interface;
1960 struct fcoe_ctlr *ctlr;
1892 int rc = 0; 1961 int rc = 0;
1893 1962
1894 rtnl_lock(); 1963 rtnl_lock();
1895 mutex_lock(&bnx2fc_dev_lock); 1964 mutex_lock(&bnx2fc_dev_lock);
1896 1965
1897 interface = bnx2fc_interface_lookup(netdev); 1966 interface = bnx2fc_interface_lookup(netdev);
1898 if (!interface || !interface->ctlr.lp) { 1967 ctlr = bnx2fc_to_ctlr(interface);
1968 if (!interface || !ctlr->lp) {
1899 rc = -ENODEV; 1969 rc = -ENODEV;
1900 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n"); 1970 printk(KERN_ERR PFX "bnx2fc_disable: interface or lport not found\n");
1901 } else { 1971 } else {
1902 interface->enabled = false; 1972 interface->enabled = false;
1903 fcoe_ctlr_link_down(&interface->ctlr); 1973 fcoe_ctlr_link_down(ctlr);
1904 fcoe_clean_pending_queue(interface->ctlr.lp); 1974 fcoe_clean_pending_queue(ctlr->lp);
1905 } 1975 }
1906 1976
1907 mutex_unlock(&bnx2fc_dev_lock); 1977 mutex_unlock(&bnx2fc_dev_lock);
@@ -1913,17 +1983,19 @@ static int bnx2fc_disable(struct net_device *netdev)
1913static int bnx2fc_enable(struct net_device *netdev) 1983static int bnx2fc_enable(struct net_device *netdev)
1914{ 1984{
1915 struct bnx2fc_interface *interface; 1985 struct bnx2fc_interface *interface;
1986 struct fcoe_ctlr *ctlr;
1916 int rc = 0; 1987 int rc = 0;
1917 1988
1918 rtnl_lock(); 1989 rtnl_lock();
1919 mutex_lock(&bnx2fc_dev_lock); 1990 mutex_lock(&bnx2fc_dev_lock);
1920 1991
1921 interface = bnx2fc_interface_lookup(netdev); 1992 interface = bnx2fc_interface_lookup(netdev);
1922 if (!interface || !interface->ctlr.lp) { 1993 ctlr = bnx2fc_to_ctlr(interface);
1994 if (!interface || !ctlr->lp) {
1923 rc = -ENODEV; 1995 rc = -ENODEV;
1924 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n"); 1996 printk(KERN_ERR PFX "bnx2fc_enable: interface or lport not found\n");
1925 } else if (!bnx2fc_link_ok(interface->ctlr.lp)) { 1997 } else if (!bnx2fc_link_ok(ctlr->lp)) {
1926 fcoe_ctlr_link_up(&interface->ctlr); 1998 fcoe_ctlr_link_up(ctlr);
1927 interface->enabled = true; 1999 interface->enabled = true;
1928 } 2000 }
1929 2001
@@ -1944,6 +2016,7 @@ static int bnx2fc_enable(struct net_device *netdev)
1944 */ 2016 */
1945static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode) 2017static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
1946{ 2018{
2019 struct fcoe_ctlr *ctlr;
1947 struct bnx2fc_interface *interface; 2020 struct bnx2fc_interface *interface;
1948 struct bnx2fc_hba *hba; 2021 struct bnx2fc_hba *hba;
1949 struct net_device *phys_dev; 2022 struct net_device *phys_dev;
@@ -2010,6 +2083,7 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2010 goto ifput_err; 2083 goto ifput_err;
2011 } 2084 }
2012 2085
2086 ctlr = bnx2fc_to_ctlr(interface);
2013 interface->vlan_id = vlan_id; 2087 interface->vlan_id = vlan_id;
2014 interface->vlan_enabled = 1; 2088 interface->vlan_enabled = 1;
2015 2089
@@ -2035,10 +2109,10 @@ static int bnx2fc_create(struct net_device *netdev, enum fip_state fip_mode)
2035 lport->boot_time = jiffies; 2109 lport->boot_time = jiffies;
2036 2110
2037 /* Make this master N_port */ 2111 /* Make this master N_port */
2038 interface->ctlr.lp = lport; 2112 ctlr->lp = lport;
2039 2113
2040 if (!bnx2fc_link_ok(lport)) { 2114 if (!bnx2fc_link_ok(lport)) {
2041 fcoe_ctlr_link_up(&interface->ctlr); 2115 fcoe_ctlr_link_up(ctlr);
2042 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT; 2116 fc_host_port_type(lport->host) = FC_PORTTYPE_NPORT;
2043 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state); 2117 set_bit(ADAPTER_STATE_READY, &interface->hba->adapter_state);
2044 } 2118 }
@@ -2439,6 +2513,19 @@ static void __exit bnx2fc_mod_exit(void)
2439module_init(bnx2fc_mod_init); 2513module_init(bnx2fc_mod_init);
2440module_exit(bnx2fc_mod_exit); 2514module_exit(bnx2fc_mod_exit);
2441 2515
2516static struct fcoe_sysfs_function_template bnx2fc_fcoe_sysfs_templ = {
2517 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
2518 .get_fcoe_ctlr_link_fail = bnx2fc_ctlr_get_lesb,
2519 .get_fcoe_ctlr_vlink_fail = bnx2fc_ctlr_get_lesb,
2520 .get_fcoe_ctlr_miss_fka = bnx2fc_ctlr_get_lesb,
2521 .get_fcoe_ctlr_symb_err = bnx2fc_ctlr_get_lesb,
2522 .get_fcoe_ctlr_err_block = bnx2fc_ctlr_get_lesb,
2523 .get_fcoe_ctlr_fcs_error = bnx2fc_ctlr_get_lesb,
2524
2525 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
2526 .get_fcoe_fcf_vlan_id = bnx2fc_fcf_get_vlan_id,
2527};
2528
2442static struct fc_function_template bnx2fc_transport_function = { 2529static struct fc_function_template bnx2fc_transport_function = {
2443 .show_host_node_name = 1, 2530 .show_host_node_name = 1,
2444 .show_host_port_name = 1, 2531 .show_host_port_name = 1,
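The bnx2fc_fcoe_sysfs_templ added in this file is a table of getter callbacks; the fcoe_sysfs layer invokes them to refresh the cached controller attributes (link_fail, vlink_fail, miss_fka, and so on) before exposing them, which is why a single bnx2fc_ctlr_get_lesb() can back several template entries. A minimal sketch of that callback-table pattern follows; it is illustrative only, and the names (ctlr_dev, sysfs_templ, drv_get_link_fail) are made up for the example, not the kernel API.

/*
 * Illustrative sketch only: a driver registers a table of getters, and the
 * generic layer calls a getter to refresh the cached value it is about to
 * show, analogous to the fcoe_sysfs_function_template above.
 */
#include <stdio.h>

struct ctlr_dev {
	unsigned int link_fail;             /* cached attribute */
};

struct sysfs_templ {
	void (*get_link_fail)(struct ctlr_dev *dev);
};

/* driver-provided getter, analogous to bnx2fc_ctlr_get_lesb() */
static void drv_get_link_fail(struct ctlr_dev *dev)
{
	dev->link_fail = 7;                 /* would read the LESB from the HBA */
}

static const struct sysfs_templ drv_templ = {
	.get_link_fail = drv_get_link_fail,
};

int main(void)
{
	struct ctlr_dev dev = { 0 };

	drv_templ.get_link_fail(&dev);      /* what a sysfs show() path would do */
	printf("link_fail=%u\n", dev.link_fail);
	return 0;
}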
diff --git a/drivers/scsi/bnx2fc/bnx2fc_hwi.c b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
index afd570962b8c..2ca6bfe4ce5e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_hwi.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_hwi.c
@@ -167,6 +167,7 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
167{ 167{
168 struct fc_lport *lport = port->lport; 168 struct fc_lport *lport = port->lport;
169 struct bnx2fc_interface *interface = port->priv; 169 struct bnx2fc_interface *interface = port->priv;
170 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
170 struct bnx2fc_hba *hba = interface->hba; 171 struct bnx2fc_hba *hba = interface->hba;
171 struct kwqe *kwqe_arr[4]; 172 struct kwqe *kwqe_arr[4];
172 struct fcoe_kwqe_conn_offload1 ofld_req1; 173 struct fcoe_kwqe_conn_offload1 ofld_req1;
@@ -314,13 +315,13 @@ int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
314 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2]; 315 ofld_req4.src_mac_addr_mid[1] = port->data_src_addr[2];
315 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1]; 316 ofld_req4.src_mac_addr_hi[0] = port->data_src_addr[1];
316 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0]; 317 ofld_req4.src_mac_addr_hi[1] = port->data_src_addr[0];
317 ofld_req4.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 318 ofld_req4.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
318 /* fcf mac */ 319 /* fcf mac */
319 ofld_req4.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 320 ofld_req4.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
320 ofld_req4.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 321 ofld_req4.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
321 ofld_req4.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 322 ofld_req4.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
322 ofld_req4.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 323 ofld_req4.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
323 ofld_req4.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 324 ofld_req4.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
324 325
325 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma; 326 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
326 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32); 327 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
@@ -351,6 +352,7 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
351{ 352{
352 struct kwqe *kwqe_arr[2]; 353 struct kwqe *kwqe_arr[2];
353 struct bnx2fc_interface *interface = port->priv; 354 struct bnx2fc_interface *interface = port->priv;
355 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
354 struct bnx2fc_hba *hba = interface->hba; 356 struct bnx2fc_hba *hba = interface->hba;
355 struct fcoe_kwqe_conn_enable_disable enbl_req; 357 struct fcoe_kwqe_conn_enable_disable enbl_req;
356 struct fc_lport *lport = port->lport; 358 struct fc_lport *lport = port->lport;
@@ -374,12 +376,12 @@ static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
374 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0]; 376 enbl_req.src_mac_addr_hi[1] = port->data_src_addr[0];
375 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN); 377 memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);
376 378
377 enbl_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 379 enbl_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
378 enbl_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 380 enbl_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
379 enbl_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 381 enbl_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
380 enbl_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 382 enbl_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
381 enbl_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 383 enbl_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
382 enbl_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 384 enbl_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
383 385
384 port_id = fc_host_port_id(lport->host); 386 port_id = fc_host_port_id(lport->host);
385 if (port_id != tgt->sid) { 387 if (port_id != tgt->sid) {
@@ -419,6 +421,7 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
419 struct bnx2fc_rport *tgt) 421 struct bnx2fc_rport *tgt)
420{ 422{
421 struct bnx2fc_interface *interface = port->priv; 423 struct bnx2fc_interface *interface = port->priv;
424 struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
422 struct bnx2fc_hba *hba = interface->hba; 425 struct bnx2fc_hba *hba = interface->hba;
423 struct fcoe_kwqe_conn_enable_disable disable_req; 426 struct fcoe_kwqe_conn_enable_disable disable_req;
424 struct kwqe *kwqe_arr[2]; 427 struct kwqe *kwqe_arr[2];
@@ -440,12 +443,12 @@ int bnx2fc_send_session_disable_req(struct fcoe_port *port,
440 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1]; 443 disable_req.src_mac_addr_hi[0] = tgt->src_addr[1];
441 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0]; 444 disable_req.src_mac_addr_hi[1] = tgt->src_addr[0];
442 445
443 disable_req.dst_mac_addr_lo[0] = interface->ctlr.dest_addr[5]; 446 disable_req.dst_mac_addr_lo[0] = ctlr->dest_addr[5];
444 disable_req.dst_mac_addr_lo[1] = interface->ctlr.dest_addr[4]; 447 disable_req.dst_mac_addr_lo[1] = ctlr->dest_addr[4];
445 disable_req.dst_mac_addr_mid[0] = interface->ctlr.dest_addr[3]; 448 disable_req.dst_mac_addr_mid[0] = ctlr->dest_addr[3];
446 disable_req.dst_mac_addr_mid[1] = interface->ctlr.dest_addr[2]; 449 disable_req.dst_mac_addr_mid[1] = ctlr->dest_addr[2];
447 disable_req.dst_mac_addr_hi[0] = interface->ctlr.dest_addr[1]; 450 disable_req.dst_mac_addr_hi[0] = ctlr->dest_addr[1];
448 disable_req.dst_mac_addr_hi[1] = interface->ctlr.dest_addr[0]; 451 disable_req.dst_mac_addr_hi[1] = ctlr->dest_addr[0];
449 452
450 port_id = tgt->sid; 453 port_id = tgt->sid;
451 disable_req.s_id[0] = (port_id & 0x000000FF); 454 disable_req.s_id[0] = (port_id & 0x000000FF);
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index e897ce975bb8..4f7453b9e41e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -810,8 +810,22 @@ retry_tmf:
810 spin_lock_bh(&tgt->tgt_lock); 810 spin_lock_bh(&tgt->tgt_lock);
811 811
812 io_req->wait_for_comp = 0; 812 io_req->wait_for_comp = 0;
813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) 813 if (!(test_bit(BNX2FC_FLAG_TM_COMPL, &io_req->req_flags))) {
814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags); 814 set_bit(BNX2FC_FLAG_TM_TIMEOUT, &io_req->req_flags);
815 if (io_req->on_tmf_queue) {
816 list_del_init(&io_req->link);
817 io_req->on_tmf_queue = 0;
818 }
819 io_req->wait_for_comp = 1;
820 bnx2fc_initiate_cleanup(io_req);
821 spin_unlock_bh(&tgt->tgt_lock);
822 rc = wait_for_completion_timeout(&io_req->tm_done,
823 BNX2FC_FW_TIMEOUT);
824 spin_lock_bh(&tgt->tgt_lock);
825 io_req->wait_for_comp = 0;
826 if (!rc)
827 kref_put(&io_req->refcount, bnx2fc_cmd_release);
828 }
815 829
816 spin_unlock_bh(&tgt->tgt_lock); 830 spin_unlock_bh(&tgt->tgt_lock);
817 831
@@ -1089,6 +1103,48 @@ int bnx2fc_eh_device_reset(struct scsi_cmnd *sc_cmd)
1089 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET); 1103 return bnx2fc_initiate_tmf(sc_cmd, FCP_TMF_LUN_RESET);
1090} 1104}
1091 1105
1106int bnx2fc_expl_logo(struct fc_lport *lport, struct bnx2fc_cmd *io_req)
1107{
1108 struct bnx2fc_rport *tgt = io_req->tgt;
1109 struct fc_rport_priv *rdata = tgt->rdata;
1110 int logo_issued;
1111 int rc = SUCCESS;
1112 int wait_cnt = 0;
1113
1114 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1115 tgt->flags);
1116 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1117 &tgt->flags);
1118 io_req->wait_for_comp = 1;
1119 bnx2fc_initiate_cleanup(io_req);
1120
1121 spin_unlock_bh(&tgt->tgt_lock);
1122
1123 wait_for_completion(&io_req->tm_done);
1124
1125 io_req->wait_for_comp = 0;
1126 /*
1127 * release the reference taken in eh_abort to allow the
1128 * target to re-login after flushing IOs
1129 */
1130 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1131
1132 if (!logo_issued) {
1133 clear_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags);
1134 mutex_lock(&lport->disc.disc_mutex);
1135 lport->tt.rport_logoff(rdata);
1136 mutex_unlock(&lport->disc.disc_mutex);
1137 do {
1138 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1139 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT) {
1140 rc = FAILED;
1141 break;
1142 }
1143 } while (!test_bit(BNX2FC_FLAG_SESSION_READY, &tgt->flags));
1144 }
1145 spin_lock_bh(&tgt->tgt_lock);
1146 return rc;
1147}
1092/** 1148/**
1093 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding 1149 * bnx2fc_eh_abort - eh_abort_handler api to abort an outstanding
1094 * SCSI command 1150 * SCSI command
@@ -1103,10 +1159,7 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1103 struct fc_rport_libfc_priv *rp = rport->dd_data; 1159 struct fc_rport_libfc_priv *rp = rport->dd_data;
1104 struct bnx2fc_cmd *io_req; 1160 struct bnx2fc_cmd *io_req;
1105 struct fc_lport *lport; 1161 struct fc_lport *lport;
1106 struct fc_rport_priv *rdata;
1107 struct bnx2fc_rport *tgt; 1162 struct bnx2fc_rport *tgt;
1108 int logo_issued;
1109 int wait_cnt = 0;
1110 int rc = FAILED; 1163 int rc = FAILED;
1111 1164
1112 1165
@@ -1183,58 +1236,31 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1183 list_add_tail(&io_req->link, &tgt->io_retire_queue); 1236 list_add_tail(&io_req->link, &tgt->io_retire_queue);
1184 1237
1185 init_completion(&io_req->tm_done); 1238 init_completion(&io_req->tm_done);
1186 io_req->wait_for_comp = 1;
1187 1239
1188 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) { 1240 if (test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
1189 /* Cancel the current timer running on this io_req */
1190 if (cancel_delayed_work(&io_req->timeout_work))
1191 kref_put(&io_req->refcount,
1192 bnx2fc_cmd_release); /* drop timer hold */
1193 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1194 rc = bnx2fc_initiate_abts(io_req);
1195 } else {
1196 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) " 1241 printk(KERN_ERR PFX "eh_abort: io_req (xid = 0x%x) "
1197 "already in abts processing\n", io_req->xid); 1242 "already in abts processing\n", io_req->xid);
1198 if (cancel_delayed_work(&io_req->timeout_work)) 1243 if (cancel_delayed_work(&io_req->timeout_work))
1199 kref_put(&io_req->refcount, 1244 kref_put(&io_req->refcount,
1200 bnx2fc_cmd_release); /* drop timer hold */ 1245 bnx2fc_cmd_release); /* drop timer hold */
1201 bnx2fc_initiate_cleanup(io_req); 1246 rc = bnx2fc_expl_logo(lport, io_req);
1247 goto out;
1248 }
1202 1249
1250 /* Cancel the current timer running on this io_req */
1251 if (cancel_delayed_work(&io_req->timeout_work))
1252 kref_put(&io_req->refcount,
1253 bnx2fc_cmd_release); /* drop timer hold */
1254 set_bit(BNX2FC_FLAG_EH_ABORT, &io_req->req_flags);
1255 io_req->wait_for_comp = 1;
1256 rc = bnx2fc_initiate_abts(io_req);
1257 if (rc == FAILED) {
1258 bnx2fc_initiate_cleanup(io_req);
1203 spin_unlock_bh(&tgt->tgt_lock); 1259 spin_unlock_bh(&tgt->tgt_lock);
1204
1205 wait_for_completion(&io_req->tm_done); 1260 wait_for_completion(&io_req->tm_done);
1206
1207 spin_lock_bh(&tgt->tgt_lock); 1261 spin_lock_bh(&tgt->tgt_lock);
1208 io_req->wait_for_comp = 0; 1262 io_req->wait_for_comp = 0;
1209 rdata = io_req->tgt->rdata; 1263 goto done;
1210 logo_issued = test_and_set_bit(BNX2FC_FLAG_EXPL_LOGO,
1211 &tgt->flags);
1212 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1213 spin_unlock_bh(&tgt->tgt_lock);
1214
1215 if (!logo_issued) {
1216 BNX2FC_IO_DBG(io_req, "Expl logo - tgt flags = 0x%lx\n",
1217 tgt->flags);
1218 mutex_lock(&lport->disc.disc_mutex);
1219 lport->tt.rport_logoff(rdata);
1220 mutex_unlock(&lport->disc.disc_mutex);
1221 do {
1222 msleep(BNX2FC_RELOGIN_WAIT_TIME);
1223 /*
1224 * If session not recovered, let SCSI-ml
1225 * escalate error recovery.
1226 */
1227 if (wait_cnt++ > BNX2FC_RELOGIN_WAIT_CNT)
1228 return FAILED;
1229 } while (!test_bit(BNX2FC_FLAG_SESSION_READY,
1230 &tgt->flags));
1231 }
1232 return SUCCESS;
1233 }
1234 if (rc == FAILED) {
1235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1236 spin_unlock_bh(&tgt->tgt_lock);
1237 return rc;
1238 } 1264 }
1239 spin_unlock_bh(&tgt->tgt_lock); 1265 spin_unlock_bh(&tgt->tgt_lock);
1240 1266
@@ -1247,7 +1273,8 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1247 /* Let the scsi-ml try to recover this command */ 1273 /* Let the scsi-ml try to recover this command */
1248 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n", 1274 printk(KERN_ERR PFX "abort failed, xid = 0x%x\n",
1249 io_req->xid); 1275 io_req->xid);
1250 rc = FAILED; 1276 rc = bnx2fc_expl_logo(lport, io_req);
1277 goto out;
1251 } else { 1278 } else {
1252 /* 1279 /*
1253 * We come here even when there was a race condition 1280 * We come here even when there was a race condition
@@ -1259,9 +1286,10 @@ int bnx2fc_eh_abort(struct scsi_cmnd *sc_cmd)
1259 bnx2fc_scsi_done(io_req, DID_ABORT); 1286 bnx2fc_scsi_done(io_req, DID_ABORT);
1260 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1287 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1261 } 1288 }
1262 1289done:
1263 /* release the reference taken in eh_abort */ 1290 /* release the reference taken in eh_abort */
1264 kref_put(&io_req->refcount, bnx2fc_cmd_release); 1291 kref_put(&io_req->refcount, bnx2fc_cmd_release);
1292out:
1265 spin_unlock_bh(&tgt->tgt_lock); 1293 spin_unlock_bh(&tgt->tgt_lock);
1266 return rc; 1294 return rc;
1267} 1295}
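The reworked TMF timeout handling above follows a drop-lock / wait-with-timeout / retake-lock pattern: tgt_lock is released before sleeping on io_req->tm_done, and the extra reference is dropped only when wait_for_completion_timeout() expires. A rough user-space analogue using pthreads is sketched below; it is illustrative only, the timeout constant and the "drop reference" step are simulated, and this is not the kernel code.

/*
 * Illustrative sketch only: hold a lock, decide the operation timed out,
 * kick off asynchronous cleanup, wait for its completion with a deadline,
 * and act differently depending on whether the wait itself timed out.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t tgt_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  tm_done  = PTHREAD_COND_INITIALIZER;
static bool tm_completed;

/* stands in for the firmware completing the cleanup request */
static void *firmware_cleanup(void *arg)
{
	(void)arg;
	sleep(1);                            /* pretend the cleanup takes a while */
	pthread_mutex_lock(&tgt_lock);
	tm_completed = true;
	pthread_cond_signal(&tm_done);       /* analogous to complete(&io_req->tm_done) */
	pthread_mutex_unlock(&tgt_lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	struct timespec deadline;
	int rc = 0;

	pthread_create(&t, NULL, firmware_cleanup, NULL);

	pthread_mutex_lock(&tgt_lock);
	/* ... the TMF has timed out; issue the cleanup request here ... */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 3;                /* stands in for BNX2FC_FW_TIMEOUT */
	while (!tm_completed && rc == 0)
		rc = pthread_cond_timedwait(&tm_done, &tgt_lock, &deadline);
	if (rc != 0)
		fprintf(stderr, "cleanup timed out, dropping extra reference\n");
	else
		printf("cleanup completed\n");
	pthread_mutex_unlock(&tgt_lock);

	pthread_join(t, NULL);
	return 0;
}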
diff --git a/drivers/scsi/bnx2fc/bnx2fc_tgt.c b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
index c1800b531270..082a25c3117e 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_tgt.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_tgt.c
@@ -185,6 +185,16 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
185 BUG_ON(rc); 185 BUG_ON(rc);
186 } 186 }
187 187
188 list_for_each_safe(list, tmp, &tgt->active_tm_queue) {
189 i++;
190 io_req = (struct bnx2fc_cmd *)list;
191 list_del_init(&io_req->link);
192 io_req->on_tmf_queue = 0;
193 BNX2FC_IO_DBG(io_req, "tm_queue cleanup\n");
194 if (io_req->wait_for_comp)
195 complete(&io_req->tm_done);
196 }
197
188 list_for_each_safe(list, tmp, &tgt->els_queue) { 198 list_for_each_safe(list, tmp, &tgt->els_queue) {
189 i++; 199 i++;
190 io_req = (struct bnx2fc_cmd *)list; 200 io_req = (struct bnx2fc_cmd *)list;
@@ -213,8 +223,17 @@ void bnx2fc_flush_active_ios(struct bnx2fc_rport *tgt)
213 223
214 BNX2FC_IO_DBG(io_req, "retire_queue flush\n"); 224 BNX2FC_IO_DBG(io_req, "retire_queue flush\n");
215 225
216 if (cancel_delayed_work(&io_req->timeout_work)) 226 if (cancel_delayed_work(&io_req->timeout_work)) {
227 if (test_and_clear_bit(BNX2FC_FLAG_EH_ABORT,
228 &io_req->req_flags)) {
229 /* Handle eh_abort timeout */
230 BNX2FC_IO_DBG(io_req, "eh_abort for IO "
231 "in retire_q\n");
232 if (io_req->wait_for_comp)
233 complete(&io_req->tm_done);
234 }
217 kref_put(&io_req->refcount, bnx2fc_cmd_release); 235 kref_put(&io_req->refcount, bnx2fc_cmd_release);
236 }
218 237
219 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags); 238 clear_bit(BNX2FC_FLAG_ISSUE_RRQ, &io_req->req_flags);
220 } 239 }
diff --git a/drivers/scsi/fcoe/Makefile b/drivers/scsi/fcoe/Makefile
index f6d37d0271f7..aed0f5db3668 100644
--- a/drivers/scsi/fcoe/Makefile
+++ b/drivers/scsi/fcoe/Makefile
@@ -1,4 +1,4 @@
1obj-$(CONFIG_FCOE) += fcoe.o 1obj-$(CONFIG_FCOE) += fcoe.o
2obj-$(CONFIG_LIBFCOE) += libfcoe.o 2obj-$(CONFIG_LIBFCOE) += libfcoe.o
3 3
4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o 4libfcoe-objs := fcoe_ctlr.o fcoe_transport.o fcoe_sysfs.o
diff --git a/drivers/scsi/fcoe/fcoe.c b/drivers/scsi/fcoe/fcoe.c
index 76e3d0b5bfa6..fe30b1b65e1d 100644
--- a/drivers/scsi/fcoe/fcoe.c
+++ b/drivers/scsi/fcoe/fcoe.c
@@ -41,6 +41,7 @@
41 41
42#include <scsi/fc/fc_encaps.h> 42#include <scsi/fc/fc_encaps.h>
43#include <scsi/fc/fc_fip.h> 43#include <scsi/fc/fc_fip.h>
44#include <scsi/fc/fc_fcoe.h>
44 45
45#include <scsi/libfc.h> 46#include <scsi/libfc.h>
46#include <scsi/fc_frame.h> 47#include <scsi/fc_frame.h>
@@ -150,6 +151,21 @@ static int fcoe_vport_create(struct fc_vport *, bool disabled);
150static int fcoe_vport_disable(struct fc_vport *, bool disable); 151static int fcoe_vport_disable(struct fc_vport *, bool disable);
151static void fcoe_set_vport_symbolic_name(struct fc_vport *); 152static void fcoe_set_vport_symbolic_name(struct fc_vport *);
152static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *); 153static void fcoe_set_port_id(struct fc_lport *, u32, struct fc_frame *);
154static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *);
155static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *);
156
157static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
158 .get_fcoe_ctlr_mode = fcoe_ctlr_get_fip_mode,
159 .get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,
160 .get_fcoe_ctlr_vlink_fail = fcoe_ctlr_get_lesb,
161 .get_fcoe_ctlr_miss_fka = fcoe_ctlr_get_lesb,
162 .get_fcoe_ctlr_symb_err = fcoe_ctlr_get_lesb,
163 .get_fcoe_ctlr_err_block = fcoe_ctlr_get_lesb,
164 .get_fcoe_ctlr_fcs_error = fcoe_ctlr_get_lesb,
165
166 .get_fcoe_fcf_selected = fcoe_fcf_get_selected,
167 .get_fcoe_fcf_vlan_id = fcoe_fcf_get_vlan_id,
168};
153 169
154static struct libfc_function_template fcoe_libfc_fcn_templ = { 170static struct libfc_function_template fcoe_libfc_fcn_templ = {
155 .frame_send = fcoe_xmit, 171 .frame_send = fcoe_xmit,
@@ -282,7 +298,7 @@ static struct scsi_host_template fcoe_shost_template = {
282static int fcoe_interface_setup(struct fcoe_interface *fcoe, 298static int fcoe_interface_setup(struct fcoe_interface *fcoe,
283 struct net_device *netdev) 299 struct net_device *netdev)
284{ 300{
285 struct fcoe_ctlr *fip = &fcoe->ctlr; 301 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
286 struct netdev_hw_addr *ha; 302 struct netdev_hw_addr *ha;
287 struct net_device *real_dev; 303 struct net_device *real_dev;
288 u8 flogi_maddr[ETH_ALEN]; 304 u8 flogi_maddr[ETH_ALEN];
@@ -366,7 +382,10 @@ static int fcoe_interface_setup(struct fcoe_interface *fcoe,
366static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev, 382static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
367 enum fip_state fip_mode) 383 enum fip_state fip_mode)
368{ 384{
385 struct fcoe_ctlr_device *ctlr_dev;
386 struct fcoe_ctlr *ctlr;
369 struct fcoe_interface *fcoe; 387 struct fcoe_interface *fcoe;
388 int size;
370 int err; 389 int err;
371 390
372 if (!try_module_get(THIS_MODULE)) { 391 if (!try_module_get(THIS_MODULE)) {
@@ -376,27 +395,32 @@ static struct fcoe_interface *fcoe_interface_create(struct net_device *netdev,
376 goto out; 395 goto out;
377 } 396 }
378 397
379 fcoe = kzalloc(sizeof(*fcoe), GFP_KERNEL); 398 size = sizeof(struct fcoe_ctlr) + sizeof(struct fcoe_interface);
380 if (!fcoe) { 399 ctlr_dev = fcoe_ctlr_device_add(&netdev->dev, &fcoe_sysfs_templ,
381 FCOE_NETDEV_DBG(netdev, "Could not allocate fcoe structure\n"); 400 size);
401 if (!ctlr_dev) {
402 FCOE_DBG("Failed to add fcoe_ctlr_device\n");
382 fcoe = ERR_PTR(-ENOMEM); 403 fcoe = ERR_PTR(-ENOMEM);
383 goto out_putmod; 404 goto out_putmod;
384 } 405 }
385 406
407 ctlr = fcoe_ctlr_device_priv(ctlr_dev);
408 fcoe = fcoe_ctlr_priv(ctlr);
409
386 dev_hold(netdev); 410 dev_hold(netdev);
387 411
388 /* 412 /*
389 * Initialize FIP. 413 * Initialize FIP.
390 */ 414 */
391 fcoe_ctlr_init(&fcoe->ctlr, fip_mode); 415 fcoe_ctlr_init(ctlr, fip_mode);
392 fcoe->ctlr.send = fcoe_fip_send; 416 ctlr->send = fcoe_fip_send;
393 fcoe->ctlr.update_mac = fcoe_update_src_mac; 417 ctlr->update_mac = fcoe_update_src_mac;
394 fcoe->ctlr.get_src_addr = fcoe_get_src_mac; 418 ctlr->get_src_addr = fcoe_get_src_mac;
395 419
396 err = fcoe_interface_setup(fcoe, netdev); 420 err = fcoe_interface_setup(fcoe, netdev);
397 if (err) { 421 if (err) {
398 fcoe_ctlr_destroy(&fcoe->ctlr); 422 fcoe_ctlr_destroy(ctlr);
399 kfree(fcoe); 423 fcoe_ctlr_device_delete(ctlr_dev);
400 dev_put(netdev); 424 dev_put(netdev);
401 fcoe = ERR_PTR(err); 425 fcoe = ERR_PTR(err);
402 goto out_putmod; 426 goto out_putmod;
@@ -419,7 +443,7 @@ out:
419static void fcoe_interface_remove(struct fcoe_interface *fcoe) 443static void fcoe_interface_remove(struct fcoe_interface *fcoe)
420{ 444{
421 struct net_device *netdev = fcoe->netdev; 445 struct net_device *netdev = fcoe->netdev;
422 struct fcoe_ctlr *fip = &fcoe->ctlr; 446 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
423 u8 flogi_maddr[ETH_ALEN]; 447 u8 flogi_maddr[ETH_ALEN];
424 const struct net_device_ops *ops; 448 const struct net_device_ops *ops;
425 449
@@ -462,7 +486,8 @@ static void fcoe_interface_remove(struct fcoe_interface *fcoe)
462static void fcoe_interface_cleanup(struct fcoe_interface *fcoe) 486static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
463{ 487{
464 struct net_device *netdev = fcoe->netdev; 488 struct net_device *netdev = fcoe->netdev;
465 struct fcoe_ctlr *fip = &fcoe->ctlr; 489 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
490 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
466 491
467 rtnl_lock(); 492 rtnl_lock();
468 if (!fcoe->removed) 493 if (!fcoe->removed)
@@ -472,8 +497,8 @@ static void fcoe_interface_cleanup(struct fcoe_interface *fcoe)
472 /* Release the self-reference taken during fcoe_interface_create() */ 497 /* Release the self-reference taken during fcoe_interface_create() */
473 /* tear-down the FCoE controller */ 498 /* tear-down the FCoE controller */
474 fcoe_ctlr_destroy(fip); 499 fcoe_ctlr_destroy(fip);
475 scsi_host_put(fcoe->ctlr.lp->host); 500 scsi_host_put(fip->lp->host);
476 kfree(fcoe); 501 fcoe_ctlr_device_delete(ctlr_dev);
477 dev_put(netdev); 502 dev_put(netdev);
478 module_put(THIS_MODULE); 503 module_put(THIS_MODULE);
479} 504}
@@ -493,9 +518,11 @@ static int fcoe_fip_recv(struct sk_buff *skb, struct net_device *netdev,
493 struct net_device *orig_dev) 518 struct net_device *orig_dev)
494{ 519{
495 struct fcoe_interface *fcoe; 520 struct fcoe_interface *fcoe;
521 struct fcoe_ctlr *ctlr;
496 522
497 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type); 523 fcoe = container_of(ptype, struct fcoe_interface, fip_packet_type);
498 fcoe_ctlr_recv(&fcoe->ctlr, skb); 524 ctlr = fcoe_to_ctlr(fcoe);
525 fcoe_ctlr_recv(ctlr, skb);
499 return 0; 526 return 0;
500} 527}
501 528
@@ -645,11 +672,13 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
645 u32 mfs; 672 u32 mfs;
646 u64 wwnn, wwpn; 673 u64 wwnn, wwpn;
647 struct fcoe_interface *fcoe; 674 struct fcoe_interface *fcoe;
675 struct fcoe_ctlr *ctlr;
648 struct fcoe_port *port; 676 struct fcoe_port *port;
649 677
650 /* Setup lport private data to point to fcoe softc */ 678 /* Setup lport private data to point to fcoe softc */
651 port = lport_priv(lport); 679 port = lport_priv(lport);
652 fcoe = port->priv; 680 fcoe = port->priv;
681 ctlr = fcoe_to_ctlr(fcoe);
653 682
654 /* 683 /*
655 * Determine max frame size based on underlying device and optional 684 * Determine max frame size based on underlying device and optional
@@ -676,10 +705,10 @@ static int fcoe_netdev_config(struct fc_lport *lport, struct net_device *netdev)
676 705
677 if (!lport->vport) { 706 if (!lport->vport) {
678 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN)) 707 if (fcoe_get_wwn(netdev, &wwnn, NETDEV_FCOE_WWNN))
679 wwnn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 1, 0); 708 wwnn = fcoe_wwn_from_mac(ctlr->ctl_src_addr, 1, 0);
680 fc_set_wwnn(lport, wwnn); 709 fc_set_wwnn(lport, wwnn);
681 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN)) 710 if (fcoe_get_wwn(netdev, &wwpn, NETDEV_FCOE_WWPN))
682 wwpn = fcoe_wwn_from_mac(fcoe->ctlr.ctl_src_addr, 711 wwpn = fcoe_wwn_from_mac(ctlr->ctl_src_addr,
683 2, 0); 712 2, 0);
684 fc_set_wwpn(lport, wwpn); 713 fc_set_wwpn(lport, wwpn);
685 } 714 }
@@ -1056,6 +1085,7 @@ static int fcoe_ddp_done(struct fc_lport *lport, u16 xid)
1056static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe, 1085static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1057 struct device *parent, int npiv) 1086 struct device *parent, int npiv)
1058{ 1087{
1088 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1059 struct net_device *netdev = fcoe->netdev; 1089 struct net_device *netdev = fcoe->netdev;
1060 struct fc_lport *lport, *n_port; 1090 struct fc_lport *lport, *n_port;
1061 struct fcoe_port *port; 1091 struct fcoe_port *port;
@@ -1119,7 +1149,7 @@ static struct fc_lport *fcoe_if_create(struct fcoe_interface *fcoe,
1119 } 1149 }
1120 1150
1121 /* Initialize the library */ 1151 /* Initialize the library */
1122 rc = fcoe_libfc_config(lport, &fcoe->ctlr, &fcoe_libfc_fcn_templ, 1); 1152 rc = fcoe_libfc_config(lport, ctlr, &fcoe_libfc_fcn_templ, 1);
1123 if (rc) { 1153 if (rc) {
1124 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the " 1154 FCOE_NETDEV_DBG(netdev, "Could not configure libfc for the "
1125 "interface\n"); 1155 "interface\n");
@@ -1386,6 +1416,7 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1386{ 1416{
1387 struct fc_lport *lport; 1417 struct fc_lport *lport;
1388 struct fcoe_rcv_info *fr; 1418 struct fcoe_rcv_info *fr;
1419 struct fcoe_ctlr *ctlr;
1389 struct fcoe_interface *fcoe; 1420 struct fcoe_interface *fcoe;
1390 struct fc_frame_header *fh; 1421 struct fc_frame_header *fh;
1391 struct fcoe_percpu_s *fps; 1422 struct fcoe_percpu_s *fps;
@@ -1393,7 +1424,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1393 unsigned int cpu; 1424 unsigned int cpu;
1394 1425
1395 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type); 1426 fcoe = container_of(ptype, struct fcoe_interface, fcoe_packet_type);
1396 lport = fcoe->ctlr.lp; 1427 ctlr = fcoe_to_ctlr(fcoe);
1428 lport = ctlr->lp;
1397 if (unlikely(!lport)) { 1429 if (unlikely(!lport)) {
1398 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure"); 1430 FCOE_NETDEV_DBG(netdev, "Cannot find hba structure");
1399 goto err2; 1431 goto err2;
@@ -1409,8 +1441,8 @@ static int fcoe_rcv(struct sk_buff *skb, struct net_device *netdev,
1409 1441
1410 eh = eth_hdr(skb); 1442 eh = eth_hdr(skb);
1411 1443
1412 if (is_fip_mode(&fcoe->ctlr) && 1444 if (is_fip_mode(ctlr) &&
1413 compare_ether_addr(eh->h_source, fcoe->ctlr.dest_addr)) { 1445 compare_ether_addr(eh->h_source, ctlr->dest_addr)) {
1414 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n", 1446 FCOE_NETDEV_DBG(netdev, "wrong source mac address:%pM\n",
1415 eh->h_source); 1447 eh->h_source);
1416 goto err; 1448 goto err;
@@ -1544,6 +1576,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1544 unsigned int elen; /* eth header, may include vlan */ 1576 unsigned int elen; /* eth header, may include vlan */
1545 struct fcoe_port *port = lport_priv(lport); 1577 struct fcoe_port *port = lport_priv(lport);
1546 struct fcoe_interface *fcoe = port->priv; 1578 struct fcoe_interface *fcoe = port->priv;
1579 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
1547 u8 sof, eof; 1580 u8 sof, eof;
1548 struct fcoe_hdr *hp; 1581 struct fcoe_hdr *hp;
1549 1582
@@ -1559,7 +1592,7 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1559 } 1592 }
1560 1593
1561 if (unlikely(fh->fh_type == FC_TYPE_ELS) && 1594 if (unlikely(fh->fh_type == FC_TYPE_ELS) &&
1562 fcoe_ctlr_els_send(&fcoe->ctlr, lport, skb)) 1595 fcoe_ctlr_els_send(ctlr, lport, skb))
1563 return 0; 1596 return 0;
1564 1597
1565 sof = fr_sof(fp); 1598 sof = fr_sof(fp);
@@ -1623,12 +1656,12 @@ static int fcoe_xmit(struct fc_lport *lport, struct fc_frame *fp)
1623 /* fill up mac and fcoe headers */ 1656 /* fill up mac and fcoe headers */
1624 eh = eth_hdr(skb); 1657 eh = eth_hdr(skb);
1625 eh->h_proto = htons(ETH_P_FCOE); 1658 eh->h_proto = htons(ETH_P_FCOE);
1626 memcpy(eh->h_dest, fcoe->ctlr.dest_addr, ETH_ALEN); 1659 memcpy(eh->h_dest, ctlr->dest_addr, ETH_ALEN);
1627 if (fcoe->ctlr.map_dest) 1660 if (ctlr->map_dest)
1628 memcpy(eh->h_dest + 3, fh->fh_d_id, 3); 1661 memcpy(eh->h_dest + 3, fh->fh_d_id, 3);
1629 1662
1630 if (unlikely(fcoe->ctlr.flogi_oxid != FC_XID_UNKNOWN)) 1663 if (unlikely(ctlr->flogi_oxid != FC_XID_UNKNOWN))
1631 memcpy(eh->h_source, fcoe->ctlr.ctl_src_addr, ETH_ALEN); 1664 memcpy(eh->h_source, ctlr->ctl_src_addr, ETH_ALEN);
1632 else 1665 else
1633 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN); 1666 memcpy(eh->h_source, port->data_src_addr, ETH_ALEN);
1634 1667
@@ -1677,6 +1710,7 @@ static void fcoe_percpu_flush_done(struct sk_buff *skb)
1677static inline int fcoe_filter_frames(struct fc_lport *lport, 1710static inline int fcoe_filter_frames(struct fc_lport *lport,
1678 struct fc_frame *fp) 1711 struct fc_frame *fp)
1679{ 1712{
1713 struct fcoe_ctlr *ctlr;
1680 struct fcoe_interface *fcoe; 1714 struct fcoe_interface *fcoe;
1681 struct fc_frame_header *fh; 1715 struct fc_frame_header *fh;
1682 struct sk_buff *skb = (struct sk_buff *)fp; 1716 struct sk_buff *skb = (struct sk_buff *)fp;
@@ -1698,7 +1732,8 @@ static inline int fcoe_filter_frames(struct fc_lport *lport,
1698 return 0; 1732 return 0;
1699 1733
1700 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv; 1734 fcoe = ((struct fcoe_port *)lport_priv(lport))->priv;
1701 if (is_fip_mode(&fcoe->ctlr) && fc_frame_payload_op(fp) == ELS_LOGO && 1735 ctlr = fcoe_to_ctlr(fcoe);
1736 if (is_fip_mode(ctlr) && fc_frame_payload_op(fp) == ELS_LOGO &&
1702 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) { 1737 ntoh24(fh->fh_s_id) == FC_FID_FLOGI) {
1703 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n"); 1738 FCOE_DBG("fcoe: dropping FCoE lport LOGO in fip mode\n");
1704 return -EINVAL; 1739 return -EINVAL;
@@ -1877,6 +1912,7 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1877 ulong event, void *ptr) 1912 ulong event, void *ptr)
1878{ 1913{
1879 struct dcb_app_type *entry = ptr; 1914 struct dcb_app_type *entry = ptr;
1915 struct fcoe_ctlr *ctlr;
1880 struct fcoe_interface *fcoe; 1916 struct fcoe_interface *fcoe;
1881 struct net_device *netdev; 1917 struct net_device *netdev;
1882 struct fcoe_port *port; 1918 struct fcoe_port *port;
@@ -1894,6 +1930,8 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1894 if (!fcoe) 1930 if (!fcoe)
1895 return NOTIFY_OK; 1931 return NOTIFY_OK;
1896 1932
1933 ctlr = fcoe_to_ctlr(fcoe);
1934
1897 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE) 1935 if (entry->dcbx & DCB_CAP_DCBX_VER_CEE)
1898 prio = ffs(entry->app.priority) - 1; 1936 prio = ffs(entry->app.priority) - 1;
1899 else 1937 else
@@ -1904,10 +1942,10 @@ static int fcoe_dcb_app_notification(struct notifier_block *notifier,
1904 1942
1905 if (entry->app.protocol == ETH_P_FIP || 1943 if (entry->app.protocol == ETH_P_FIP ||
1906 entry->app.protocol == ETH_P_FCOE) 1944 entry->app.protocol == ETH_P_FCOE)
1907 fcoe->ctlr.priority = prio; 1945 ctlr->priority = prio;
1908 1946
1909 if (entry->app.protocol == ETH_P_FCOE) { 1947 if (entry->app.protocol == ETH_P_FCOE) {
1910 port = lport_priv(fcoe->ctlr.lp); 1948 port = lport_priv(ctlr->lp);
1911 port->priority = prio; 1949 port->priority = prio;
1912 } 1950 }
1913 1951
@@ -1929,6 +1967,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1929{ 1967{
1930 struct fc_lport *lport = NULL; 1968 struct fc_lport *lport = NULL;
1931 struct net_device *netdev = ptr; 1969 struct net_device *netdev = ptr;
1970 struct fcoe_ctlr *ctlr;
1932 struct fcoe_interface *fcoe; 1971 struct fcoe_interface *fcoe;
1933 struct fcoe_port *port; 1972 struct fcoe_port *port;
1934 struct fcoe_dev_stats *stats; 1973 struct fcoe_dev_stats *stats;
@@ -1938,7 +1977,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1938 1977
1939 list_for_each_entry(fcoe, &fcoe_hostlist, list) { 1978 list_for_each_entry(fcoe, &fcoe_hostlist, list) {
1940 if (fcoe->netdev == netdev) { 1979 if (fcoe->netdev == netdev) {
1941 lport = fcoe->ctlr.lp; 1980 ctlr = fcoe_to_ctlr(fcoe);
1981 lport = ctlr->lp;
1942 break; 1982 break;
1943 } 1983 }
1944 } 1984 }
@@ -1967,7 +2007,7 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1967 break; 2007 break;
1968 case NETDEV_UNREGISTER: 2008 case NETDEV_UNREGISTER:
1969 list_del(&fcoe->list); 2009 list_del(&fcoe->list);
1970 port = lport_priv(fcoe->ctlr.lp); 2010 port = lport_priv(ctlr->lp);
1971 queue_work(fcoe_wq, &port->destroy_work); 2011 queue_work(fcoe_wq, &port->destroy_work);
1972 goto out; 2012 goto out;
1973 break; 2013 break;
@@ -1982,8 +2022,8 @@ static int fcoe_device_notification(struct notifier_block *notifier,
1982 fcoe_link_speed_update(lport); 2022 fcoe_link_speed_update(lport);
1983 2023
1984 if (link_possible && !fcoe_link_ok(lport)) 2024 if (link_possible && !fcoe_link_ok(lport))
1985 fcoe_ctlr_link_up(&fcoe->ctlr); 2025 fcoe_ctlr_link_up(ctlr);
1986 else if (fcoe_ctlr_link_down(&fcoe->ctlr)) { 2026 else if (fcoe_ctlr_link_down(ctlr)) {
1987 stats = per_cpu_ptr(lport->dev_stats, get_cpu()); 2027 stats = per_cpu_ptr(lport->dev_stats, get_cpu());
1988 stats->LinkFailureCount++; 2028 stats->LinkFailureCount++;
1989 put_cpu(); 2029 put_cpu();
@@ -2003,6 +2043,7 @@ out:
2003 */ 2043 */
2004static int fcoe_disable(struct net_device *netdev) 2044static int fcoe_disable(struct net_device *netdev)
2005{ 2045{
2046 struct fcoe_ctlr *ctlr;
2006 struct fcoe_interface *fcoe; 2047 struct fcoe_interface *fcoe;
2007 int rc = 0; 2048 int rc = 0;
2008 2049
@@ -2013,8 +2054,9 @@ static int fcoe_disable(struct net_device *netdev)
2013 rtnl_unlock(); 2054 rtnl_unlock();
2014 2055
2015 if (fcoe) { 2056 if (fcoe) {
2016 fcoe_ctlr_link_down(&fcoe->ctlr); 2057 ctlr = fcoe_to_ctlr(fcoe);
2017 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2058 fcoe_ctlr_link_down(ctlr);
2059 fcoe_clean_pending_queue(ctlr->lp);
2018 } else 2060 } else
2019 rc = -ENODEV; 2061 rc = -ENODEV;
2020 2062
@@ -2032,6 +2074,7 @@ static int fcoe_disable(struct net_device *netdev)
2032 */ 2074 */
2033static int fcoe_enable(struct net_device *netdev) 2075static int fcoe_enable(struct net_device *netdev)
2034{ 2076{
2077 struct fcoe_ctlr *ctlr;
2035 struct fcoe_interface *fcoe; 2078 struct fcoe_interface *fcoe;
2036 int rc = 0; 2079 int rc = 0;
2037 2080
@@ -2040,11 +2083,17 @@ static int fcoe_enable(struct net_device *netdev)
2040 fcoe = fcoe_hostlist_lookup_port(netdev); 2083 fcoe = fcoe_hostlist_lookup_port(netdev);
2041 rtnl_unlock(); 2084 rtnl_unlock();
2042 2085
2043 if (!fcoe) 2086 if (!fcoe) {
2044 rc = -ENODEV; 2087 rc = -ENODEV;
2045 else if (!fcoe_link_ok(fcoe->ctlr.lp)) 2088 goto out;
2046 fcoe_ctlr_link_up(&fcoe->ctlr); 2089 }
2090
2091 ctlr = fcoe_to_ctlr(fcoe);
2092
2093 if (!fcoe_link_ok(ctlr->lp))
2094 fcoe_ctlr_link_up(ctlr);
2047 2095
2096out:
2048 mutex_unlock(&fcoe_config_mutex); 2097 mutex_unlock(&fcoe_config_mutex);
2049 return rc; 2098 return rc;
2050} 2099}
@@ -2059,6 +2108,7 @@ static int fcoe_enable(struct net_device *netdev)
2059 */ 2108 */
2060static int fcoe_destroy(struct net_device *netdev) 2109static int fcoe_destroy(struct net_device *netdev)
2061{ 2110{
2111 struct fcoe_ctlr *ctlr;
2062 struct fcoe_interface *fcoe; 2112 struct fcoe_interface *fcoe;
2063 struct fc_lport *lport; 2113 struct fc_lport *lport;
2064 struct fcoe_port *port; 2114 struct fcoe_port *port;
@@ -2071,7 +2121,8 @@ static int fcoe_destroy(struct net_device *netdev)
2071 rc = -ENODEV; 2121 rc = -ENODEV;
2072 goto out_nodev; 2122 goto out_nodev;
2073 } 2123 }
2074 lport = fcoe->ctlr.lp; 2124 ctlr = fcoe_to_ctlr(fcoe);
2125 lport = ctlr->lp;
2075 port = lport_priv(lport); 2126 port = lport_priv(lport);
2076 list_del(&fcoe->list); 2127 list_del(&fcoe->list);
2077 queue_work(fcoe_wq, &port->destroy_work); 2128 queue_work(fcoe_wq, &port->destroy_work);
@@ -2126,7 +2177,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2126 int dcbx; 2177 int dcbx;
2127 u8 fup, up; 2178 u8 fup, up;
2128 struct net_device *netdev = fcoe->realdev; 2179 struct net_device *netdev = fcoe->realdev;
2129 struct fcoe_port *port = lport_priv(fcoe->ctlr.lp); 2180 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2181 struct fcoe_port *port = lport_priv(ctlr->lp);
2130 struct dcb_app app = { 2182 struct dcb_app app = {
2131 .priority = 0, 2183 .priority = 0,
2132 .protocol = ETH_P_FCOE 2184 .protocol = ETH_P_FCOE
@@ -2149,7 +2201,7 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2149 } 2201 }
2150 2202
2151 port->priority = ffs(up) ? ffs(up) - 1 : 0; 2203 port->priority = ffs(up) ? ffs(up) - 1 : 0;
2152 fcoe->ctlr.priority = ffs(fup) ? ffs(fup) - 1 : port->priority; 2204 ctlr->priority = ffs(fup) ? ffs(fup) - 1 : port->priority;
2153 } 2205 }
2154#endif 2206#endif
2155} 2207}
@@ -2166,6 +2218,8 @@ static void fcoe_dcb_create(struct fcoe_interface *fcoe)
2166static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode) 2218static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2167{ 2219{
2168 int rc = 0; 2220 int rc = 0;
2221 struct fcoe_ctlr_device *ctlr_dev;
2222 struct fcoe_ctlr *ctlr;
2169 struct fcoe_interface *fcoe; 2223 struct fcoe_interface *fcoe;
2170 struct fc_lport *lport; 2224 struct fc_lport *lport;
2171 2225
@@ -2184,7 +2238,9 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2184 goto out_nodev; 2238 goto out_nodev;
2185 } 2239 }
2186 2240
2187 lport = fcoe_if_create(fcoe, &netdev->dev, 0); 2241 ctlr = fcoe_to_ctlr(fcoe);
2242 ctlr_dev = fcoe_ctlr_to_ctlr_dev(ctlr);
2243 lport = fcoe_if_create(fcoe, &ctlr_dev->dev, 0);
2188 if (IS_ERR(lport)) { 2244 if (IS_ERR(lport)) {
2189 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n", 2245 printk(KERN_ERR "fcoe: Failed to create interface (%s)\n",
2190 netdev->name); 2246 netdev->name);
@@ -2195,7 +2251,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2195 } 2251 }
2196 2252
2197 /* Make this the "master" N_Port */ 2253 /* Make this the "master" N_Port */
2198 fcoe->ctlr.lp = lport; 2254 ctlr->lp = lport;
2199 2255
2200 /* setup DCB priority attributes. */ 2256 /* setup DCB priority attributes. */
2201 fcoe_dcb_create(fcoe); 2257 fcoe_dcb_create(fcoe);
@@ -2208,7 +2264,7 @@ static int fcoe_create(struct net_device *netdev, enum fip_state fip_mode)
2208 fc_fabric_login(lport); 2264 fc_fabric_login(lport);
2209 if (!fcoe_link_ok(lport)) { 2265 if (!fcoe_link_ok(lport)) {
2210 rtnl_unlock(); 2266 rtnl_unlock();
2211 fcoe_ctlr_link_up(&fcoe->ctlr); 2267 fcoe_ctlr_link_up(ctlr);
2212 mutex_unlock(&fcoe_config_mutex); 2268 mutex_unlock(&fcoe_config_mutex);
2213 return rc; 2269 return rc;
2214 } 2270 }
@@ -2320,11 +2376,12 @@ static int fcoe_reset(struct Scsi_Host *shost)
2320 struct fc_lport *lport = shost_priv(shost); 2376 struct fc_lport *lport = shost_priv(shost);
2321 struct fcoe_port *port = lport_priv(lport); 2377 struct fcoe_port *port = lport_priv(lport);
2322 struct fcoe_interface *fcoe = port->priv; 2378 struct fcoe_interface *fcoe = port->priv;
2379 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2323 2380
2324 fcoe_ctlr_link_down(&fcoe->ctlr); 2381 fcoe_ctlr_link_down(ctlr);
2325 fcoe_clean_pending_queue(fcoe->ctlr.lp); 2382 fcoe_clean_pending_queue(ctlr->lp);
2326 if (!fcoe_link_ok(fcoe->ctlr.lp)) 2383 if (!fcoe_link_ok(ctlr->lp))
2327 fcoe_ctlr_link_up(&fcoe->ctlr); 2384 fcoe_ctlr_link_up(ctlr);
2328 return 0; 2385 return 0;
2329} 2386}
2330 2387
@@ -2359,10 +2416,12 @@ fcoe_hostlist_lookup_port(const struct net_device *netdev)
2359 */ 2416 */
2360static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev) 2417static struct fc_lport *fcoe_hostlist_lookup(const struct net_device *netdev)
2361{ 2418{
2419 struct fcoe_ctlr *ctlr;
2362 struct fcoe_interface *fcoe; 2420 struct fcoe_interface *fcoe;
2363 2421
2364 fcoe = fcoe_hostlist_lookup_port(netdev); 2422 fcoe = fcoe_hostlist_lookup_port(netdev);
2365 return (fcoe) ? fcoe->ctlr.lp : NULL; 2423 ctlr = fcoe_to_ctlr(fcoe);
2424 return (fcoe) ? ctlr->lp : NULL;
2366} 2425}
2367 2426
2368/** 2427/**
@@ -2466,6 +2525,7 @@ module_init(fcoe_init);
2466static void __exit fcoe_exit(void) 2525static void __exit fcoe_exit(void)
2467{ 2526{
2468 struct fcoe_interface *fcoe, *tmp; 2527 struct fcoe_interface *fcoe, *tmp;
2528 struct fcoe_ctlr *ctlr;
2469 struct fcoe_port *port; 2529 struct fcoe_port *port;
2470 unsigned int cpu; 2530 unsigned int cpu;
2471 2531
@@ -2477,7 +2537,8 @@ static void __exit fcoe_exit(void)
2477 rtnl_lock(); 2537 rtnl_lock();
2478 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) { 2538 list_for_each_entry_safe(fcoe, tmp, &fcoe_hostlist, list) {
2479 list_del(&fcoe->list); 2539 list_del(&fcoe->list);
2480 port = lport_priv(fcoe->ctlr.lp); 2540 ctlr = fcoe_to_ctlr(fcoe);
2541 port = lport_priv(ctlr->lp);
2481 queue_work(fcoe_wq, &port->destroy_work); 2542 queue_work(fcoe_wq, &port->destroy_work);
2482 } 2543 }
2483 rtnl_unlock(); 2544 rtnl_unlock();
@@ -2573,7 +2634,7 @@ static struct fc_seq *fcoe_elsct_send(struct fc_lport *lport, u32 did,
2573{ 2634{
2574 struct fcoe_port *port = lport_priv(lport); 2635 struct fcoe_port *port = lport_priv(lport);
2575 struct fcoe_interface *fcoe = port->priv; 2636 struct fcoe_interface *fcoe = port->priv;
2576 struct fcoe_ctlr *fip = &fcoe->ctlr; 2637 struct fcoe_ctlr *fip = fcoe_to_ctlr(fcoe);
2577 struct fc_frame_header *fh = fc_frame_header_get(fp); 2638 struct fc_frame_header *fh = fc_frame_header_get(fp);
2578 2639
2579 switch (op) { 2640 switch (op) {
@@ -2730,6 +2791,40 @@ static void fcoe_get_lesb(struct fc_lport *lport,
2730 __fcoe_get_lesb(lport, fc_lesb, netdev); 2791 __fcoe_get_lesb(lport, fc_lesb, netdev);
2731} 2792}
2732 2793
2794static void fcoe_ctlr_get_lesb(struct fcoe_ctlr_device *ctlr_dev)
2795{
2796 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2797 struct net_device *netdev = fcoe_netdev(fip->lp);
2798 struct fcoe_fc_els_lesb *fcoe_lesb;
2799 struct fc_els_lesb fc_lesb;
2800
2801 __fcoe_get_lesb(fip->lp, &fc_lesb, netdev);
2802 fcoe_lesb = (struct fcoe_fc_els_lesb *)(&fc_lesb);
2803
2804 ctlr_dev->lesb.lesb_link_fail =
2805 ntohl(fcoe_lesb->lesb_link_fail);
2806 ctlr_dev->lesb.lesb_vlink_fail =
2807 ntohl(fcoe_lesb->lesb_vlink_fail);
2808 ctlr_dev->lesb.lesb_miss_fka =
2809 ntohl(fcoe_lesb->lesb_miss_fka);
2810 ctlr_dev->lesb.lesb_symb_err =
2811 ntohl(fcoe_lesb->lesb_symb_err);
2812 ctlr_dev->lesb.lesb_err_block =
2813 ntohl(fcoe_lesb->lesb_err_block);
2814 ctlr_dev->lesb.lesb_fcs_error =
2815 ntohl(fcoe_lesb->lesb_fcs_error);
2816}
2817
2818static void fcoe_fcf_get_vlan_id(struct fcoe_fcf_device *fcf_dev)
2819{
2820 struct fcoe_ctlr_device *ctlr_dev =
2821 fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2822 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2823 struct fcoe_interface *fcoe = fcoe_ctlr_priv(ctlr);
2824
2825 fcf_dev->vlan_id = vlan_dev_vlan_id(fcoe->netdev);
2826}
2827
2733/** 2828/**
2734 * fcoe_set_port_id() - Callback from libfc when Port_ID is set. 2829 * fcoe_set_port_id() - Callback from libfc when Port_ID is set.
2735 * @lport: the local port 2830 * @lport: the local port
@@ -2747,7 +2842,8 @@ static void fcoe_set_port_id(struct fc_lport *lport,
2747{ 2842{
2748 struct fcoe_port *port = lport_priv(lport); 2843 struct fcoe_port *port = lport_priv(lport);
2749 struct fcoe_interface *fcoe = port->priv; 2844 struct fcoe_interface *fcoe = port->priv;
2845 struct fcoe_ctlr *ctlr = fcoe_to_ctlr(fcoe);
2750 2846
2751 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI) 2847 if (fp && fc_frame_payload_op(fp) == ELS_FLOGI)
2752 fcoe_ctlr_recv_flogi(&fcoe->ctlr, lport, fp); 2848 fcoe_ctlr_recv_flogi(ctlr, lport, fp);
2753} 2849}
diff --git a/drivers/scsi/fcoe/fcoe.h b/drivers/scsi/fcoe/fcoe.h
index 96ac938d39cc..a624add4f8ec 100644
--- a/drivers/scsi/fcoe/fcoe.h
+++ b/drivers/scsi/fcoe/fcoe.h
@@ -68,7 +68,6 @@ do { \
68 * @netdev: The associated net device 68 * @netdev: The associated net device
69 * @fcoe_packet_type: FCoE packet type 69 * @fcoe_packet_type: FCoE packet type
70 * @fip_packet_type: FIP packet type 70 * @fip_packet_type: FIP packet type
71 * @ctlr: The FCoE controller (for FIP)
72 * @oem: The offload exchange manager for all local port 71 * @oem: The offload exchange manager for all local port
73 * instances associated with this port 72 * instances associated with this port
74 * @removed: Indicates fcoe interface removed from net device 73 * @removed: Indicates fcoe interface removed from net device
@@ -80,12 +79,15 @@ struct fcoe_interface {
80 struct net_device *realdev; 79 struct net_device *realdev;
81 struct packet_type fcoe_packet_type; 80 struct packet_type fcoe_packet_type;
82 struct packet_type fip_packet_type; 81 struct packet_type fip_packet_type;
83 struct fcoe_ctlr ctlr;
84 struct fc_exch_mgr *oem; 82 struct fc_exch_mgr *oem;
85 u8 removed; 83 u8 removed;
86}; 84};
87 85
88#define fcoe_from_ctlr(fip) container_of(fip, struct fcoe_interface, ctlr) 86#define fcoe_to_ctlr(x) \
87 (struct fcoe_ctlr *)(((struct fcoe_ctlr *)(x)) - 1)
88
89#define fcoe_from_ctlr(x) \
90 ((struct fcoe_interface *)((x) + 1))
89 91
90/** 92/**
91 * fcoe_netdev() - Return the net device associated with a local port 93 * fcoe_netdev() - Return the net device associated with a local port
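[Editor's note] A brief aside on the two new macros above: with the embedded ctlr member gone, the fcoe_ctlr is expected to sit immediately in front of the fcoe_interface in memory (the interface is now carved out of the controller device's private area), so fcoe_to_ctlr() and fcoe_from_ctlr() are pure pointer arithmetic. The following is a minimal sketch of that layout assumption only; it is not the driver's actual allocation path.

	/* Sketch: the arithmetic in fcoe_to_ctlr()/fcoe_from_ctlr() only holds
	 * if the fcoe_ctlr and the fcoe_interface sit back to back, controller
	 * first, in a single allocation.
	 */
	static int fcoe_layout_sketch(void)
	{
		struct fcoe_ctlr *ctlr;
		struct fcoe_interface *fcoe;

		ctlr = kzalloc(sizeof(*ctlr) + sizeof(*fcoe), GFP_KERNEL);
		if (!ctlr)
			return -ENOMEM;

		fcoe = (struct fcoe_interface *)(ctlr + 1);

		WARN_ON(fcoe_to_ctlr(fcoe) != ctlr);	/* back one fcoe_ctlr */
		WARN_ON(fcoe_from_ctlr(ctlr) != fcoe);	/* past the fcoe_ctlr */

		kfree(ctlr);
		return 0;
	}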
diff --git a/drivers/scsi/fcoe/fcoe_ctlr.c b/drivers/scsi/fcoe/fcoe_ctlr.c
index 5a4c7250aa77..d68d57241ee6 100644
--- a/drivers/scsi/fcoe/fcoe_ctlr.c
+++ b/drivers/scsi/fcoe/fcoe_ctlr.c
@@ -160,6 +160,76 @@ void fcoe_ctlr_init(struct fcoe_ctlr *fip, enum fip_state mode)
160} 160}
161EXPORT_SYMBOL(fcoe_ctlr_init); 161EXPORT_SYMBOL(fcoe_ctlr_init);
162 162
163static int fcoe_sysfs_fcf_add(struct fcoe_fcf *new)
164{
165 struct fcoe_ctlr *fip = new->fip;
166 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
167 struct fcoe_fcf_device temp, *fcf_dev;
168 int rc = 0;
169
170 LIBFCOE_FIP_DBG(fip, "New FCF fab %16.16llx mac %pM\n",
171 new->fabric_name, new->fcf_mac);
172
173 mutex_lock(&ctlr_dev->lock);
174
175 temp.fabric_name = new->fabric_name;
176 temp.switch_name = new->switch_name;
177 temp.fc_map = new->fc_map;
178 temp.vfid = new->vfid;
179 memcpy(temp.mac, new->fcf_mac, ETH_ALEN);
180 temp.priority = new->pri;
181 temp.fka_period = new->fka_period;
182 temp.selected = 0; /* default to unselected */
183
184 fcf_dev = fcoe_fcf_device_add(ctlr_dev, &temp);
185 if (unlikely(!fcf_dev)) {
186 rc = -ENOMEM;
187 goto out;
188 }
189
190 /*
191 * The fcoe_sysfs layer can return a CONNECTED fcf that
192 * has a priv (fcf was never deleted) or a CONNECTED fcf
193 * that doesn't have a priv (fcf was deleted). However,
194 * libfcoe will always delete FCFs before trying to add
195 * them. This is ensured because both recv_adv and
 196 * age_fcfs are protected by the fcoe_ctlr's mutex.
197 * This means that we should never get a FCF with a
198 * non-NULL priv pointer.
199 */
200 BUG_ON(fcf_dev->priv);
201
202 fcf_dev->priv = new;
203 new->fcf_dev = fcf_dev;
204
205 list_add(&new->list, &fip->fcfs);
206 fip->fcf_count++;
207
208out:
209 mutex_unlock(&ctlr_dev->lock);
210 return rc;
211}
212
213static void fcoe_sysfs_fcf_del(struct fcoe_fcf *new)
214{
215 struct fcoe_ctlr *fip = new->fip;
216 struct fcoe_ctlr_device *ctlr_dev = fcoe_ctlr_to_ctlr_dev(fip);
217 struct fcoe_fcf_device *fcf_dev;
218
219 list_del(&new->list);
220 fip->fcf_count--;
221
222 mutex_lock(&ctlr_dev->lock);
223
224 fcf_dev = fcoe_fcf_to_fcf_dev(new);
225 WARN_ON(!fcf_dev);
226 new->fcf_dev = NULL;
227 fcoe_fcf_device_delete(fcf_dev);
228 kfree(new);
229
230 mutex_unlock(&ctlr_dev->lock);
231}
232
163/** 233/**
164 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller 234 * fcoe_ctlr_reset_fcfs() - Reset and free all FCFs for a controller
165 * @fip: The FCoE controller whose FCFs are to be reset 235 * @fip: The FCoE controller whose FCFs are to be reset
@@ -173,10 +243,10 @@ static void fcoe_ctlr_reset_fcfs(struct fcoe_ctlr *fip)
173 243
174 fip->sel_fcf = NULL; 244 fip->sel_fcf = NULL;
175 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 245 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
176 list_del(&fcf->list); 246 fcoe_sysfs_fcf_del(fcf);
177 kfree(fcf);
178 } 247 }
179 fip->fcf_count = 0; 248 WARN_ON(fip->fcf_count);
249
180 fip->sel_time = 0; 250 fip->sel_time = 0;
181} 251}
182 252
@@ -717,8 +787,11 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
717 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD); 787 unsigned long next_timer = jiffies + msecs_to_jiffies(FIP_VN_KA_PERIOD);
718 unsigned long deadline; 788 unsigned long deadline;
719 unsigned long sel_time = 0; 789 unsigned long sel_time = 0;
790 struct list_head del_list;
720 struct fcoe_dev_stats *stats; 791 struct fcoe_dev_stats *stats;
721 792
793 INIT_LIST_HEAD(&del_list);
794
722 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu()); 795 stats = per_cpu_ptr(fip->lp->dev_stats, get_cpu());
723 796
724 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) { 797 list_for_each_entry_safe(fcf, next, &fip->fcfs, list) {
@@ -739,10 +812,13 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
739 if (time_after_eq(jiffies, deadline)) { 812 if (time_after_eq(jiffies, deadline)) {
740 if (fip->sel_fcf == fcf) 813 if (fip->sel_fcf == fcf)
741 fip->sel_fcf = NULL; 814 fip->sel_fcf = NULL;
815 /*
816 * Move to delete list so we can call
817 * fcoe_sysfs_fcf_del (which can sleep)
818 * after the put_cpu().
819 */
742 list_del(&fcf->list); 820 list_del(&fcf->list);
743 WARN_ON(!fip->fcf_count); 821 list_add(&fcf->list, &del_list);
744 fip->fcf_count--;
745 kfree(fcf);
746 stats->VLinkFailureCount++; 822 stats->VLinkFailureCount++;
747 } else { 823 } else {
748 if (time_after(next_timer, deadline)) 824 if (time_after(next_timer, deadline))
@@ -753,6 +829,12 @@ static unsigned long fcoe_ctlr_age_fcfs(struct fcoe_ctlr *fip)
753 } 829 }
754 } 830 }
755 put_cpu(); 831 put_cpu();
832
833 list_for_each_entry_safe(fcf, next, &del_list, list) {
834 /* Removes fcf from current list */
835 fcoe_sysfs_fcf_del(fcf);
836 }
837
756 if (sel_time && !fip->sel_fcf && !fip->sel_time) { 838 if (sel_time && !fip->sel_fcf && !fip->sel_time) {
757 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY); 839 sel_time += msecs_to_jiffies(FCOE_CTLR_START_DELAY);
758 fip->sel_time = sel_time; 840 fip->sel_time = sel_time;
@@ -903,23 +985,23 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
903{ 985{
904 struct fcoe_fcf *fcf; 986 struct fcoe_fcf *fcf;
905 struct fcoe_fcf new; 987 struct fcoe_fcf new;
906 struct fcoe_fcf *found;
907 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV); 988 unsigned long sol_tov = msecs_to_jiffies(FCOE_CTRL_SOL_TOV);
908 int first = 0; 989 int first = 0;
909 int mtu_valid; 990 int mtu_valid;
991 int found = 0;
992 int rc = 0;
910 993
911 if (fcoe_ctlr_parse_adv(fip, skb, &new)) 994 if (fcoe_ctlr_parse_adv(fip, skb, &new))
912 return; 995 return;
913 996
914 mutex_lock(&fip->ctlr_mutex); 997 mutex_lock(&fip->ctlr_mutex);
915 first = list_empty(&fip->fcfs); 998 first = list_empty(&fip->fcfs);
916 found = NULL;
917 list_for_each_entry(fcf, &fip->fcfs, list) { 999 list_for_each_entry(fcf, &fip->fcfs, list) {
918 if (fcf->switch_name == new.switch_name && 1000 if (fcf->switch_name == new.switch_name &&
919 fcf->fabric_name == new.fabric_name && 1001 fcf->fabric_name == new.fabric_name &&
920 fcf->fc_map == new.fc_map && 1002 fcf->fc_map == new.fc_map &&
921 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) { 1003 compare_ether_addr(fcf->fcf_mac, new.fcf_mac) == 0) {
922 found = fcf; 1004 found = 1;
923 break; 1005 break;
924 } 1006 }
925 } 1007 }
@@ -931,9 +1013,16 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
931 if (!fcf) 1013 if (!fcf)
932 goto out; 1014 goto out;
933 1015
934 fip->fcf_count++;
935 memcpy(fcf, &new, sizeof(new)); 1016 memcpy(fcf, &new, sizeof(new));
936 list_add(&fcf->list, &fip->fcfs); 1017 fcf->fip = fip;
1018 rc = fcoe_sysfs_fcf_add(fcf);
1019 if (rc) {
1020 printk(KERN_ERR "Failed to allocate sysfs instance "
1021 "for FCF, fab %16.16llx mac %pM\n",
1022 new.fabric_name, new.fcf_mac);
1023 kfree(fcf);
1024 goto out;
1025 }
937 } else { 1026 } else {
938 /* 1027 /*
939 * Update the FCF's keep-alive descriptor flags. 1028 * Update the FCF's keep-alive descriptor flags.
@@ -954,6 +1043,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
954 fcf->fka_period = new.fka_period; 1043 fcf->fka_period = new.fka_period;
955 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN); 1044 memcpy(fcf->fcf_mac, new.fcf_mac, ETH_ALEN);
956 } 1045 }
1046
957 mtu_valid = fcoe_ctlr_mtu_valid(fcf); 1047 mtu_valid = fcoe_ctlr_mtu_valid(fcf);
958 fcf->time = jiffies; 1048 fcf->time = jiffies;
959 if (!found) 1049 if (!found)
@@ -996,6 +1086,7 @@ static void fcoe_ctlr_recv_adv(struct fcoe_ctlr *fip, struct sk_buff *skb)
996 time_before(fip->sel_time, fip->timer.expires)) 1086 time_before(fip->sel_time, fip->timer.expires))
997 mod_timer(&fip->timer, fip->sel_time); 1087 mod_timer(&fip->timer, fip->sel_time);
998 } 1088 }
1089
999out: 1090out:
1000 mutex_unlock(&fip->ctlr_mutex); 1091 mutex_unlock(&fip->ctlr_mutex);
1001} 1092}
@@ -2718,9 +2809,9 @@ unlock:
2718 2809
2719/** 2810/**
2720 * fcoe_libfc_config() - Sets up libfc related properties for local port 2811 * fcoe_libfc_config() - Sets up libfc related properties for local port
2721 * @lp: The local port to configure libfc for 2812 * @lport: The local port to configure libfc for
2722 * @fip: The FCoE controller in use by the local port 2813 * @fip: The FCoE controller in use by the local port
2723 * @tt: The libfc function template 2814 * @tt: The libfc function template
2724 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized 2815 * @init_fcp: If non-zero, the FCP portion of libfc should be initialized
2725 * 2816 *
2726 * Returns : 0 for success 2817 * Returns : 0 for success
@@ -2753,3 +2844,43 @@ int fcoe_libfc_config(struct fc_lport *lport, struct fcoe_ctlr *fip,
2753 return 0; 2844 return 0;
2754} 2845}
2755EXPORT_SYMBOL_GPL(fcoe_libfc_config); 2846EXPORT_SYMBOL_GPL(fcoe_libfc_config);
2847
2848void fcoe_fcf_get_selected(struct fcoe_fcf_device *fcf_dev)
2849{
2850 struct fcoe_ctlr_device *ctlr_dev = fcoe_fcf_dev_to_ctlr_dev(fcf_dev);
2851 struct fcoe_ctlr *fip = fcoe_ctlr_device_priv(ctlr_dev);
2852 struct fcoe_fcf *fcf;
2853
2854 mutex_lock(&fip->ctlr_mutex);
2855 mutex_lock(&ctlr_dev->lock);
2856
2857 fcf = fcoe_fcf_device_priv(fcf_dev);
2858 if (fcf)
2859 fcf_dev->selected = (fcf == fip->sel_fcf) ? 1 : 0;
2860 else
2861 fcf_dev->selected = 0;
2862
2863 mutex_unlock(&ctlr_dev->lock);
2864 mutex_unlock(&fip->ctlr_mutex);
2865}
2866EXPORT_SYMBOL(fcoe_fcf_get_selected);
2867
2868void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *ctlr_dev)
2869{
2870 struct fcoe_ctlr *ctlr = fcoe_ctlr_device_priv(ctlr_dev);
2871
2872 mutex_lock(&ctlr->ctlr_mutex);
2873 switch (ctlr->mode) {
2874 case FIP_MODE_FABRIC:
2875 ctlr_dev->mode = FIP_CONN_TYPE_FABRIC;
2876 break;
2877 case FIP_MODE_VN2VN:
2878 ctlr_dev->mode = FIP_CONN_TYPE_VN2VN;
2879 break;
2880 default:
2881 ctlr_dev->mode = FIP_CONN_TYPE_UNKNOWN;
2882 break;
2883 }
2884 mutex_unlock(&ctlr->ctlr_mutex);
2885}
2886EXPORT_SYMBOL(fcoe_ctlr_get_fip_mode);
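[Editor's note] The exported fcoe_fcf_get_selected() and fcoe_ctlr_get_fip_mode() helpers are meant to be plugged into an LLD's fcoe_sysfs_function_template so the sysfs show routines in fcoe_sysfs.c (below) can call back into libfcoe. A hedged sketch of that wiring follows; the member names are inferred from the get_fcoe_ctlr_##field / get_fcoe_fcf_##field accessors and may not match the header exactly.

	/* Sketch: wiring the libfcoe callbacks into the template passed to
	 * fcoe_ctlr_device_add(). Member names are an assumption here.
	 */
	static struct fcoe_sysfs_function_template fcoe_sysfs_templ = {
		.get_fcoe_ctlr_mode	 = fcoe_ctlr_get_fip_mode,
		.get_fcoe_ctlr_link_fail = fcoe_ctlr_get_lesb,	/* fcoe.c, above */
		.get_fcoe_fcf_selected	 = fcoe_fcf_get_selected,
		.get_fcoe_fcf_vlan_id	 = fcoe_fcf_get_vlan_id,	/* fcoe.c, above */
	};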
diff --git a/drivers/scsi/fcoe/fcoe_sysfs.c b/drivers/scsi/fcoe/fcoe_sysfs.c
new file mode 100644
index 000000000000..2bc163198d33
--- /dev/null
+++ b/drivers/scsi/fcoe/fcoe_sysfs.c
@@ -0,0 +1,832 @@
1/*
2 * Copyright(c) 2011 - 2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#include <linux/module.h>
21#include <linux/types.h>
22#include <linux/kernel.h>
23#include <linux/etherdevice.h>
24
25#include <scsi/fcoe_sysfs.h>
26
27static atomic_t ctlr_num;
28static atomic_t fcf_num;
29
30/*
31 * fcoe_fcf_dev_loss_tmo: the default number of seconds that fcoe sysfs
32 * should insulate the loss of a fcf.
33 */
34static unsigned int fcoe_fcf_dev_loss_tmo = 1800; /* seconds */
35
36module_param_named(fcf_dev_loss_tmo, fcoe_fcf_dev_loss_tmo,
37 uint, S_IRUGO|S_IWUSR);
38MODULE_PARM_DESC(fcf_dev_loss_tmo,
39 "Maximum number of seconds that libfcoe should"
40 " insulate the loss of a fcf. Once this value is"
41 " exceeded, the fcf is removed.");
42
43/*
44 * These are used by the fcoe_*_show_function routines, they
45 * are intentionally placed in the .c file as they're not intended
46 * for use throughout the code.
47 */
48#define fcoe_ctlr_id(x) \
49 ((x)->id)
50#define fcoe_ctlr_work_q_name(x) \
51 ((x)->work_q_name)
52#define fcoe_ctlr_work_q(x) \
53 ((x)->work_q)
54#define fcoe_ctlr_devloss_work_q_name(x) \
55 ((x)->devloss_work_q_name)
56#define fcoe_ctlr_devloss_work_q(x) \
57 ((x)->devloss_work_q)
58#define fcoe_ctlr_mode(x) \
59 ((x)->mode)
60#define fcoe_ctlr_fcf_dev_loss_tmo(x) \
61 ((x)->fcf_dev_loss_tmo)
62#define fcoe_ctlr_link_fail(x) \
63 ((x)->lesb.lesb_link_fail)
64#define fcoe_ctlr_vlink_fail(x) \
65 ((x)->lesb.lesb_vlink_fail)
66#define fcoe_ctlr_miss_fka(x) \
67 ((x)->lesb.lesb_miss_fka)
68#define fcoe_ctlr_symb_err(x) \
69 ((x)->lesb.lesb_symb_err)
70#define fcoe_ctlr_err_block(x) \
71 ((x)->lesb.lesb_err_block)
72#define fcoe_ctlr_fcs_error(x) \
73 ((x)->lesb.lesb_fcs_error)
74#define fcoe_fcf_state(x) \
75 ((x)->state)
76#define fcoe_fcf_fabric_name(x) \
77 ((x)->fabric_name)
78#define fcoe_fcf_switch_name(x) \
79 ((x)->switch_name)
80#define fcoe_fcf_fc_map(x) \
81 ((x)->fc_map)
82#define fcoe_fcf_vfid(x) \
83 ((x)->vfid)
84#define fcoe_fcf_mac(x) \
85 ((x)->mac)
86#define fcoe_fcf_priority(x) \
87 ((x)->priority)
88#define fcoe_fcf_fka_period(x) \
89 ((x)->fka_period)
90#define fcoe_fcf_dev_loss_tmo(x) \
91 ((x)->dev_loss_tmo)
92#define fcoe_fcf_selected(x) \
93 ((x)->selected)
94#define fcoe_fcf_vlan_id(x) \
95 ((x)->vlan_id)
96
97/*
98 * dev_loss_tmo attribute
99 */
100static int fcoe_str_to_dev_loss(const char *buf, unsigned long *val)
101{
102 int ret;
103
104 ret = kstrtoul(buf, 0, val);
105 if (ret || *val < 0)
106 return -EINVAL;
107 /*
108 * Check for overflow; dev_loss_tmo is u32
109 */
110 if (*val > UINT_MAX)
111 return -EINVAL;
112
113 return 0;
114}
115
116static int fcoe_fcf_set_dev_loss_tmo(struct fcoe_fcf_device *fcf,
117 unsigned long val)
118{
119 if ((fcf->state == FCOE_FCF_STATE_UNKNOWN) ||
120 (fcf->state == FCOE_FCF_STATE_DISCONNECTED) ||
121 (fcf->state == FCOE_FCF_STATE_DELETED))
122 return -EBUSY;
123 /*
124 * Check for overflow; dev_loss_tmo is u32
125 */
126 if (val > UINT_MAX)
127 return -EINVAL;
128
129 fcoe_fcf_dev_loss_tmo(fcf) = val;
130 return 0;
131}
132
133#define FCOE_DEVICE_ATTR(_prefix, _name, _mode, _show, _store) \
134struct device_attribute device_attr_fcoe_##_prefix##_##_name = \
135 __ATTR(_name, _mode, _show, _store)
136
137#define fcoe_ctlr_show_function(field, format_string, sz, cast) \
138static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
139 struct device_attribute *attr, \
140 char *buf) \
141{ \
142 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
143 if (ctlr->f->get_fcoe_ctlr_##field) \
144 ctlr->f->get_fcoe_ctlr_##field(ctlr); \
145 return snprintf(buf, sz, format_string, \
146 cast fcoe_ctlr_##field(ctlr)); \
147}
148
149#define fcoe_fcf_show_function(field, format_string, sz, cast) \
150static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
151 struct device_attribute *attr, \
152 char *buf) \
153{ \
154 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
155 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf); \
156 if (ctlr->f->get_fcoe_fcf_##field) \
157 ctlr->f->get_fcoe_fcf_##field(fcf); \
158 return snprintf(buf, sz, format_string, \
159 cast fcoe_fcf_##field(fcf)); \
160}
161
162#define fcoe_ctlr_private_show_function(field, format_string, sz, cast) \
163static ssize_t show_fcoe_ctlr_device_##field(struct device *dev, \
164 struct device_attribute *attr, \
165 char *buf) \
166{ \
167 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev); \
168 return snprintf(buf, sz, format_string, cast fcoe_ctlr_##field(ctlr)); \
169}
170
171#define fcoe_fcf_private_show_function(field, format_string, sz, cast) \
172static ssize_t show_fcoe_fcf_device_##field(struct device *dev, \
173 struct device_attribute *attr, \
174 char *buf) \
175{ \
176 struct fcoe_fcf_device *fcf = dev_to_fcf(dev); \
177 return snprintf(buf, sz, format_string, cast fcoe_fcf_##field(fcf)); \
178}
179
180#define fcoe_ctlr_private_rd_attr(field, format_string, sz) \
181 fcoe_ctlr_private_show_function(field, format_string, sz, ) \
182 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
183 show_fcoe_ctlr_device_##field, NULL)
184
185#define fcoe_ctlr_rd_attr(field, format_string, sz) \
186 fcoe_ctlr_show_function(field, format_string, sz, ) \
187 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
188 show_fcoe_ctlr_device_##field, NULL)
189
190#define fcoe_fcf_rd_attr(field, format_string, sz) \
191 fcoe_fcf_show_function(field, format_string, sz, ) \
192 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
193 show_fcoe_fcf_device_##field, NULL)
194
195#define fcoe_fcf_private_rd_attr(field, format_string, sz) \
196 fcoe_fcf_private_show_function(field, format_string, sz, ) \
197 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
198 show_fcoe_fcf_device_##field, NULL)
199
200#define fcoe_ctlr_private_rd_attr_cast(field, format_string, sz, cast) \
201 fcoe_ctlr_private_show_function(field, format_string, sz, (cast)) \
202 static FCOE_DEVICE_ATTR(ctlr, field, S_IRUGO, \
203 show_fcoe_ctlr_device_##field, NULL)
204
205#define fcoe_fcf_private_rd_attr_cast(field, format_string, sz, cast) \
206 fcoe_fcf_private_show_function(field, format_string, sz, (cast)) \
207 static FCOE_DEVICE_ATTR(fcf, field, S_IRUGO, \
208 show_fcoe_fcf_device_##field, NULL)
209
210#define fcoe_enum_name_search(title, table_type, table) \
211static const char *get_fcoe_##title##_name(enum table_type table_key) \
212{ \
213 int i; \
214 char *name = NULL; \
215 \
216 for (i = 0; i < ARRAY_SIZE(table); i++) { \
217 if (table[i].value == table_key) { \
218 name = table[i].name; \
219 break; \
220 } \
221 } \
222 return name; \
223}
224
225static struct {
226 enum fcf_state value;
227 char *name;
228} fcf_state_names[] = {
229 { FCOE_FCF_STATE_UNKNOWN, "Unknown" },
230 { FCOE_FCF_STATE_DISCONNECTED, "Disconnected" },
231 { FCOE_FCF_STATE_CONNECTED, "Connected" },
232};
233fcoe_enum_name_search(fcf_state, fcf_state, fcf_state_names)
234#define FCOE_FCF_STATE_MAX_NAMELEN 50
235
236static ssize_t show_fcf_state(struct device *dev,
237 struct device_attribute *attr,
238 char *buf)
239{
240 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
241 const char *name;
242 name = get_fcoe_fcf_state_name(fcf->state);
243 if (!name)
244 return -EINVAL;
245 return snprintf(buf, FCOE_FCF_STATE_MAX_NAMELEN, "%s\n", name);
246}
247static FCOE_DEVICE_ATTR(fcf, state, S_IRUGO, show_fcf_state, NULL);
248
249static struct {
250 enum fip_conn_type value;
251 char *name;
252} fip_conn_type_names[] = {
253 { FIP_CONN_TYPE_UNKNOWN, "Unknown" },
254 { FIP_CONN_TYPE_FABRIC, "Fabric" },
255 { FIP_CONN_TYPE_VN2VN, "VN2VN" },
256};
257fcoe_enum_name_search(ctlr_mode, fip_conn_type, fip_conn_type_names)
258#define FCOE_CTLR_MODE_MAX_NAMELEN 50
259
260static ssize_t show_ctlr_mode(struct device *dev,
261 struct device_attribute *attr,
262 char *buf)
263{
264 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
265 const char *name;
266
267 if (ctlr->f->get_fcoe_ctlr_mode)
268 ctlr->f->get_fcoe_ctlr_mode(ctlr);
269
270 name = get_fcoe_ctlr_mode_name(ctlr->mode);
271 if (!name)
272 return -EINVAL;
273 return snprintf(buf, FCOE_CTLR_MODE_MAX_NAMELEN,
274 "%s\n", name);
275}
276static FCOE_DEVICE_ATTR(ctlr, mode, S_IRUGO,
277 show_ctlr_mode, NULL);
278
279static ssize_t
280store_private_fcoe_ctlr_fcf_dev_loss_tmo(struct device *dev,
281 struct device_attribute *attr,
282 const char *buf, size_t count)
283{
284 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
285 struct fcoe_fcf_device *fcf;
286 unsigned long val;
287 int rc;
288
289 rc = fcoe_str_to_dev_loss(buf, &val);
290 if (rc)
291 return rc;
292
293 fcoe_ctlr_fcf_dev_loss_tmo(ctlr) = val;
294 mutex_lock(&ctlr->lock);
295 list_for_each_entry(fcf, &ctlr->fcfs, peers)
296 fcoe_fcf_set_dev_loss_tmo(fcf, val);
297 mutex_unlock(&ctlr->lock);
298 return count;
299}
300fcoe_ctlr_private_show_function(fcf_dev_loss_tmo, "%d\n", 20, );
301static FCOE_DEVICE_ATTR(ctlr, fcf_dev_loss_tmo, S_IRUGO | S_IWUSR,
302 show_fcoe_ctlr_device_fcf_dev_loss_tmo,
303 store_private_fcoe_ctlr_fcf_dev_loss_tmo);
304
305/* Link Error Status Block (LESB) */
306fcoe_ctlr_rd_attr(link_fail, "%u\n", 20);
307fcoe_ctlr_rd_attr(vlink_fail, "%u\n", 20);
308fcoe_ctlr_rd_attr(miss_fka, "%u\n", 20);
309fcoe_ctlr_rd_attr(symb_err, "%u\n", 20);
310fcoe_ctlr_rd_attr(err_block, "%u\n", 20);
311fcoe_ctlr_rd_attr(fcs_error, "%u\n", 20);
312
313fcoe_fcf_private_rd_attr_cast(fabric_name, "0x%llx\n", 20, unsigned long long);
314fcoe_fcf_private_rd_attr_cast(switch_name, "0x%llx\n", 20, unsigned long long);
315fcoe_fcf_private_rd_attr(priority, "%u\n", 20);
316fcoe_fcf_private_rd_attr(fc_map, "0x%x\n", 20);
317fcoe_fcf_private_rd_attr(vfid, "%u\n", 20);
318fcoe_fcf_private_rd_attr(mac, "%pM\n", 20);
319fcoe_fcf_private_rd_attr(fka_period, "%u\n", 20);
320fcoe_fcf_rd_attr(selected, "%u\n", 20);
321fcoe_fcf_rd_attr(vlan_id, "%u\n", 20);
322
323fcoe_fcf_private_show_function(dev_loss_tmo, "%d\n", 20, )
324static ssize_t
325store_fcoe_fcf_dev_loss_tmo(struct device *dev, struct device_attribute *attr,
326 const char *buf, size_t count)
327{
328 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
329 unsigned long val;
330 int rc;
331
332 rc = fcoe_str_to_dev_loss(buf, &val);
333 if (rc)
334 return rc;
335
336 rc = fcoe_fcf_set_dev_loss_tmo(fcf, val);
337 if (rc)
338 return rc;
339 return count;
340}
341static FCOE_DEVICE_ATTR(fcf, dev_loss_tmo, S_IRUGO | S_IWUSR,
342 show_fcoe_fcf_device_dev_loss_tmo,
343 store_fcoe_fcf_dev_loss_tmo);
344
345static struct attribute *fcoe_ctlr_lesb_attrs[] = {
346 &device_attr_fcoe_ctlr_link_fail.attr,
347 &device_attr_fcoe_ctlr_vlink_fail.attr,
348 &device_attr_fcoe_ctlr_miss_fka.attr,
349 &device_attr_fcoe_ctlr_symb_err.attr,
350 &device_attr_fcoe_ctlr_err_block.attr,
351 &device_attr_fcoe_ctlr_fcs_error.attr,
352 NULL,
353};
354
355static struct attribute_group fcoe_ctlr_lesb_attr_group = {
356 .name = "lesb",
357 .attrs = fcoe_ctlr_lesb_attrs,
358};
359
360static struct attribute *fcoe_ctlr_attrs[] = {
361 &device_attr_fcoe_ctlr_fcf_dev_loss_tmo.attr,
362 &device_attr_fcoe_ctlr_mode.attr,
363 NULL,
364};
365
366static struct attribute_group fcoe_ctlr_attr_group = {
367 .attrs = fcoe_ctlr_attrs,
368};
369
370static const struct attribute_group *fcoe_ctlr_attr_groups[] = {
371 &fcoe_ctlr_attr_group,
372 &fcoe_ctlr_lesb_attr_group,
373 NULL,
374};
375
376static struct attribute *fcoe_fcf_attrs[] = {
377 &device_attr_fcoe_fcf_fabric_name.attr,
378 &device_attr_fcoe_fcf_switch_name.attr,
379 &device_attr_fcoe_fcf_dev_loss_tmo.attr,
380 &device_attr_fcoe_fcf_fc_map.attr,
381 &device_attr_fcoe_fcf_vfid.attr,
382 &device_attr_fcoe_fcf_mac.attr,
383 &device_attr_fcoe_fcf_priority.attr,
384 &device_attr_fcoe_fcf_fka_period.attr,
385 &device_attr_fcoe_fcf_state.attr,
386 &device_attr_fcoe_fcf_selected.attr,
387 &device_attr_fcoe_fcf_vlan_id.attr,
388 NULL
389};
390
391static struct attribute_group fcoe_fcf_attr_group = {
392 .attrs = fcoe_fcf_attrs,
393};
394
395static const struct attribute_group *fcoe_fcf_attr_groups[] = {
396 &fcoe_fcf_attr_group,
397 NULL,
398};
399
400struct bus_type fcoe_bus_type;
401
402static int fcoe_bus_match(struct device *dev,
403 struct device_driver *drv)
404{
405 if (dev->bus == &fcoe_bus_type)
406 return 1;
407 return 0;
408}
409
410/**
411 * fcoe_ctlr_device_release() - Release the FIP ctlr memory
412 * @dev: Pointer to the FIP ctlr's embedded device
413 *
414 * Called when the last FIP ctlr reference is released.
415 */
416static void fcoe_ctlr_device_release(struct device *dev)
417{
418 struct fcoe_ctlr_device *ctlr = dev_to_ctlr(dev);
419 kfree(ctlr);
420}
421
422/**
423 * fcoe_fcf_device_release() - Release the FIP fcf memory
424 * @dev: Pointer to the fcf's embedded device
425 *
426 * Called when the last FIP fcf reference is released.
427 */
428static void fcoe_fcf_device_release(struct device *dev)
429{
430 struct fcoe_fcf_device *fcf = dev_to_fcf(dev);
431 kfree(fcf);
432}
433
434struct device_type fcoe_ctlr_device_type = {
435 .name = "fcoe_ctlr",
436 .groups = fcoe_ctlr_attr_groups,
437 .release = fcoe_ctlr_device_release,
438};
439
440struct device_type fcoe_fcf_device_type = {
441 .name = "fcoe_fcf",
442 .groups = fcoe_fcf_attr_groups,
443 .release = fcoe_fcf_device_release,
444};
445
446struct bus_type fcoe_bus_type = {
447 .name = "fcoe",
448 .match = &fcoe_bus_match,
449};
450
451/**
452 * fcoe_ctlr_device_flush_work() - Flush a FIP ctlr's workqueue
453 * @ctlr: Pointer to the FIP ctlr whose workqueue is to be flushed
454 */
455void fcoe_ctlr_device_flush_work(struct fcoe_ctlr_device *ctlr)
456{
457 if (!fcoe_ctlr_work_q(ctlr)) {
458 printk(KERN_ERR
459 "ERROR: FIP Ctlr '%d' attempted to flush work, "
460 "when no workqueue created.\n", ctlr->id);
461 dump_stack();
462 return;
463 }
464
465 flush_workqueue(fcoe_ctlr_work_q(ctlr));
466}
467
468/**
469 * fcoe_ctlr_device_queue_work() - Schedule work for a FIP ctlr's workqueue
470 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
471 * @work: Work to queue for execution
472 *
473 * Return value:
474 * 1 on success / 0 already queued / < 0 for error
475 */
476int fcoe_ctlr_device_queue_work(struct fcoe_ctlr_device *ctlr,
477 struct work_struct *work)
478{
479 if (unlikely(!fcoe_ctlr_work_q(ctlr))) {
480 printk(KERN_ERR
481 "ERROR: FIP Ctlr '%d' attempted to queue work, "
482 "when no workqueue created.\n", ctlr->id);
483 dump_stack();
484
485 return -EINVAL;
486 }
487
488 return queue_work(fcoe_ctlr_work_q(ctlr), work);
489}
490
491/**
492 * fcoe_ctlr_device_flush_devloss() - Flush a FIP ctlr's devloss workqueue
493 * @ctlr: Pointer to FIP ctlr whose workqueue is to be flushed
494 */
495void fcoe_ctlr_device_flush_devloss(struct fcoe_ctlr_device *ctlr)
496{
497 if (!fcoe_ctlr_devloss_work_q(ctlr)) {
498 printk(KERN_ERR
499 "ERROR: FIP Ctlr '%d' attempted to flush work, "
500 "when no workqueue created.\n", ctlr->id);
501 dump_stack();
502 return;
503 }
504
505 flush_workqueue(fcoe_ctlr_devloss_work_q(ctlr));
506}
507
508/**
509 * fcoe_ctlr_device_queue_devloss_work() - Schedule work for a FIP ctlr's devloss workqueue
510 * @ctlr: Pointer to the FIP ctlr who owns the devloss workqueue
511 * @work: Work to queue for execution
512 * @delay: jiffies to delay the work queuing
513 *
514 * Return value:
515 * 1 on success / 0 already queued / < 0 for error
516 */
517int fcoe_ctlr_device_queue_devloss_work(struct fcoe_ctlr_device *ctlr,
518 struct delayed_work *work,
519 unsigned long delay)
520{
521 if (unlikely(!fcoe_ctlr_devloss_work_q(ctlr))) {
522 printk(KERN_ERR
523 "ERROR: FIP Ctlr '%d' attempted to queue work, "
524 "when no workqueue created.\n", ctlr->id);
525 dump_stack();
526
527 return -EINVAL;
528 }
529
530 return queue_delayed_work(fcoe_ctlr_devloss_work_q(ctlr), work, delay);
531}
532
533static int fcoe_fcf_device_match(struct fcoe_fcf_device *new,
534 struct fcoe_fcf_device *old)
535{
536 if (new->switch_name == old->switch_name &&
537 new->fabric_name == old->fabric_name &&
538 new->fc_map == old->fc_map &&
539 compare_ether_addr(new->mac, old->mac) == 0)
540 return 1;
541 return 0;
542}
543
544/**
545 * fcoe_ctlr_device_add() - Add a FIP ctlr to sysfs
546 * @parent: The parent device to which the fcoe_ctlr instance
547 * should be attached
548 * @f: The LLD's FCoE sysfs function template pointer
549 * @priv_size: Size to be allocated with the fcoe_ctlr_device for the LLD
550 *
551 * This routine allocates a FIP ctlr object with some additional memory
552 * for the LLD. The FIP ctlr is initialized, added to sysfs and then
553 * attributes are added to it.
554 */
555struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
556 struct fcoe_sysfs_function_template *f,
557 int priv_size)
558{
559 struct fcoe_ctlr_device *ctlr;
560 int error = 0;
561
562 ctlr = kzalloc(sizeof(struct fcoe_ctlr_device) + priv_size,
563 GFP_KERNEL);
564 if (!ctlr)
565 goto out;
566
567 ctlr->id = atomic_inc_return(&ctlr_num) - 1;
568 ctlr->f = f;
569 INIT_LIST_HEAD(&ctlr->fcfs);
570 mutex_init(&ctlr->lock);
571 ctlr->dev.parent = parent;
572 ctlr->dev.bus = &fcoe_bus_type;
573 ctlr->dev.type = &fcoe_ctlr_device_type;
574
575 ctlr->fcf_dev_loss_tmo = fcoe_fcf_dev_loss_tmo;
576
577 snprintf(ctlr->work_q_name, sizeof(ctlr->work_q_name),
578 "ctlr_wq_%d", ctlr->id);
579 ctlr->work_q = create_singlethread_workqueue(
580 ctlr->work_q_name);
581 if (!ctlr->work_q)
582 goto out_del;
583
584 snprintf(ctlr->devloss_work_q_name,
585 sizeof(ctlr->devloss_work_q_name),
586 "ctlr_dl_wq_%d", ctlr->id);
587 ctlr->devloss_work_q = create_singlethread_workqueue(
588 ctlr->devloss_work_q_name);
589 if (!ctlr->devloss_work_q)
590 goto out_del_q;
591
592 dev_set_name(&ctlr->dev, "ctlr_%d", ctlr->id);
593 error = device_register(&ctlr->dev);
594 if (error)
595 goto out_del_q2;
596
597 return ctlr;
598
599out_del_q2:
600 destroy_workqueue(ctlr->devloss_work_q);
601 ctlr->devloss_work_q = NULL;
602out_del_q:
603 destroy_workqueue(ctlr->work_q);
604 ctlr->work_q = NULL;
605out_del:
606 kfree(ctlr);
607out:
608 return NULL;
609}
610EXPORT_SYMBOL_GPL(fcoe_ctlr_device_add);
611
612/**
613 * fcoe_ctlr_device_delete() - Delete a FIP ctlr and its subtree from sysfs
614 * @ctlr: A pointer to the ctlr to be deleted
615 *
616 * Deletes a FIP ctlr and any fcfs attached
617 * to it. Deleting fcfs will cause their childen
618 * to be deleted as well.
619 *
 620 * The ctlr is detached from sysfs and its resources
621 * are freed (work q), but the memory is not freed
622 * until its last reference is released.
623 *
624 * This routine expects no locks to be held before
625 * calling.
626 *
627 * TODO: Currently there are no callbacks to clean up LLD data
628 * for a fcoe_fcf_device. LLDs must keep this in mind as they need
629 * to clean up each of their LLD data for all fcoe_fcf_device before
630 * calling fcoe_ctlr_device_delete.
631 */
632void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *ctlr)
633{
634 struct fcoe_fcf_device *fcf, *next;
635 /* Remove any attached fcfs */
636 mutex_lock(&ctlr->lock);
637 list_for_each_entry_safe(fcf, next,
638 &ctlr->fcfs, peers) {
639 list_del(&fcf->peers);
640 fcf->state = FCOE_FCF_STATE_DELETED;
641 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
642 }
643 mutex_unlock(&ctlr->lock);
644
645 fcoe_ctlr_device_flush_work(ctlr);
646
647 destroy_workqueue(ctlr->devloss_work_q);
648 ctlr->devloss_work_q = NULL;
649 destroy_workqueue(ctlr->work_q);
650 ctlr->work_q = NULL;
651
652 device_unregister(&ctlr->dev);
653}
654EXPORT_SYMBOL_GPL(fcoe_ctlr_device_delete);
655
656/**
657 * fcoe_fcf_device_final_delete() - Final delete routine
658 * @work: The FIP fcf's embedded work struct
659 *
660 * It is expected that the fcf has been removed from
661 * the FIP ctlr's list before calling this routine.
662 */
663static void fcoe_fcf_device_final_delete(struct work_struct *work)
664{
665 struct fcoe_fcf_device *fcf =
666 container_of(work, struct fcoe_fcf_device, delete_work);
667 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
668
669 /*
670 * Cancel any outstanding timers. These should really exist
671 * only when rmmod'ing the LLDD and we're asking for
672 * immediate termination of the rports
673 */
674 if (!cancel_delayed_work(&fcf->dev_loss_work))
675 fcoe_ctlr_device_flush_devloss(ctlr);
676
677 device_unregister(&fcf->dev);
678}
679
680/**
681 * fip_timeout_deleted_fcf() - Delete a fcf when the devloss timer fires
682 * @work: The FIP fcf's embedded work struct
683 *
684 * Removes the fcf from the FIP ctlr's list of fcfs and
685 * queues the final deletion.
686 */
687static void fip_timeout_deleted_fcf(struct work_struct *work)
688{
689 struct fcoe_fcf_device *fcf =
690 container_of(work, struct fcoe_fcf_device, dev_loss_work.work);
691 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
692
693 mutex_lock(&ctlr->lock);
694
695 /*
696 * If the fcf is deleted or reconnected before the timer
697 * fires the devloss queue will be flushed, but the state will
698 * either be CONNECTED or DELETED. If that is the case we
699 * cancel deleting the fcf.
700 */
701 if (fcf->state != FCOE_FCF_STATE_DISCONNECTED)
702 goto out;
703
704 dev_printk(KERN_ERR, &fcf->dev,
705 "FIP fcf connection time out: removing fcf\n");
706
707 list_del(&fcf->peers);
708 fcf->state = FCOE_FCF_STATE_DELETED;
709 fcoe_ctlr_device_queue_work(ctlr, &fcf->delete_work);
710
711out:
712 mutex_unlock(&ctlr->lock);
713}
714
715/**
716 * fcoe_fcf_device_delete() - Delete a FIP fcf
717 * @fcf: Pointer to the fcf which is to be deleted
718 *
719 * Queues the FIP fcf on the devloss workqueue
720 *
721 * Expects the ctlr_attrs mutex to be held for fcf
722 * state change.
723 */
724void fcoe_fcf_device_delete(struct fcoe_fcf_device *fcf)
725{
726 struct fcoe_ctlr_device *ctlr = fcoe_fcf_dev_to_ctlr_dev(fcf);
727 int timeout = fcf->dev_loss_tmo;
728
729 if (fcf->state != FCOE_FCF_STATE_CONNECTED)
730 return;
731
732 fcf->state = FCOE_FCF_STATE_DISCONNECTED;
733
734 /*
735 * FCF will only be re-connected by the LLD calling
736 * fcoe_fcf_device_add, and it should be setting up
737 * priv then.
738 */
739 fcf->priv = NULL;
740
741 fcoe_ctlr_device_queue_devloss_work(ctlr, &fcf->dev_loss_work,
742 timeout * HZ);
743}
744EXPORT_SYMBOL_GPL(fcoe_fcf_device_delete);
745
746/**
747 * fcoe_fcf_device_add() - Add a FCoE sysfs fcoe_fcf_device to the system
748 * @ctlr: The fcoe_ctlr_device that will be the fcoe_fcf_device parent
749 * @new_fcf: A temporary FCF used for lookups on the current list of fcfs
750 *
751 * Expects to be called with the ctlr->lock held
752 */
753struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *ctlr,
754 struct fcoe_fcf_device *new_fcf)
755{
756 struct fcoe_fcf_device *fcf;
757 int error = 0;
758
759 list_for_each_entry(fcf, &ctlr->fcfs, peers) {
760 if (fcoe_fcf_device_match(new_fcf, fcf)) {
761 if (fcf->state == FCOE_FCF_STATE_CONNECTED)
762 return fcf;
763
764 fcf->state = FCOE_FCF_STATE_CONNECTED;
765
766 if (!cancel_delayed_work(&fcf->dev_loss_work))
767 fcoe_ctlr_device_flush_devloss(ctlr);
768
769 return fcf;
770 }
771 }
772
773 fcf = kzalloc(sizeof(struct fcoe_fcf_device), GFP_ATOMIC);
774 if (unlikely(!fcf))
775 goto out;
776
777 INIT_WORK(&fcf->delete_work, fcoe_fcf_device_final_delete);
778 INIT_DELAYED_WORK(&fcf->dev_loss_work, fip_timeout_deleted_fcf);
779
780 fcf->dev.parent = &ctlr->dev;
781 fcf->dev.bus = &fcoe_bus_type;
782 fcf->dev.type = &fcoe_fcf_device_type;
783 fcf->id = atomic_inc_return(&fcf_num) - 1;
784 fcf->state = FCOE_FCF_STATE_UNKNOWN;
785
786 fcf->dev_loss_tmo = ctlr->fcf_dev_loss_tmo;
787
788 dev_set_name(&fcf->dev, "fcf_%d", fcf->id);
789
790 fcf->fabric_name = new_fcf->fabric_name;
791 fcf->switch_name = new_fcf->switch_name;
792 fcf->fc_map = new_fcf->fc_map;
793 fcf->vfid = new_fcf->vfid;
794 memcpy(fcf->mac, new_fcf->mac, ETH_ALEN);
795 fcf->priority = new_fcf->priority;
796 fcf->fka_period = new_fcf->fka_period;
797 fcf->selected = new_fcf->selected;
798
799 error = device_register(&fcf->dev);
800 if (error)
801 goto out_del;
802
803 fcf->state = FCOE_FCF_STATE_CONNECTED;
804 list_add_tail(&fcf->peers, &ctlr->fcfs);
805
806 return fcf;
807
808out_del:
809 kfree(fcf);
810out:
811 return NULL;
812}
813EXPORT_SYMBOL_GPL(fcoe_fcf_device_add);
814
815int __init fcoe_sysfs_setup(void)
816{
817 int error;
818
819 atomic_set(&ctlr_num, 0);
820 atomic_set(&fcf_num, 0);
821
822 error = bus_register(&fcoe_bus_type);
823 if (error)
824 return error;
825
826 return 0;
827}
828
829void __exit fcoe_sysfs_teardown(void)
830{
831 bus_unregister(&fcoe_bus_type);
832}
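[Editor's note] Taken together, the new fcoe_sysfs.c API is used roughly as follows by a driver: register the bus once at module init (fcoe_sysfs_setup(), hooked up in libfcoe_init below), create a controller device per interface, add and remove FCFs under the controller lock as advertisements come and go, and tear the controller down last. A hedged usage sketch; the template and parent device names are placeholders, not from this patch.

	/* Rough lifecycle sketch of the fcoe_sysfs API (template and parent
	 * device are placeholders).
	 */
	static struct fcoe_ctlr_device *example_attach(struct device *parent,
					struct fcoe_sysfs_function_template *f,
					int priv_size)
	{
		struct fcoe_ctlr_device *ctlr_dev;

		ctlr_dev = fcoe_ctlr_device_add(parent, f, priv_size);
		if (!ctlr_dev)
			return NULL;

		/* later, with ctlr_dev->lock held, per discovered FCF:
		 *	fcoe_fcf_device_add(ctlr_dev, &temp_fcf);
		 * and on ageing or loss of a single FCF:
		 *	fcoe_fcf_device_delete(fcf_dev);
		 */
		return ctlr_dev;
	}

	static void example_detach(struct fcoe_ctlr_device *ctlr_dev)
	{
		/* removes remaining FCFs, flushes work queues, unregisters */
		fcoe_ctlr_device_delete(ctlr_dev);
	}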
diff --git a/drivers/scsi/fcoe/fcoe_transport.c b/drivers/scsi/fcoe/fcoe_transport.c
index 710e149d41b6..b46f43dced78 100644
--- a/drivers/scsi/fcoe/fcoe_transport.c
+++ b/drivers/scsi/fcoe/fcoe_transport.c
@@ -815,9 +815,17 @@ out_nodev:
815 */ 815 */
816static int __init libfcoe_init(void) 816static int __init libfcoe_init(void)
817{ 817{
818 fcoe_transport_init(); 818 int rc = 0;
819 819
820 return 0; 820 rc = fcoe_transport_init();
821 if (rc)
822 return rc;
823
824 rc = fcoe_sysfs_setup();
825 if (rc)
826 fcoe_transport_exit();
827
828 return rc;
821} 829}
822module_init(libfcoe_init); 830module_init(libfcoe_init);
823 831
@@ -826,6 +834,7 @@ module_init(libfcoe_init);
826 */ 834 */
827static void __exit libfcoe_exit(void) 835static void __exit libfcoe_exit(void)
828{ 836{
837 fcoe_sysfs_teardown();
829 fcoe_transport_exit(); 838 fcoe_transport_exit();
830} 839}
831module_exit(libfcoe_exit); 840module_exit(libfcoe_exit);
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 6102ef2cb2d8..9d46fcbe7755 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_base.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_base.c
@@ -1792,7 +1792,7 @@ static inline void _base_writeq(__u64 b, volatile void __iomem *addr,
1792static inline u8 1792static inline u8
1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc) 1793_base_get_msix_index(struct MPT2SAS_ADAPTER *ioc)
1794{ 1794{
1795 return ioc->cpu_msix_table[smp_processor_id()]; 1795 return ioc->cpu_msix_table[raw_smp_processor_id()];
1796} 1796}
1797 1797
1798/** 1798/**
diff --git a/drivers/scsi/qla2xxx/Kconfig b/drivers/scsi/qla2xxx/Kconfig
index 6208d562890d..317a7fdc3b82 100644
--- a/drivers/scsi/qla2xxx/Kconfig
+++ b/drivers/scsi/qla2xxx/Kconfig
@@ -25,3 +25,12 @@ config SCSI_QLA_FC
25 Firmware images can be retrieved from: 25 Firmware images can be retrieved from:
26 26
27 ftp://ftp.qlogic.com/outgoing/linux/firmware/ 27 ftp://ftp.qlogic.com/outgoing/linux/firmware/
28
29config TCM_QLA2XXX
30 tristate "TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs"
31 depends on SCSI_QLA_FC && TARGET_CORE
32 select LIBFC
33 select BTREE
34 default n
35 ---help---
36 Say Y here to enable the TCM_QLA2XXX fabric module for Qlogic 2xxx series target mode HBAs
diff --git a/drivers/scsi/qla2xxx/Makefile b/drivers/scsi/qla2xxx/Makefile
index 5df782f4a097..dce7d788cdc9 100644
--- a/drivers/scsi/qla2xxx/Makefile
+++ b/drivers/scsi/qla2xxx/Makefile
@@ -1,5 +1,6 @@
1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \ 1qla2xxx-y := qla_os.o qla_init.o qla_mbx.o qla_iocb.o qla_isr.o qla_gs.o \
2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \ 2 qla_dbg.o qla_sup.o qla_attr.o qla_mid.o qla_dfs.o qla_bsg.o \
3 qla_nx.o 3 qla_nx.o qla_target.o
4 4
5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o 5obj-$(CONFIG_SCSI_QLA_FC) += qla2xxx.o
6obj-$(CONFIG_TCM_QLA2XXX) += tcm_qla2xxx.o
diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
index 5926f5a87ea8..5ab953029f8d 100644
--- a/drivers/scsi/qla2xxx/qla_attr.c
+++ b/drivers/scsi/qla2xxx/qla_attr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/kthread.h> 10#include <linux/kthread.h>
10#include <linux/vmalloc.h> 11#include <linux/vmalloc.h>
@@ -576,6 +577,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
576 scsi_block_requests(vha->host); 577 scsi_block_requests(vha->host);
577 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 578 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
578 if (IS_QLA82XX(ha)) { 579 if (IS_QLA82XX(ha)) {
580 ha->flags.isp82xx_no_md_cap = 1;
579 qla82xx_idc_lock(ha); 581 qla82xx_idc_lock(ha);
580 qla82xx_set_reset_owner(vha); 582 qla82xx_set_reset_owner(vha);
581 qla82xx_idc_unlock(ha); 583 qla82xx_idc_unlock(ha);
@@ -585,7 +587,7 @@ qla2x00_sysfs_write_reset(struct file *filp, struct kobject *kobj,
585 scsi_unblock_requests(vha->host); 587 scsi_unblock_requests(vha->host);
586 break; 588 break;
587 case 0x2025d: 589 case 0x2025d:
588 if (!IS_QLA81XX(ha)) 590 if (!IS_QLA81XX(ha) || !IS_QLA8031(ha))
589 return -EPERM; 591 return -EPERM;
590 592
591 ql_log(ql_log_info, vha, 0x706f, 593 ql_log(ql_log_info, vha, 0x706f,
@@ -1105,9 +1107,8 @@ qla2x00_total_isp_aborts_show(struct device *dev,
1105 struct device_attribute *attr, char *buf) 1107 struct device_attribute *attr, char *buf)
1106{ 1108{
1107 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1109 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1108 struct qla_hw_data *ha = vha->hw;
1109 return snprintf(buf, PAGE_SIZE, "%d\n", 1110 return snprintf(buf, PAGE_SIZE, "%d\n",
1110 ha->qla_stats.total_isp_aborts); 1111 vha->qla_stats.total_isp_aborts);
1111} 1112}
1112 1113
1113static ssize_t 1114static ssize_t
@@ -1154,7 +1155,7 @@ qla2x00_phy_version_show(struct device *dev, struct device_attribute *attr,
1154 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev)); 1155 scsi_qla_host_t *vha = shost_priv(class_to_shost(dev));
1155 struct qla_hw_data *ha = vha->hw; 1156 struct qla_hw_data *ha = vha->hw;
1156 1157
1157 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1158 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
1158 return snprintf(buf, PAGE_SIZE, "\n"); 1159 return snprintf(buf, PAGE_SIZE, "\n");
1159 1160
1160 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n", 1161 return snprintf(buf, PAGE_SIZE, "%d.%02d.%02d\n",
@@ -1537,7 +1538,7 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1537 dma_addr_t stats_dma; 1538 dma_addr_t stats_dma;
1538 struct fc_host_statistics *pfc_host_stat; 1539 struct fc_host_statistics *pfc_host_stat;
1539 1540
1540 pfc_host_stat = &ha->fc_host_stat; 1541 pfc_host_stat = &vha->fc_host_stat;
1541 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics)); 1542 memset(pfc_host_stat, -1, sizeof(struct fc_host_statistics));
1542 1543
1543 if (test_bit(UNLOADING, &vha->dpc_flags)) 1544 if (test_bit(UNLOADING, &vha->dpc_flags))
@@ -1580,8 +1581,8 @@ qla2x00_get_fc_host_stats(struct Scsi_Host *shost)
1580 pfc_host_stat->dumped_frames = stats->dumped_frames; 1581 pfc_host_stat->dumped_frames = stats->dumped_frames;
1581 pfc_host_stat->nos_count = stats->nos_rcvd; 1582 pfc_host_stat->nos_count = stats->nos_rcvd;
1582 } 1583 }
1583 pfc_host_stat->fcp_input_megabytes = ha->qla_stats.input_bytes >> 20; 1584 pfc_host_stat->fcp_input_megabytes = vha->qla_stats.input_bytes >> 20;
1584 pfc_host_stat->fcp_output_megabytes = ha->qla_stats.output_bytes >> 20; 1585 pfc_host_stat->fcp_output_megabytes = vha->qla_stats.output_bytes >> 20;
1585 1586
1586done_free: 1587done_free:
1587 dma_pool_free(ha->s_dma_pool, stats, stats_dma); 1588 dma_pool_free(ha->s_dma_pool, stats, stats_dma);
@@ -1737,6 +1738,7 @@ qla24xx_vport_create(struct fc_vport *fc_vport, bool disable)
1737 fc_host_supported_speeds(vha->host) = 1738 fc_host_supported_speeds(vha->host) =
1738 fc_host_supported_speeds(base_vha->host); 1739 fc_host_supported_speeds(base_vha->host);
1739 1740
1741 qlt_vport_create(vha, ha);
1740 qla24xx_vport_disable(fc_vport, disable); 1742 qla24xx_vport_disable(fc_vport, disable);
1741 1743
1742 if (ha->flags.cpu_affinity_enabled) { 1744 if (ha->flags.cpu_affinity_enabled) {
@@ -1951,12 +1953,16 @@ qla2x00_init_host_attr(scsi_qla_host_t *vha)
1951 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count; 1953 fc_host_dev_loss_tmo(vha->host) = ha->port_down_retry_count;
1952 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name); 1954 fc_host_node_name(vha->host) = wwn_to_u64(vha->node_name);
1953 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name); 1955 fc_host_port_name(vha->host) = wwn_to_u64(vha->port_name);
1954 fc_host_supported_classes(vha->host) = FC_COS_CLASS3; 1956 fc_host_supported_classes(vha->host) = ha->tgt.enable_class_2 ?
1957 (FC_COS_CLASS2|FC_COS_CLASS3) : FC_COS_CLASS3;
1955 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports; 1958 fc_host_max_npiv_vports(vha->host) = ha->max_npiv_vports;
1956 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count; 1959 fc_host_npiv_vports_inuse(vha->host) = ha->cur_vport_count;
1957 1960
1958 if (IS_CNA_CAPABLE(ha)) 1961 if (IS_CNA_CAPABLE(ha))
1959 speed = FC_PORTSPEED_10GBIT; 1962 speed = FC_PORTSPEED_10GBIT;
1963 else if (IS_QLA2031(ha))
1964 speed = FC_PORTSPEED_16GBIT | FC_PORTSPEED_8GBIT |
1965 FC_PORTSPEED_4GBIT;
1960 else if (IS_QLA25XX(ha)) 1966 else if (IS_QLA25XX(ha))
1961 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT | 1967 speed = FC_PORTSPEED_8GBIT | FC_PORTSPEED_4GBIT |
1962 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT; 1968 FC_PORTSPEED_2GBIT | FC_PORTSPEED_1GBIT;
diff --git a/drivers/scsi/qla2xxx/qla_bsg.c b/drivers/scsi/qla2xxx/qla_bsg.c
index bc3cc6d91117..c68883806c54 100644
--- a/drivers/scsi/qla2xxx/qla_bsg.c
+++ b/drivers/scsi/qla2xxx/qla_bsg.c
@@ -297,7 +297,6 @@ qla2x00_process_els(struct fc_bsg_job *bsg_job)
297 297
298 /* Initialize all required fields of fcport */ 298 /* Initialize all required fields of fcport */
299 fcport->vha = vha; 299 fcport->vha = vha;
300 fcport->vp_idx = vha->vp_idx;
301 fcport->d_id.b.al_pa = 300 fcport->d_id.b.al_pa =
302 bsg_job->request->rqst_data.h_els.port_id[0]; 301 bsg_job->request->rqst_data.h_els.port_id[0];
303 fcport->d_id.b.area = 302 fcport->d_id.b.area =
@@ -483,7 +482,6 @@ qla2x00_process_ct(struct fc_bsg_job *bsg_job)
483 482
484 /* Initialize all required fields of fcport */ 483 /* Initialize all required fields of fcport */
485 fcport->vha = vha; 484 fcport->vha = vha;
486 fcport->vp_idx = vha->vp_idx;
487 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0]; 485 fcport->d_id.b.al_pa = bsg_job->request->rqst_data.h_ct.port_id[0];
488 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1]; 486 fcport->d_id.b.area = bsg_job->request->rqst_data.h_ct.port_id[1];
489 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2]; 487 fcport->d_id.b.domain = bsg_job->request->rqst_data.h_ct.port_id[2];
@@ -544,7 +542,7 @@ qla81xx_set_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
544 int rval = 0; 542 int rval = 0;
545 struct qla_hw_data *ha = vha->hw; 543 struct qla_hw_data *ha = vha->hw;
546 544
547 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 545 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
548 goto done_set_internal; 546 goto done_set_internal;
549 547
550 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1); 548 new_config[0] = config[0] | (ENABLE_INTERNAL_LOOPBACK << 1);
@@ -586,7 +584,7 @@ qla81xx_reset_internal_loopback(scsi_qla_host_t *vha, uint16_t *config,
586 uint16_t new_config[4]; 584 uint16_t new_config[4];
587 struct qla_hw_data *ha = vha->hw; 585 struct qla_hw_data *ha = vha->hw;
588 586
589 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 587 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
590 goto done_reset_internal; 588 goto done_reset_internal;
591 589
592 memset(new_config, 0 , sizeof(new_config)); 590 memset(new_config, 0 , sizeof(new_config));
@@ -710,8 +708,7 @@ qla2x00_process_loopback(struct fc_bsg_job *bsg_job)
710 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1]; 708 elreq.options = bsg_job->request->rqst_data.h_vendor.vendor_cmd[1];
711 709
712 if ((ha->current_topology == ISP_CFG_F || 710 if ((ha->current_topology == ISP_CFG_F ||
713 (atomic_read(&vha->loop_state) == LOOP_DOWN) || 711 ((IS_QLA81XX(ha) || IS_QLA8031(ha)) &&
714 ((IS_QLA81XX(ha) || IS_QLA83XX(ha)) &&
715 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE 712 le32_to_cpu(*(uint32_t *)req_data) == ELS_OPCODE_BYTE
716 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) && 713 && req_data_len == MAX_ELS_FRAME_PAYLOAD)) &&
717 elreq.options == EXTERNAL_LOOPBACK) { 714 elreq.options == EXTERNAL_LOOPBACK) {
@@ -1402,6 +1399,9 @@ qla2x00_update_optrom(struct fc_bsg_job *bsg_job)
1402 if (rval) 1399 if (rval)
1403 return rval; 1400 return rval;
1404 1401
1402	/* Set isp82xx_no_md_cap so that the minidump is not captured */
1403 ha->flags.isp82xx_no_md_cap = 1;
1404
1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list, 1405 sg_copy_to_buffer(bsg_job->request_payload.sg_list,
1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer, 1406 bsg_job->request_payload.sg_cnt, ha->optrom_buffer,
1407 ha->optrom_region_size); 1407 ha->optrom_region_size);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.c b/drivers/scsi/qla2xxx/qla_dbg.c
index 62324a1d5573..fdee5611f3e2 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.c
+++ b/drivers/scsi/qla2xxx/qla_dbg.c
@@ -11,27 +11,31 @@
11 * ---------------------------------------------------------------------- 11 * ----------------------------------------------------------------------
12 * | Level | Last Value Used | Holes | 12 * | Level | Last Value Used | Holes |
13 * ---------------------------------------------------------------------- 13 * ----------------------------------------------------------------------
14 * | Module Init and Probe | 0x0120 | 0x4b,0xba,0xfa | 14 * | Module Init and Probe | 0x0122 | 0x4b,0xba,0xfa |
15 * | Mailbox commands | 0x113e | 0x112c-0x112e | 15 * | Mailbox commands | 0x1140 | 0x111a-0x111b |
16 * | | | 0x112c-0x112e |
16 * | | | 0x113a | 17 * | | | 0x113a |
17 * | Device Discovery | 0x2086 | 0x2020-0x2022 | 18 * | Device Discovery | 0x2086 | 0x2020-0x2022 |
18 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 | 19 * | Queue Command and IO tracing | 0x3030 | 0x3006,0x3008 |
19 * | | | 0x302d-0x302e | 20 * | | | 0x302d-0x302e |
20 * | DPC Thread | 0x401c | | 21 * | DPC Thread | 0x401c | 0x4002,0x4013 |
21 * | Async Events | 0x505d | 0x502b-0x502f | 22 * | Async Events | 0x505f | 0x502b-0x502f |
22 * | | | 0x5047,0x5052 | 23 * | | | 0x5047,0x5052 |
23 * | Timer Routines | 0x6011 | 0x600e-0x600f | 24 * | Timer Routines | 0x6011 | |
24 * | User Space Interactions | 0x709f | 0x7018,0x702e, | 25 * | User Space Interactions | 0x709f | 0x7018,0x702e, |
25 * | | | 0x7039,0x7045, | 26 * | | | 0x7039,0x7045, |
26 * | | | 0x7073-0x7075, | 27 * | | | 0x7073-0x7075, |
27 * | | | 0x708c | 28 * | | | 0x708c |
28 * | Task Management | 0x803c | 0x8025-0x8026 | 29 * | Task Management | 0x803c | 0x8025-0x8026 |
29 * | | | 0x800b,0x8039 | 30 * | | | 0x800b,0x8039 |
30 * | AER/EEH | 0x900f | | 31 * | AER/EEH | 0x9011 | |
31 * | Virtual Port | 0xa007 | | 32 * | Virtual Port | 0xa007 | |
32 * | ISP82XX Specific | 0xb054 | 0xb053 | 33 * | ISP82XX Specific | 0xb054 | 0xb024 |
33 * | MultiQ | 0xc00c | | 34 * | MultiQ | 0xc00c | |
34 * | Misc | 0xd010 | | 35 * | Misc | 0xd010 | |
36 * | Target Mode | 0xe06f | |
37 * | Target Mode Management | 0xf071 | |
38 * | Target Mode Task Management | 0x1000b | |
35 * ---------------------------------------------------------------------- 39 * ----------------------------------------------------------------------
36 */ 40 */
37 41
@@ -379,6 +383,54 @@ qla25xx_copy_fce(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
379} 383}
380 384
381static inline void * 385static inline void *
386qla2xxx_copy_atioqueues(struct qla_hw_data *ha, void *ptr,
387 uint32_t **last_chain)
388{
389 struct qla2xxx_mqueue_chain *q;
390 struct qla2xxx_mqueue_header *qh;
391 uint32_t num_queues;
392 int que;
393 struct {
394 int length;
395 void *ring;
396 } aq, *aqp;
397
398 if (!ha->tgt.atio_q_length)
399 return ptr;
400
401 num_queues = 1;
402 aqp = &aq;
403 aqp->length = ha->tgt.atio_q_length;
404 aqp->ring = ha->tgt.atio_ring;
405
406 for (que = 0; que < num_queues; que++) {
407 /* aqp = ha->atio_q_map[que]; */
408 q = ptr;
409 *last_chain = &q->type;
410 q->type = __constant_htonl(DUMP_CHAIN_QUEUE);
411 q->chain_size = htonl(
412 sizeof(struct qla2xxx_mqueue_chain) +
413 sizeof(struct qla2xxx_mqueue_header) +
414 (aqp->length * sizeof(request_t)));
415 ptr += sizeof(struct qla2xxx_mqueue_chain);
416
417 /* Add header. */
418 qh = ptr;
419 qh->queue = __constant_htonl(TYPE_ATIO_QUEUE);
420 qh->number = htonl(que);
421 qh->size = htonl(aqp->length * sizeof(request_t));
422 ptr += sizeof(struct qla2xxx_mqueue_header);
423
424 /* Add data. */
425 memcpy(ptr, aqp->ring, aqp->length * sizeof(request_t));
426
427 ptr += aqp->length * sizeof(request_t);
428 }
429
430 return ptr;
431}
432
433static inline void *
382qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain) 434qla25xx_copy_mqueues(struct qla_hw_data *ha, void *ptr, uint32_t **last_chain)
383{ 435{
384 struct qla2xxx_mqueue_chain *q; 436 struct qla2xxx_mqueue_chain *q;
@@ -873,6 +925,8 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
873 struct qla24xx_fw_dump *fw; 925 struct qla24xx_fw_dump *fw;
874 uint32_t ext_mem_cnt; 926 uint32_t ext_mem_cnt;
875 void *nxt; 927 void *nxt;
928 void *nxt_chain;
929 uint32_t *last_chain = NULL;
876 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 930 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
877 931
878 if (IS_QLA82XX(ha)) 932 if (IS_QLA82XX(ha))
@@ -1091,6 +1145,16 @@ qla24xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1091 1145
1092 qla24xx_copy_eft(ha, nxt); 1146 qla24xx_copy_eft(ha, nxt);
1093 1147
1148 nxt_chain = (void *)ha->fw_dump + ha->chain_offset;
1149 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1150 if (last_chain) {
1151 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1152 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
1153 }
1154
1155 /* Adjust valid length. */
1156 ha->fw_dump_len = (nxt_chain - (void *)ha->fw_dump);
1157
1094qla24xx_fw_dump_failed_0: 1158qla24xx_fw_dump_failed_0:
1095 qla2xxx_dump_post_process(base_vha, rval); 1159 qla2xxx_dump_post_process(base_vha, rval);
1096 1160
@@ -1399,6 +1463,7 @@ qla25xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1399 /* Chain entries -- started with MQ. */ 1463 /* Chain entries -- started with MQ. */
1400 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1464 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1401 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1465 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1466 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1402 if (last_chain) { 1467 if (last_chain) {
1403 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1468 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1404 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1469 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -1717,6 +1782,7 @@ qla81xx_fw_dump(scsi_qla_host_t *vha, int hardware_locked)
1717 /* Chain entries -- started with MQ. */ 1782 /* Chain entries -- started with MQ. */
1718 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 1783 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
1719 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 1784 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
1785 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
1720 if (last_chain) { 1786 if (last_chain) {
1721 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 1787 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
1722 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 1788 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
@@ -2218,6 +2284,7 @@ copy_queue:
2218 /* Chain entries -- started with MQ. */ 2284 /* Chain entries -- started with MQ. */
2219 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain); 2285 nxt_chain = qla25xx_copy_fce(ha, nxt_chain, &last_chain);
2220 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain); 2286 nxt_chain = qla25xx_copy_mqueues(ha, nxt_chain, &last_chain);
2287 nxt_chain = qla2xxx_copy_atioqueues(ha, nxt_chain, &last_chain);
2221 if (last_chain) { 2288 if (last_chain) {
2222 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT); 2289 ha->fw_dump->version |= __constant_htonl(DUMP_CHAIN_VARIANT);
2223 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST); 2290 *last_chain |= __constant_htonl(DUMP_CHAIN_LAST);
diff --git a/drivers/scsi/qla2xxx/qla_dbg.h b/drivers/scsi/qla2xxx/qla_dbg.h
index 2157bdf1569a..f278df8cce0f 100644
--- a/drivers/scsi/qla2xxx/qla_dbg.h
+++ b/drivers/scsi/qla2xxx/qla_dbg.h
@@ -244,6 +244,7 @@ struct qla2xxx_mqueue_header {
244 uint32_t queue; 244 uint32_t queue;
245#define TYPE_REQUEST_QUEUE 0x1 245#define TYPE_REQUEST_QUEUE 0x1
246#define TYPE_RESPONSE_QUEUE 0x2 246#define TYPE_RESPONSE_QUEUE 0x2
247#define TYPE_ATIO_QUEUE 0x3
247 uint32_t number; 248 uint32_t number;
248 uint32_t size; 249 uint32_t size;
249}; 250};
@@ -339,3 +340,11 @@ ql_log_pci(uint32_t, struct pci_dev *pdev, int32_t, const char *fmt, ...);
339#define ql_dbg_misc 0x00010000 /* For dumping everything that is not 340#define ql_dbg_misc 0x00010000 /* For dumping everything that is not
340				    * covered by upper categories 341				    * covered by upper categories
341 */ 342 */
343#define ql_dbg_verbose 0x00008000 /* More verbosity for each level
344 * This is to be used with other levels where
345 * more verbosity is required. It might not
346 * be applicable to all the levels.
347 */
348#define ql_dbg_tgt 0x00004000 /* Target mode */
349#define ql_dbg_tgt_mgt 0x00002000 /* Target mode management */
350#define ql_dbg_tgt_tmr 0x00001000 /* Target mode task management */
diff --git a/drivers/scsi/qla2xxx/qla_def.h b/drivers/scsi/qla2xxx/qla_def.h
index a2443031dbe7..39007f53aec0 100644
--- a/drivers/scsi/qla2xxx/qla_def.h
+++ b/drivers/scsi/qla2xxx/qla_def.h
@@ -186,6 +186,7 @@
186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/ 186#define RESPONSE_ENTRY_CNT_2100 64 /* Number of response entries.*/
187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/ 187#define RESPONSE_ENTRY_CNT_2300 512 /* Number of response entries.*/
188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/ 188#define RESPONSE_ENTRY_CNT_MQ 128 /* Number of response entries.*/
189#define ATIO_ENTRY_CNT_24XX 4096 /* Number of ATIO entries. */
189 190
190struct req_que; 191struct req_que;
191 192
@@ -1234,11 +1235,27 @@ typedef struct {
1234 * ISP queue - response queue entry definition. 1235 * ISP queue - response queue entry definition.
1235 */ 1236 */
1236typedef struct { 1237typedef struct {
1237 uint8_t data[60]; 1238 uint8_t entry_type; /* Entry type. */
1239 uint8_t entry_count; /* Entry count. */
1240 uint8_t sys_define; /* System defined. */
1241 uint8_t entry_status; /* Entry Status. */
1242 uint32_t handle; /* System defined handle */
1243 uint8_t data[52];
1238 uint32_t signature; 1244 uint32_t signature;
1239#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */ 1245#define RESPONSE_PROCESSED 0xDEADDEAD /* Signature */
1240} response_t; 1246} response_t;
1241 1247
1248/*
1249 * ISP queue - ATIO queue entry definition.
1250 */
1251struct atio {
1252 uint8_t entry_type; /* Entry type. */
1253 uint8_t entry_count; /* Entry count. */
1254 uint8_t data[58];
1255 uint32_t signature;
1256#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
1257};
1258
1242typedef union { 1259typedef union {
1243 uint16_t extended; 1260 uint16_t extended;
1244 struct { 1261 struct {
@@ -1719,11 +1736,13 @@ typedef struct fc_port {
1719 struct fc_rport *rport, *drport; 1736 struct fc_rport *rport, *drport;
1720 u32 supported_classes; 1737 u32 supported_classes;
1721 1738
1722 uint16_t vp_idx;
1723 uint8_t fc4_type; 1739 uint8_t fc4_type;
1724 uint8_t scan_state; 1740 uint8_t scan_state;
1725} fc_port_t; 1741} fc_port_t;
1726 1742
1743#define QLA_FCPORT_SCAN_NONE 0
1744#define QLA_FCPORT_SCAN_FOUND 1
1745
1727/* 1746/*
1728 * Fibre channel port/lun states. 1747 * Fibre channel port/lun states.
1729 */ 1748 */
@@ -1747,6 +1766,7 @@ static const char * const port_state_str[] = {
1747#define FCF_LOGIN_NEEDED BIT_1 1766#define FCF_LOGIN_NEEDED BIT_1
1748#define FCF_FCP2_DEVICE BIT_2 1767#define FCF_FCP2_DEVICE BIT_2
1749#define FCF_ASYNC_SENT BIT_3 1768#define FCF_ASYNC_SENT BIT_3
1769#define FCF_CONF_COMP_SUPPORTED BIT_4
1750 1770
1751/* No loop ID flag. */ 1771/* No loop ID flag. */
1752#define FC_NO_LOOP_ID 0x1000 1772#define FC_NO_LOOP_ID 0x1000
@@ -2419,6 +2439,40 @@ struct qlfc_fw {
2419 uint32_t len; 2439 uint32_t len;
2420}; 2440};
2421 2441
2442struct qlt_hw_data {
2443 /* Protected by hw lock */
2444 uint32_t enable_class_2:1;
2445 uint32_t enable_explicit_conf:1;
2446 uint32_t ini_mode_force_reverse:1;
2447 uint32_t node_name_set:1;
2448
2449 dma_addr_t atio_dma; /* Physical address. */
2450 struct atio *atio_ring; /* Base virtual address */
2451 struct atio *atio_ring_ptr; /* Current address. */
2452 uint16_t atio_ring_index; /* Current index. */
2453 uint16_t atio_q_length;
2454
2455 void *target_lport_ptr;
2456 struct qla_tgt_func_tmpl *tgt_ops;
2457 struct qla_tgt *qla_tgt;
2458 struct qla_tgt_cmd *cmds[MAX_OUTSTANDING_COMMANDS];
2459 uint16_t current_handle;
2460
2461 struct qla_tgt_vp_map *tgt_vp_map;
2462 struct mutex tgt_mutex;
2463 struct mutex tgt_host_action_mutex;
2464
2465 int saved_set;
2466 uint16_t saved_exchange_count;
2467 uint32_t saved_firmware_options_1;
2468 uint32_t saved_firmware_options_2;
2469 uint32_t saved_firmware_options_3;
2470 uint8_t saved_firmware_options[2];
2471 uint8_t saved_add_firmware_options[2];
2472
2473 uint8_t tgt_node_name[WWN_SIZE];
2474};
2475
2422/* 2476/*
2423 * Qlogic host adapter specific data structure. 2477 * Qlogic host adapter specific data structure.
2424*/ 2478*/
@@ -2460,7 +2514,9 @@ struct qla_hw_data {
2460 uint32_t thermal_supported:1; 2514 uint32_t thermal_supported:1;
2461 uint32_t isp82xx_reset_hdlr_active:1; 2515 uint32_t isp82xx_reset_hdlr_active:1;
2462 uint32_t isp82xx_reset_owner:1; 2516 uint32_t isp82xx_reset_owner:1;
2463 /* 28 bits */ 2517 uint32_t isp82xx_no_md_cap:1;
2518 uint32_t host_shutting_down:1;
2519 /* 30 bits */
2464 } flags; 2520 } flags;
2465 2521
2466 /* This spinlock is used to protect "io transactions", you must 2522 /* This spinlock is used to protect "io transactions", you must
@@ -2804,7 +2860,6 @@ struct qla_hw_data {
2804 /* ISP2322: red, green, amber. */ 2860 /* ISP2322: red, green, amber. */
2805 uint16_t zio_mode; 2861 uint16_t zio_mode;
2806 uint16_t zio_timer; 2862 uint16_t zio_timer;
2807 struct fc_host_statistics fc_host_stat;
2808 2863
2809 struct qla_msix_entry *msix_entries; 2864 struct qla_msix_entry *msix_entries;
2810 2865
@@ -2817,7 +2872,6 @@ struct qla_hw_data {
2817 int cur_vport_count; 2872 int cur_vport_count;
2818 2873
2819 struct qla_chip_state_84xx *cs84xx; 2874 struct qla_chip_state_84xx *cs84xx;
2820 struct qla_statistics qla_stats;
2821 struct isp_operations *isp_ops; 2875 struct isp_operations *isp_ops;
2822 struct workqueue_struct *wq; 2876 struct workqueue_struct *wq;
2823 struct qlfc_fw fw_buf; 2877 struct qlfc_fw fw_buf;
@@ -2863,6 +2917,8 @@ struct qla_hw_data {
2863 dma_addr_t md_tmplt_hdr_dma; 2917 dma_addr_t md_tmplt_hdr_dma;
2864 void *md_dump; 2918 void *md_dump;
2865 uint32_t md_dump_size; 2919 uint32_t md_dump_size;
2920
2921 struct qlt_hw_data tgt;
2866}; 2922};
2867 2923
2868/* 2924/*
@@ -2920,6 +2976,7 @@ typedef struct scsi_qla_host {
2920#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */ 2976#define FCOE_CTX_RESET_NEEDED 18 /* Initiate FCoE context reset */
2921#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */ 2977#define MPI_RESET_NEEDED 19 /* Initiate MPI FW reset */
2922#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */ 2978#define ISP_QUIESCE_NEEDED 20 /* Driver need some quiescence */
2979#define SCR_PENDING 21 /* SCR in target mode */
2923 2980
2924 uint32_t device_flags; 2981 uint32_t device_flags;
2925#define SWITCH_FOUND BIT_0 2982#define SWITCH_FOUND BIT_0
@@ -2979,10 +3036,21 @@ typedef struct scsi_qla_host {
2979 struct req_que *req; 3036 struct req_que *req;
2980 int fw_heartbeat_counter; 3037 int fw_heartbeat_counter;
2981 int seconds_since_last_heartbeat; 3038 int seconds_since_last_heartbeat;
3039 struct fc_host_statistics fc_host_stat;
3040 struct qla_statistics qla_stats;
2982 3041
2983 atomic_t vref_count; 3042 atomic_t vref_count;
2984} scsi_qla_host_t; 3043} scsi_qla_host_t;
2985 3044
3045#define SET_VP_IDX 1
3046#define SET_AL_PA 2
3047#define RESET_VP_IDX 3
3048#define RESET_AL_PA 4
3049struct qla_tgt_vp_map {
3050 uint8_t idx;
3051 scsi_qla_host_t *vha;
3052};
3053
2986/* 3054/*
2987 * Macros to help code, maintain, etc. 3055 * Macros to help code, maintain, etc.
2988 */ 3056 */
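The qlt_hw_data fields added above describe a simple ring buffer of ATIO entries that the firmware fills and the driver drains. A minimal sketch of that bookkeeping, assuming the structures defined in this header (an illustrative helper only, not the driver's actual routine, which lives in qla_target.c):

    /* Illustrative only: assumes struct qla_hw_data, struct atio and
     * ATIO_PROCESSED from qla_def.h are in scope. Walk the ATIO ring
     * until an already-processed entry is reached, advancing the index
     * with wrap-around at atio_q_length.
     */
    static void example_drain_atio_ring(struct qla_hw_data *ha)
    {
    	while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
    		struct atio *pkt = ha->tgt.atio_ring_ptr;

    		/* Hand "pkt" to the target-mode code here. */

    		if (++ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
    			/* Wrap back to the base of the ring. */
    			ha->tgt.atio_ring_index = 0;
    			ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
    		} else {
    			ha->tgt.atio_ring_ptr++;
    		}
    		pkt->signature = ATIO_PROCESSED;
    	}
    }

Initialization (qlt_init_atio_q_entries, called below in qla2x00_init_rings) is expected to pre-mark every entry with ATIO_PROCESSED, so a loop like this stops as soon as it catches up with the firmware.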
diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
index 9f065804bd12..9eacd2df111b 100644
--- a/drivers/scsi/qla2xxx/qla_gbl.h
+++ b/drivers/scsi/qla2xxx/qla_gbl.h
@@ -175,6 +175,7 @@ extern int qla2x00_vp_abort_isp(scsi_qla_host_t *);
175/* 175/*
176 * Global Function Prototypes in qla_iocb.c source file. 176 * Global Function Prototypes in qla_iocb.c source file.
177 */ 177 */
178
178extern uint16_t qla2x00_calc_iocbs_32(uint16_t); 179extern uint16_t qla2x00_calc_iocbs_32(uint16_t);
179extern uint16_t qla2x00_calc_iocbs_64(uint16_t); 180extern uint16_t qla2x00_calc_iocbs_64(uint16_t);
180extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t); 181extern void qla2x00_build_scsi_iocbs_32(srb_t *, cmd_entry_t *, uint16_t);
@@ -188,6 +189,8 @@ extern uint16_t qla24xx_calc_iocbs(scsi_qla_host_t *, uint16_t);
188extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t); 189extern void qla24xx_build_scsi_iocbs(srb_t *, struct cmd_type_7 *, uint16_t);
189extern int qla24xx_dif_start_scsi(srb_t *); 190extern int qla24xx_dif_start_scsi(srb_t *);
190 191
192extern void *qla2x00_alloc_iocbs(scsi_qla_host_t *, srb_t *);
193extern int qla2x00_issue_marker(scsi_qla_host_t *, int);
191 194
192/* 195/*
193 * Global Function Prototypes in qla_mbx.c source file. 196 * Global Function Prototypes in qla_mbx.c source file.
@@ -239,6 +242,9 @@ extern int
239qla2x00_init_firmware(scsi_qla_host_t *, uint16_t); 242qla2x00_init_firmware(scsi_qla_host_t *, uint16_t);
240 243
241extern int 244extern int
245qla2x00_get_node_name_list(scsi_qla_host_t *, void **, int *);
246
247extern int
242qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t); 248qla2x00_get_port_database(scsi_qla_host_t *, fc_port_t *, uint8_t);
243 249
244extern int 250extern int
@@ -383,6 +389,8 @@ extern int qla2x00_request_irqs(struct qla_hw_data *, struct rsp_que *);
383extern void qla2x00_free_irqs(scsi_qla_host_t *); 389extern void qla2x00_free_irqs(scsi_qla_host_t *);
384 390
385extern int qla2x00_get_data_rate(scsi_qla_host_t *); 391extern int qla2x00_get_data_rate(scsi_qla_host_t *);
392extern char *qla2x00_get_link_speed_str(struct qla_hw_data *);
393
386/* 394/*
387 * Global Function Prototypes in qla_sup.c source file. 395 * Global Function Prototypes in qla_sup.c source file.
388 */ 396 */
@@ -546,6 +554,7 @@ extern void qla2x00_sp_free(void *, void *);
546extern void qla2x00_sp_timeout(unsigned long); 554extern void qla2x00_sp_timeout(unsigned long);
547extern void qla2x00_bsg_job_done(void *, void *, int); 555extern void qla2x00_bsg_job_done(void *, void *, int);
548extern void qla2x00_bsg_sp_free(void *, void *); 556extern void qla2x00_bsg_sp_free(void *, void *);
557extern void qla2x00_start_iocbs(struct scsi_qla_host *, struct req_que *);
549 558
550/* Interrupt related */ 559/* Interrupt related */
551extern irqreturn_t qla82xx_intr_handler(int, void *); 560extern irqreturn_t qla82xx_intr_handler(int, void *);
diff --git a/drivers/scsi/qla2xxx/qla_gs.c b/drivers/scsi/qla2xxx/qla_gs.c
index 3128f80441f5..05260d25fe46 100644
--- a/drivers/scsi/qla2xxx/qla_gs.c
+++ b/drivers/scsi/qla2xxx/qla_gs.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *); 10static int qla2x00_sns_ga_nxt(scsi_qla_host_t *, fc_port_t *);
10static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *); 11static int qla2x00_sns_gid_pt(scsi_qla_host_t *, sw_info_t *);
@@ -556,7 +557,8 @@ qla2x00_rff_id(scsi_qla_host_t *vha)
556 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area; 557 ct_req->req.rff_id.port_id[1] = vha->d_id.b.area;
557 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa; 558 ct_req->req.rff_id.port_id[2] = vha->d_id.b.al_pa;
558 559
559 ct_req->req.rff_id.fc4_feature = BIT_1; 560 qlt_rff_id(vha, ct_req);
561
560 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */ 562 ct_req->req.rff_id.fc4_type = 0x08; /* SCSI - FCP */
561 563
562 /* Execute MS IOCB */ 564 /* Execute MS IOCB */
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index b9465643396b..ca5084743135 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -17,6 +17,9 @@
17#include <asm/prom.h> 17#include <asm/prom.h>
18#endif 18#endif
19 19
20#include <target/target_core_base.h>
21#include "qla_target.h"
22
20/* 23/*
21* QLogic ISP2x00 Hardware Support Function Prototypes. 24* QLogic ISP2x00 Hardware Support Function Prototypes.
22*/ 25*/
@@ -518,7 +521,10 @@ qla2x00_initialize_adapter(scsi_qla_host_t *vha)
518 return QLA_FUNCTION_FAILED; 521 return QLA_FUNCTION_FAILED;
519 } 522 }
520 } 523 }
521 rval = qla2x00_init_rings(vha); 524
525 if (qla_ini_mode_enabled(vha))
526 rval = qla2x00_init_rings(vha);
527
522 ha->flags.chip_reset_done = 1; 528 ha->flags.chip_reset_done = 1;
523 529
524 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) { 530 if (rval == QLA_SUCCESS && IS_QLA84XX(ha)) {
@@ -1233,6 +1239,8 @@ qla2x00_alloc_fw_dump(scsi_qla_host_t *vha)
1233 mq_size += ha->max_rsp_queues * 1239 mq_size += ha->max_rsp_queues *
1234 (rsp->length * sizeof(response_t)); 1240 (rsp->length * sizeof(response_t));
1235 } 1241 }
1242 if (ha->tgt.atio_q_length)
1243 mq_size += ha->tgt.atio_q_length * sizeof(request_t);
1236 /* Allocate memory for Fibre Channel Event Buffer. */ 1244 /* Allocate memory for Fibre Channel Event Buffer. */
1237 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 1245 if (!IS_QLA25XX(ha) && !IS_QLA81XX(ha) && !IS_QLA83XX(ha))
1238 goto try_eft; 1246 goto try_eft;
@@ -1696,6 +1704,12 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1696 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma)); 1704 icb->response_q_address[0] = cpu_to_le32(LSD(rsp->dma));
1697 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma)); 1705 icb->response_q_address[1] = cpu_to_le32(MSD(rsp->dma));
1698 1706
1707 /* Setup ATIO queue dma pointers for target mode */
1708 icb->atio_q_inpointer = __constant_cpu_to_le16(0);
1709 icb->atio_q_length = cpu_to_le16(ha->tgt.atio_q_length);
1710 icb->atio_q_address[0] = cpu_to_le32(LSD(ha->tgt.atio_dma));
1711 icb->atio_q_address[1] = cpu_to_le32(MSD(ha->tgt.atio_dma));
1712
1699 if (ha->mqenable || IS_QLA83XX(ha)) { 1713 if (ha->mqenable || IS_QLA83XX(ha)) {
1700 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS); 1714 icb->qos = __constant_cpu_to_le16(QLA_DEFAULT_QUE_QOS);
1701 icb->rid = __constant_cpu_to_le16(rid); 1715 icb->rid = __constant_cpu_to_le16(rid);
@@ -1739,6 +1753,8 @@ qla24xx_config_rings(struct scsi_qla_host *vha)
1739 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0); 1753 WRT_REG_DWORD(&reg->isp24.rsp_q_in, 0);
1740 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0); 1754 WRT_REG_DWORD(&reg->isp24.rsp_q_out, 0);
1741 } 1755 }
1756 qlt_24xx_config_rings(vha, reg);
1757
1742 /* PCI posting */ 1758 /* PCI posting */
1743 RD_REG_DWORD(&ioreg->hccr); 1759 RD_REG_DWORD(&ioreg->hccr);
1744} 1760}
@@ -1794,6 +1810,11 @@ qla2x00_init_rings(scsi_qla_host_t *vha)
1794 1810
1795 spin_unlock(&ha->vport_slock); 1811 spin_unlock(&ha->vport_slock);
1796 1812
1813 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
1814 ha->tgt.atio_ring_index = 0;
1815 /* Initialize ATIO queue entries */
1816 qlt_init_atio_q_entries(vha);
1817
1797 ha->isp_ops->config_rings(vha); 1818 ha->isp_ops->config_rings(vha);
1798 1819
1799 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1820 spin_unlock_irqrestore(&ha->hardware_lock, flags);
@@ -2051,6 +2072,10 @@ qla2x00_configure_hba(scsi_qla_host_t *vha)
2051 vha->d_id.b.area = area; 2072 vha->d_id.b.area = area;
2052 vha->d_id.b.al_pa = al_pa; 2073 vha->d_id.b.al_pa = al_pa;
2053 2074
2075 spin_lock(&ha->vport_slock);
2076 qlt_update_vp_map(vha, SET_AL_PA);
2077 spin_unlock(&ha->vport_slock);
2078
2054 if (!vha->flags.init_done) 2079 if (!vha->flags.init_done)
2055 ql_log(ql_log_info, vha, 0x2010, 2080 ql_log(ql_log_info, vha, 0x2010,
2056 "Topology - %s, Host Loop address 0x%x.\n", 2081 "Topology - %s, Host Loop address 0x%x.\n",
@@ -2185,7 +2210,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2185 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) { 2210 nv->id[2] != 'P' || nv->id[3] != ' ' || nv->nvram_version < 1) {
2186 /* Reset NVRAM data. */ 2211 /* Reset NVRAM data. */
2187 ql_log(ql_log_warn, vha, 0x0064, 2212 ql_log(ql_log_warn, vha, 0x0064,
2188 "Inconisistent NVRAM " 2213 "Inconsistent NVRAM "
2189 "detected: checksum=0x%x id=%c version=0x%x.\n", 2214 "detected: checksum=0x%x id=%c version=0x%x.\n",
2190 chksum, nv->id[0], nv->nvram_version); 2215 chksum, nv->id[0], nv->nvram_version);
2191 ql_log(ql_log_warn, vha, 0x0065, 2216 ql_log(ql_log_warn, vha, 0x0065,
@@ -2270,7 +2295,7 @@ qla2x00_nvram_config(scsi_qla_host_t *vha)
2270 if (IS_QLA23XX(ha)) { 2295 if (IS_QLA23XX(ha)) {
2271 nv->firmware_options[0] |= BIT_2; 2296 nv->firmware_options[0] |= BIT_2;
2272 nv->firmware_options[0] &= ~BIT_3; 2297 nv->firmware_options[0] &= ~BIT_3;
2273 nv->firmware_options[0] &= ~BIT_6; 2298 nv->special_options[0] &= ~BIT_6;
2274 nv->add_firmware_options[1] |= BIT_5 | BIT_4; 2299 nv->add_firmware_options[1] |= BIT_5 | BIT_4;
2275 2300
2276 if (IS_QLA2300(ha)) { 2301 if (IS_QLA2300(ha)) {
@@ -2467,14 +2492,21 @@ qla2x00_rport_del(void *data)
2467{ 2492{
2468 fc_port_t *fcport = data; 2493 fc_port_t *fcport = data;
2469 struct fc_rport *rport; 2494 struct fc_rport *rport;
2495 scsi_qla_host_t *vha = fcport->vha;
2470 unsigned long flags; 2496 unsigned long flags;
2471 2497
2472 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2498 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2473 rport = fcport->drport ? fcport->drport: fcport->rport; 2499 rport = fcport->drport ? fcport->drport: fcport->rport;
2474 fcport->drport = NULL; 2500 fcport->drport = NULL;
2475 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2501 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
2476 if (rport) 2502 if (rport) {
2477 fc_remote_port_delete(rport); 2503 fc_remote_port_delete(rport);
2504 /*
2505 * Release the target mode FC NEXUS in qla_target.c code
2506		 * if target mode is enabled.
2507 */
2508 qlt_fc_port_deleted(vha, fcport);
2509 }
2478} 2510}
2479 2511
2480/** 2512/**
@@ -2495,11 +2527,11 @@ qla2x00_alloc_fcport(scsi_qla_host_t *vha, gfp_t flags)
2495 2527
2496 /* Setup fcport template structure. */ 2528 /* Setup fcport template structure. */
2497 fcport->vha = vha; 2529 fcport->vha = vha;
2498 fcport->vp_idx = vha->vp_idx;
2499 fcport->port_type = FCT_UNKNOWN; 2530 fcport->port_type = FCT_UNKNOWN;
2500 fcport->loop_id = FC_NO_LOOP_ID; 2531 fcport->loop_id = FC_NO_LOOP_ID;
2501 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 2532 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
2502 fcport->supported_classes = FC_COS_UNSPECIFIED; 2533 fcport->supported_classes = FC_COS_UNSPECIFIED;
2534 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
2503 2535
2504 return fcport; 2536 return fcport;
2505} 2537}
@@ -2726,7 +2758,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2726 new_fcport->d_id.b.area = area; 2758 new_fcport->d_id.b.area = area;
2727 new_fcport->d_id.b.al_pa = al_pa; 2759 new_fcport->d_id.b.al_pa = al_pa;
2728 new_fcport->loop_id = loop_id; 2760 new_fcport->loop_id = loop_id;
2729 new_fcport->vp_idx = vha->vp_idx;
2730 rval2 = qla2x00_get_port_database(vha, new_fcport, 0); 2761 rval2 = qla2x00_get_port_database(vha, new_fcport, 0);
2731 if (rval2 != QLA_SUCCESS) { 2762 if (rval2 != QLA_SUCCESS) {
2732 ql_dbg(ql_dbg_disc, vha, 0x201a, 2763 ql_dbg(ql_dbg_disc, vha, 0x201a,
@@ -2760,10 +2791,6 @@ qla2x00_configure_local_loop(scsi_qla_host_t *vha)
2760 2791
2761 if (!found) { 2792 if (!found) {
2762 /* New device, add to fcports list. */ 2793 /* New device, add to fcports list. */
2763 if (vha->vp_idx) {
2764 new_fcport->vha = vha;
2765 new_fcport->vp_idx = vha->vp_idx;
2766 }
2767 list_add_tail(&new_fcport->list, &vha->vp_fcports); 2794 list_add_tail(&new_fcport->list, &vha->vp_fcports);
2768 2795
2769 /* Allocate a new replacement fcport. */ 2796 /* Allocate a new replacement fcport. */
@@ -2800,8 +2827,6 @@ cleanup_allocation:
2800static void 2827static void
2801qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport) 2828qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2802{ 2829{
2803#define LS_UNKNOWN 2
2804 static char *link_speeds[] = { "1", "2", "?", "4", "8", "10" };
2805 char *link_speed; 2830 char *link_speed;
2806 int rval; 2831 int rval;
2807 uint16_t mb[4]; 2832 uint16_t mb[4];
@@ -2829,11 +2854,7 @@ qla2x00_iidma_fcport(scsi_qla_host_t *vha, fc_port_t *fcport)
2829 fcport->port_name[6], fcport->port_name[7], rval, 2854 fcport->port_name[6], fcport->port_name[7], rval,
2830 fcport->fp_speed, mb[0], mb[1]); 2855 fcport->fp_speed, mb[0], mb[1]);
2831 } else { 2856 } else {
2832 link_speed = link_speeds[LS_UNKNOWN]; 2857 link_speed = qla2x00_get_link_speed_str(ha);
2833 if (fcport->fp_speed < 5)
2834 link_speed = link_speeds[fcport->fp_speed];
2835 else if (fcport->fp_speed == 0x13)
2836 link_speed = link_speeds[5];
2837 ql_dbg(ql_dbg_disc, vha, 0x2005, 2858 ql_dbg(ql_dbg_disc, vha, 0x2005,
2838 "iIDMA adjusted to %s GB/s " 2859 "iIDMA adjusted to %s GB/s "
2839 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed, 2860 "on %02x%02x%02x%02x%02x%02x%02x%02x.\n", link_speed,
@@ -2864,6 +2885,12 @@ qla2x00_reg_remote_port(scsi_qla_host_t *vha, fc_port_t *fcport)
2864 "Unable to allocate fc remote port.\n"); 2885 "Unable to allocate fc remote port.\n");
2865 return; 2886 return;
2866 } 2887 }
2888 /*
2889 * Create target mode FC NEXUS in qla_target.c if target mode is
2890	 * enabled.
2891 */
2892 qlt_fc_port_added(vha, fcport);
2893
2867 spin_lock_irqsave(fcport->vha->host->host_lock, flags); 2894 spin_lock_irqsave(fcport->vha->host->host_lock, flags);
2868 *((fc_port_t **)rport->dd_data) = fcport; 2895 *((fc_port_t **)rport->dd_data) = fcport;
2869 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(fcport->vha->host->host_lock, flags);
@@ -2921,7 +2948,7 @@ static int
2921qla2x00_configure_fabric(scsi_qla_host_t *vha) 2948qla2x00_configure_fabric(scsi_qla_host_t *vha)
2922{ 2949{
2923 int rval; 2950 int rval;
2924 fc_port_t *fcport, *fcptemp; 2951 fc_port_t *fcport;
2925 uint16_t next_loopid; 2952 uint16_t next_loopid;
2926 uint16_t mb[MAILBOX_REGISTER_COUNT]; 2953 uint16_t mb[MAILBOX_REGISTER_COUNT];
2927 uint16_t loop_id; 2954 uint16_t loop_id;
@@ -2959,7 +2986,7 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2959 0xfc, mb, BIT_1|BIT_0); 2986 0xfc, mb, BIT_1|BIT_0);
2960 if (rval != QLA_SUCCESS) { 2987 if (rval != QLA_SUCCESS) {
2961 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 2988 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
2962 return rval; 2989 break;
2963 } 2990 }
2964 if (mb[0] != MBS_COMMAND_COMPLETE) { 2991 if (mb[0] != MBS_COMMAND_COMPLETE) {
2965 ql_dbg(ql_dbg_disc, vha, 0x2042, 2992 ql_dbg(ql_dbg_disc, vha, 0x2042,
@@ -2991,21 +3018,16 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
2991 } 3018 }
2992 } 3019 }
2993 3020
2994#define QLA_FCPORT_SCAN 1
2995#define QLA_FCPORT_FOUND 2
2996
2997 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2998 fcport->scan_state = QLA_FCPORT_SCAN;
2999 }
3000
3001 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports); 3021 rval = qla2x00_find_all_fabric_devs(vha, &new_fcports);
3002 if (rval != QLA_SUCCESS) 3022 if (rval != QLA_SUCCESS)
3003 break; 3023 break;
3004 3024
3005 /* 3025 /* Add new ports to existing port list */
3006 * Logout all previous fabric devices marked lost, except 3026 list_splice_tail_init(&new_fcports, &vha->vp_fcports);
3007 * FCP2 devices. 3027
3008 */ 3028 /* Starting free loop ID. */
3029 next_loopid = ha->min_external_loopid;
3030
3009 list_for_each_entry(fcport, &vha->vp_fcports, list) { 3031 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3010 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) 3032 if (test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3011 break; 3033 break;
@@ -3013,7 +3035,8 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3013 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0) 3035 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0)
3014 continue; 3036 continue;
3015 3037
3016 if (fcport->scan_state == QLA_FCPORT_SCAN && 3038 /* Logout lost/gone fabric devices (non-FCP2) */
3039 if (fcport->scan_state != QLA_FCPORT_SCAN_FOUND &&
3017 atomic_read(&fcport->state) == FCS_ONLINE) { 3040 atomic_read(&fcport->state) == FCS_ONLINE) {
3018 qla2x00_mark_device_lost(vha, fcport, 3041 qla2x00_mark_device_lost(vha, fcport,
3019 ql2xplogiabsentdevice, 0); 3042 ql2xplogiabsentdevice, 0);
@@ -3026,78 +3049,30 @@ qla2x00_configure_fabric(scsi_qla_host_t *vha)
3026 fcport->d_id.b.domain, 3049 fcport->d_id.b.domain,
3027 fcport->d_id.b.area, 3050 fcport->d_id.b.area,
3028 fcport->d_id.b.al_pa); 3051 fcport->d_id.b.al_pa);
3029 fcport->loop_id = FC_NO_LOOP_ID;
3030 } 3052 }
3031 }
3032 }
3033
3034 /* Starting free loop ID. */
3035 next_loopid = ha->min_external_loopid;
3036
3037 /*
3038 * Scan through our port list and login entries that need to be
3039 * logged in.
3040 */
3041 list_for_each_entry(fcport, &vha->vp_fcports, list) {
3042 if (atomic_read(&vha->loop_down_timer) ||
3043 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3044 break;
3045
3046 if ((fcport->flags & FCF_FABRIC_DEVICE) == 0 ||
3047 (fcport->flags & FCF_LOGIN_NEEDED) == 0)
3048 continue; 3053 continue;
3049
3050 if (fcport->loop_id == FC_NO_LOOP_ID) {
3051 fcport->loop_id = next_loopid;
3052 rval = qla2x00_find_new_loop_id(
3053 base_vha, fcport);
3054 if (rval != QLA_SUCCESS) {
3055 /* Ran out of IDs to use */
3056 break;
3057 }
3058 } 3054 }
3059 /* Login and update database */ 3055 fcport->scan_state = QLA_FCPORT_SCAN_NONE;
3060 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3056
3061 } 3057 /* Login fabric devices that need a login */
3062 3058 if ((fcport->flags & FCF_LOGIN_NEEDED) != 0 &&
3063 /* Exit if out of loop IDs. */ 3059 atomic_read(&vha->loop_down_timer) == 0) {
3064 if (rval != QLA_SUCCESS) { 3060 if (fcport->loop_id == FC_NO_LOOP_ID) {
3065 break; 3061 fcport->loop_id = next_loopid;
3066 } 3062 rval = qla2x00_find_new_loop_id(
3067 3063 base_vha, fcport);
3068 /* 3064 if (rval != QLA_SUCCESS) {
3069 * Login and add the new devices to our port list. 3065 /* Ran out of IDs to use */
3070 */ 3066 continue;
3071 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) { 3067 }
3072 if (atomic_read(&vha->loop_down_timer) || 3068 }
3073 test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags))
3074 break;
3075
3076 /* Find a new loop ID to use. */
3077 fcport->loop_id = next_loopid;
3078 rval = qla2x00_find_new_loop_id(base_vha, fcport);
3079 if (rval != QLA_SUCCESS) {
3080 /* Ran out of IDs to use */
3081 break;
3082 } 3069 }
3083 3070
3084 /* Login and update database */ 3071 /* Login and update database */
3085 qla2x00_fabric_dev_login(vha, fcport, &next_loopid); 3072 qla2x00_fabric_dev_login(vha, fcport, &next_loopid);
3086
3087 if (vha->vp_idx) {
3088 fcport->vha = vha;
3089 fcport->vp_idx = vha->vp_idx;
3090 }
3091 list_move_tail(&fcport->list, &vha->vp_fcports);
3092 } 3073 }
3093 } while (0); 3074 } while (0);
3094 3075
3095 /* Free all new device structures not processed. */
3096 list_for_each_entry_safe(fcport, fcptemp, &new_fcports, list) {
3097 list_del(&fcport->list);
3098 kfree(fcport);
3099 }
3100
3101 if (rval) { 3076 if (rval) {
3102 ql_dbg(ql_dbg_disc, vha, 0x2068, 3077 ql_dbg(ql_dbg_disc, vha, 0x2068,
3103 "Configure fabric error exit rval=%d.\n", rval); 3078 "Configure fabric error exit rval=%d.\n", rval);
@@ -3287,7 +3262,7 @@ qla2x00_find_all_fabric_devs(scsi_qla_host_t *vha,
3287 WWN_SIZE)) 3262 WWN_SIZE))
3288 continue; 3263 continue;
3289 3264
3290 fcport->scan_state = QLA_FCPORT_FOUND; 3265 fcport->scan_state = QLA_FCPORT_SCAN_FOUND;
3291 3266
3292 found++; 3267 found++;
3293 3268
@@ -3595,6 +3570,12 @@ qla2x00_fabric_login(scsi_qla_host_t *vha, fc_port_t *fcport,
3595 if (mb[10] & BIT_1) 3570 if (mb[10] & BIT_1)
3596 fcport->supported_classes |= FC_COS_CLASS3; 3571 fcport->supported_classes |= FC_COS_CLASS3;
3597 3572
3573 if (IS_FWI2_CAPABLE(ha)) {
3574 if (mb[10] & BIT_7)
3575 fcport->flags |=
3576 FCF_CONF_COMP_SUPPORTED;
3577 }
3578
3598 rval = QLA_SUCCESS; 3579 rval = QLA_SUCCESS;
3599 break; 3580 break;
3600 } else if (mb[0] == MBS_LOOP_ID_USED) { 3581 } else if (mb[0] == MBS_LOOP_ID_USED) {
@@ -3841,7 +3822,7 @@ qla2x00_abort_isp_cleanup(scsi_qla_host_t *vha)
3841 vha->flags.online = 0; 3822 vha->flags.online = 0;
3842 ha->flags.chip_reset_done = 0; 3823 ha->flags.chip_reset_done = 0;
3843 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3824 clear_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3844 ha->qla_stats.total_isp_aborts++; 3825 vha->qla_stats.total_isp_aborts++;
3845 3826
3846 ql_log(ql_log_info, vha, 0x00af, 3827 ql_log(ql_log_info, vha, 0x00af,
3847 "Performing ISP error recovery - ha=%p.\n", ha); 3828 "Performing ISP error recovery - ha=%p.\n", ha);
@@ -4066,6 +4047,7 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4066 struct qla_hw_data *ha = vha->hw; 4047 struct qla_hw_data *ha = vha->hw;
4067 struct req_que *req = ha->req_q_map[0]; 4048 struct req_que *req = ha->req_q_map[0];
4068 struct rsp_que *rsp = ha->rsp_q_map[0]; 4049 struct rsp_que *rsp = ha->rsp_q_map[0];
4050 unsigned long flags;
4069 4051
4070 /* If firmware needs to be loaded */ 4052 /* If firmware needs to be loaded */
4071 if (qla2x00_isp_firmware(vha)) { 4053 if (qla2x00_isp_firmware(vha)) {
@@ -4090,6 +4072,16 @@ qla2x00_restart_isp(scsi_qla_host_t *vha)
4090 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL); 4072 qla2x00_marker(vha, req, rsp, 0, 0, MK_SYNC_ALL);
4091 4073
4092 vha->flags.online = 1; 4074 vha->flags.online = 1;
4075
4076 /*
4077 * Process any ATIO queue entries that came in
4078 * while we weren't online.
4079 */
4080 spin_lock_irqsave(&ha->hardware_lock, flags);
4081 if (qla_tgt_mode_enabled(vha))
4082 qlt_24xx_process_atio_queue(vha);
4083 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4084
4093 /* Wait at most MAX_TARGET RSCNs for a stable link. */ 4085 /* Wait at most MAX_TARGET RSCNs for a stable link. */
4094 wait_time = 256; 4086 wait_time = 256;
4095 do { 4087 do {
@@ -4279,7 +4271,7 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4279 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 4271 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
4280 /* Reset NVRAM data. */ 4272 /* Reset NVRAM data. */
4281 ql_log(ql_log_warn, vha, 0x006b, 4273 ql_log(ql_log_warn, vha, 0x006b,
4282 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 4274 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
4283 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version); 4275 "version=0x%x.\n", chksum, nv->id[0], nv->nvram_version);
4284 ql_log(ql_log_warn, vha, 0x006c, 4276 ql_log(ql_log_warn, vha, 0x006c,
4285 "Falling back to functioning (yet invalid -- WWPN) " 4277 "Falling back to functioning (yet invalid -- WWPN) "
@@ -4330,6 +4322,15 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4330 rval = 1; 4322 rval = 1;
4331 } 4323 }
4332 4324
4325 if (!qla_ini_mode_enabled(vha)) {
4326 /* Don't enable full login after initial LIP */
4327 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4328 /* Don't enable LIP full login for initiator */
4329 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4330 }
4331
4332 qlt_24xx_config_nvram_stage1(vha, nv);
4333
4333 /* Reset Initialization control block */ 4334 /* Reset Initialization control block */
4334 memset(icb, 0, ha->init_cb_size); 4335 memset(icb, 0, ha->init_cb_size);
4335 4336
@@ -4357,8 +4358,10 @@ qla24xx_nvram_config(scsi_qla_host_t *vha)
4357 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name), 4358 qla2x00_set_model_info(vha, nv->model_name, sizeof(nv->model_name),
4358 "QLA2462"); 4359 "QLA2462");
4359 4360
4360 /* Use alternate WWN? */ 4361 qlt_24xx_config_nvram_stage2(vha, icb);
4362
4361 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) { 4363 if (nv->host_p & __constant_cpu_to_le32(BIT_15)) {
4364 /* Use alternate WWN? */
4362 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE); 4365 memcpy(icb->node_name, nv->alternate_node_name, WWN_SIZE);
4363 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE); 4366 memcpy(icb->port_name, nv->alternate_port_name, WWN_SIZE);
4364 } 4367 }
@@ -5029,7 +5032,7 @@ qla81xx_nvram_config(scsi_qla_host_t *vha)
5029 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) { 5032 nv->nvram_version < __constant_cpu_to_le16(ICB_VERSION)) {
5030 /* Reset NVRAM data. */ 5033 /* Reset NVRAM data. */
5031 ql_log(ql_log_info, vha, 0x0073, 5034 ql_log(ql_log_info, vha, 0x0073,
5032 "Inconisistent NVRAM detected: checksum=0x%x id=%c " 5035 "Inconsistent NVRAM detected: checksum=0x%x id=%c "
5033 "version=0x%x.\n", chksum, nv->id[0], 5036 "version=0x%x.\n", chksum, nv->id[0],
5034 le16_to_cpu(nv->nvram_version)); 5037 le16_to_cpu(nv->nvram_version));
5035 ql_log(ql_log_info, vha, 0x0074, 5038 ql_log(ql_log_info, vha, 0x0074,
diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c
index eac950924497..70dbf53d9e0f 100644
--- a/drivers/scsi/qla2xxx/qla_iocb.c
+++ b/drivers/scsi/qla2xxx/qla_iocb.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/blkdev.h> 10#include <linux/blkdev.h>
10#include <linux/delay.h> 11#include <linux/delay.h>
@@ -23,18 +24,17 @@ qla2x00_get_cmd_direction(srb_t *sp)
23{ 24{
24 uint16_t cflags; 25 uint16_t cflags;
25 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 26 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
27 struct scsi_qla_host *vha = sp->fcport->vha;
26 28
27 cflags = 0; 29 cflags = 0;
28 30
29 /* Set transfer direction */ 31 /* Set transfer direction */
30 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 32 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
31 cflags = CF_WRITE; 33 cflags = CF_WRITE;
32 sp->fcport->vha->hw->qla_stats.output_bytes += 34 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
33 scsi_bufflen(cmd);
34 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 35 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
35 cflags = CF_READ; 36 cflags = CF_READ;
36 sp->fcport->vha->hw->qla_stats.input_bytes += 37 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
37 scsi_bufflen(cmd);
38 } 38 }
39 return (cflags); 39 return (cflags);
40} 40}
@@ -385,9 +385,10 @@ qla2x00_start_scsi(srb_t *sp)
385 else 385 else
386 req->cnt = req->length - 386 req->cnt = req->length -
387 (req->ring_index - cnt); 387 (req->ring_index - cnt);
388 /* If still no head room then bail out */
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
388 } 391 }
389 if (req->cnt < (req_cnt + 2))
390 goto queuing_error;
391 392
392 /* Build command packet */ 393 /* Build command packet */
393 req->current_outstanding_cmd = handle; 394 req->current_outstanding_cmd = handle;
@@ -470,7 +471,7 @@ queuing_error:
470/** 471/**
471 * qla2x00_start_iocbs() - Execute the IOCB command 472 * qla2x00_start_iocbs() - Execute the IOCB command
472 */ 473 */
473static void 474void
474qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req) 475qla2x00_start_iocbs(struct scsi_qla_host *vha, struct req_que *req)
475{ 476{
476 struct qla_hw_data *ha = vha->hw; 477 struct qla_hw_data *ha = vha->hw;
@@ -571,6 +572,29 @@ qla2x00_marker(struct scsi_qla_host *vha, struct req_que *req,
571 return (ret); 572 return (ret);
572} 573}
573 574
575/*
576 * qla2x00_issue_marker
577 *
578 * Issue marker
579 * Caller CAN have hardware lock held as specified by ha_locked parameter.
580 * Might release it, then reacquire it.
581 */
582int qla2x00_issue_marker(scsi_qla_host_t *vha, int ha_locked)
583{
584 if (ha_locked) {
585 if (__qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
586 MK_SYNC_ALL) != QLA_SUCCESS)
587 return QLA_FUNCTION_FAILED;
588 } else {
589 if (qla2x00_marker(vha, vha->req, vha->req->rsp, 0, 0,
590 MK_SYNC_ALL) != QLA_SUCCESS)
591 return QLA_FUNCTION_FAILED;
592 }
593 vha->marker_needed = 0;
594
595 return QLA_SUCCESS;
596}
597
574/** 598/**
575 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and 599 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
576 * Continuation Type 1 IOCBs to allocate. 600 * Continuation Type 1 IOCBs to allocate.
@@ -629,11 +653,11 @@ qla24xx_build_scsi_type_6_iocbs(srb_t *sp, struct cmd_type_6 *cmd_pkt,
629 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 653 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
630 cmd_pkt->control_flags = 654 cmd_pkt->control_flags =
631 __constant_cpu_to_le16(CF_WRITE_DATA); 655 __constant_cpu_to_le16(CF_WRITE_DATA);
632 ha->qla_stats.output_bytes += scsi_bufflen(cmd); 656 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
633 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 657 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
634 cmd_pkt->control_flags = 658 cmd_pkt->control_flags =
635 __constant_cpu_to_le16(CF_READ_DATA); 659 __constant_cpu_to_le16(CF_READ_DATA);
636 ha->qla_stats.input_bytes += scsi_bufflen(cmd); 660 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
637 } 661 }
638 662
639 cur_seg = scsi_sglist(cmd); 663 cur_seg = scsi_sglist(cmd);
@@ -745,13 +769,11 @@ qla24xx_build_scsi_iocbs(srb_t *sp, struct cmd_type_7 *cmd_pkt,
745 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 769 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
746 cmd_pkt->task_mgmt_flags = 770 cmd_pkt->task_mgmt_flags =
747 __constant_cpu_to_le16(TMF_WRITE_DATA); 771 __constant_cpu_to_le16(TMF_WRITE_DATA);
748 sp->fcport->vha->hw->qla_stats.output_bytes += 772 vha->qla_stats.output_bytes += scsi_bufflen(cmd);
749 scsi_bufflen(cmd);
750 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) { 773 } else if (cmd->sc_data_direction == DMA_FROM_DEVICE) {
751 cmd_pkt->task_mgmt_flags = 774 cmd_pkt->task_mgmt_flags =
752 __constant_cpu_to_le16(TMF_READ_DATA); 775 __constant_cpu_to_le16(TMF_READ_DATA);
753 sp->fcport->vha->hw->qla_stats.input_bytes += 776 vha->qla_stats.input_bytes += scsi_bufflen(cmd);
754 scsi_bufflen(cmd);
755 } 777 }
756 778
757 /* One DSD is available in the Command Type 3 IOCB */ 779 /* One DSD is available in the Command Type 3 IOCB */
@@ -1245,7 +1267,7 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt,
1245 return QLA_SUCCESS; 1267 return QLA_SUCCESS;
1246 } 1268 }
1247 1269
1248 cmd_pkt->vp_index = sp->fcport->vp_idx; 1270 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1249 1271
1250 /* Set transfer direction */ 1272 /* Set transfer direction */
1251 if (cmd->sc_data_direction == DMA_TO_DEVICE) { 1273 if (cmd->sc_data_direction == DMA_TO_DEVICE) {
@@ -1502,9 +1524,9 @@ qla24xx_start_scsi(srb_t *sp)
1502 else 1524 else
1503 req->cnt = req->length - 1525 req->cnt = req->length -
1504 (req->ring_index - cnt); 1526 (req->ring_index - cnt);
1527 if (req->cnt < (req_cnt + 2))
1528 goto queuing_error;
1505 } 1529 }
1506 if (req->cnt < (req_cnt + 2))
1507 goto queuing_error;
1508 1530
1509 /* Build command packet. */ 1531 /* Build command packet. */
1510 req->current_outstanding_cmd = handle; 1532 req->current_outstanding_cmd = handle;
@@ -1527,7 +1549,7 @@ qla24xx_start_scsi(srb_t *sp)
1527 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 1549 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
1528 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 1550 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
1529 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 1551 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
1530 cmd_pkt->vp_index = sp->fcport->vp_idx; 1552 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
1531 1553
1532 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 1554 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
1533 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun)); 1555 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, sizeof(cmd_pkt->lun));
@@ -1717,11 +1739,10 @@ qla24xx_dif_start_scsi(srb_t *sp)
1717 else 1739 else
1718 req->cnt = req->length - 1740 req->cnt = req->length -
1719 (req->ring_index - cnt); 1741 (req->ring_index - cnt);
1742 if (req->cnt < (req_cnt + 2))
1743 goto queuing_error;
1720 } 1744 }
1721 1745
1722 if (req->cnt < (req_cnt + 2))
1723 goto queuing_error;
1724
1725 status |= QDSS_GOT_Q_SPACE; 1746 status |= QDSS_GOT_Q_SPACE;
1726 1747
1727 /* Build header part of command packet (excluding the OPCODE). */ 1748 /* Build header part of command packet (excluding the OPCODE). */
@@ -1898,7 +1919,7 @@ qla24xx_login_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1898 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1919 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1899 logio->port_id[1] = sp->fcport->d_id.b.area; 1920 logio->port_id[1] = sp->fcport->d_id.b.area;
1900 logio->port_id[2] = sp->fcport->d_id.b.domain; 1921 logio->port_id[2] = sp->fcport->d_id.b.domain;
1901 logio->vp_index = sp->fcport->vp_idx; 1922 logio->vp_index = sp->fcport->vha->vp_idx;
1902} 1923}
1903 1924
1904static void 1925static void
@@ -1922,7 +1943,7 @@ qla2x00_login_iocb(srb_t *sp, struct mbx_entry *mbx)
1922 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1943 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1923 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1944 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1924 sp->fcport->d_id.b.al_pa); 1945 sp->fcport->d_id.b.al_pa);
1925 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1946 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1926} 1947}
1927 1948
1928static void 1949static void
@@ -1935,7 +1956,7 @@ qla24xx_logout_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1935 logio->port_id[0] = sp->fcport->d_id.b.al_pa; 1956 logio->port_id[0] = sp->fcport->d_id.b.al_pa;
1936 logio->port_id[1] = sp->fcport->d_id.b.area; 1957 logio->port_id[1] = sp->fcport->d_id.b.area;
1937 logio->port_id[2] = sp->fcport->d_id.b.domain; 1958 logio->port_id[2] = sp->fcport->d_id.b.domain;
1938 logio->vp_index = sp->fcport->vp_idx; 1959 logio->vp_index = sp->fcport->vha->vp_idx;
1939} 1960}
1940 1961
1941static void 1962static void
@@ -1952,7 +1973,7 @@ qla2x00_logout_iocb(srb_t *sp, struct mbx_entry *mbx)
1952 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain); 1973 mbx->mb2 = cpu_to_le16(sp->fcport->d_id.b.domain);
1953 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 | 1974 mbx->mb3 = cpu_to_le16(sp->fcport->d_id.b.area << 8 |
1954 sp->fcport->d_id.b.al_pa); 1975 sp->fcport->d_id.b.al_pa);
1955 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 1976 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1956 /* Implicit: mbx->mbx10 = 0. */ 1977 /* Implicit: mbx->mbx10 = 0. */
1957} 1978}
1958 1979
@@ -1962,7 +1983,7 @@ qla24xx_adisc_iocb(srb_t *sp, struct logio_entry_24xx *logio)
1962 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE; 1983 logio->entry_type = LOGINOUT_PORT_IOCB_TYPE;
1963 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC); 1984 logio->control_flags = cpu_to_le16(LCF_COMMAND_ADISC);
1964 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id); 1985 logio->nport_handle = cpu_to_le16(sp->fcport->loop_id);
1965 logio->vp_index = sp->fcport->vp_idx; 1986 logio->vp_index = sp->fcport->vha->vp_idx;
1966} 1987}
1967 1988
1968static void 1989static void
@@ -1983,7 +2004,7 @@ qla2x00_adisc_iocb(srb_t *sp, struct mbx_entry *mbx)
1983 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma)); 2004 mbx->mb3 = cpu_to_le16(LSW(ha->async_pd_dma));
1984 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma))); 2005 mbx->mb6 = cpu_to_le16(MSW(MSD(ha->async_pd_dma)));
1985 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma))); 2006 mbx->mb7 = cpu_to_le16(LSW(MSD(ha->async_pd_dma)));
1986 mbx->mb9 = cpu_to_le16(sp->fcport->vp_idx); 2007 mbx->mb9 = cpu_to_le16(sp->fcport->vha->vp_idx);
1987} 2008}
1988 2009
1989static void 2010static void
@@ -2009,7 +2030,7 @@ qla24xx_tm_iocb(srb_t *sp, struct tsk_mgmt_entry *tsk)
2009 tsk->port_id[0] = fcport->d_id.b.al_pa; 2030 tsk->port_id[0] = fcport->d_id.b.al_pa;
2010 tsk->port_id[1] = fcport->d_id.b.area; 2031 tsk->port_id[1] = fcport->d_id.b.area;
2011 tsk->port_id[2] = fcport->d_id.b.domain; 2032 tsk->port_id[2] = fcport->d_id.b.domain;
2012 tsk->vp_index = fcport->vp_idx; 2033 tsk->vp_index = fcport->vha->vp_idx;
2013 2034
2014 if (flags == TCF_LUN_RESET) { 2035 if (flags == TCF_LUN_RESET) {
2015 int_to_scsilun(lun, &tsk->lun); 2036 int_to_scsilun(lun, &tsk->lun);
@@ -2030,7 +2051,7 @@ qla24xx_els_iocb(srb_t *sp, struct els_entry_24xx *els_iocb)
2030 els_iocb->handle = sp->handle; 2051 els_iocb->handle = sp->handle;
2031 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2052 els_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2032 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt); 2053 els_iocb->tx_dsd_count = __constant_cpu_to_le16(bsg_job->request_payload.sg_cnt);
2033 els_iocb->vp_index = sp->fcport->vp_idx; 2054 els_iocb->vp_index = sp->fcport->vha->vp_idx;
2034 els_iocb->sof_type = EST_SOFI3; 2055 els_iocb->sof_type = EST_SOFI3;
2035 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt); 2056 els_iocb->rx_dsd_count = __constant_cpu_to_le16(bsg_job->reply_payload.sg_cnt);
2036 2057
@@ -2160,7 +2181,7 @@ qla24xx_ct_iocb(srb_t *sp, struct ct_entry_24xx *ct_iocb)
2160 ct_iocb->handle = sp->handle; 2181 ct_iocb->handle = sp->handle;
2161 2182
2162 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id); 2183 ct_iocb->nport_handle = cpu_to_le16(sp->fcport->loop_id);
2163 ct_iocb->vp_index = sp->fcport->vp_idx; 2184 ct_iocb->vp_index = sp->fcport->vha->vp_idx;
2164 ct_iocb->comp_status = __constant_cpu_to_le16(0); 2185 ct_iocb->comp_status = __constant_cpu_to_le16(0);
2165 2186
2166 ct_iocb->cmd_dsd_count = 2187 ct_iocb->cmd_dsd_count =
@@ -2343,11 +2364,10 @@ sufficient_dsds:
2343 else 2364 else
2344 req->cnt = req->length - 2365 req->cnt = req->length -
2345 (req->ring_index - cnt); 2366 (req->ring_index - cnt);
2367 if (req->cnt < (req_cnt + 2))
2368 goto queuing_error;
2346 } 2369 }
2347 2370
2348 if (req->cnt < (req_cnt + 2))
2349 goto queuing_error;
2350
2351 ctx = sp->u.scmd.ctx = 2371 ctx = sp->u.scmd.ctx =
2352 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC); 2372 mempool_alloc(ha->ctx_mempool, GFP_ATOMIC);
2353 if (!ctx) { 2373 if (!ctx) {
@@ -2362,7 +2382,7 @@ sufficient_dsds:
2362 if (!ctx->fcp_cmnd) { 2382 if (!ctx->fcp_cmnd) {
2363 ql_log(ql_log_fatal, vha, 0x3011, 2383 ql_log(ql_log_fatal, vha, 0x3011,
2364 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd); 2384 "Failed to allocate fcp_cmnd for cmd=%p.\n", cmd);
2365 goto queuing_error_fcp_cmnd; 2385 goto queuing_error;
2366 } 2386 }
2367 2387
2368 /* Initialize the DSD list and dma handle */ 2388 /* Initialize the DSD list and dma handle */
@@ -2400,7 +2420,7 @@ sufficient_dsds:
2400 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2420 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2401 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2421 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2402 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2422 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2403 cmd_pkt->vp_index = sp->fcport->vp_idx; 2423 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2404 2424
2405 /* Build IOCB segments */ 2425 /* Build IOCB segments */
2406 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds)) 2426 if (qla24xx_build_scsi_type_6_iocbs(sp, cmd_pkt, tot_dsds))
@@ -2489,7 +2509,7 @@ sufficient_dsds:
2489 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa; 2509 cmd_pkt->port_id[0] = sp->fcport->d_id.b.al_pa;
2490 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area; 2510 cmd_pkt->port_id[1] = sp->fcport->d_id.b.area;
2491 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain; 2511 cmd_pkt->port_id[2] = sp->fcport->d_id.b.domain;
2492 cmd_pkt->vp_index = sp->fcport->vp_idx; 2512 cmd_pkt->vp_index = sp->fcport->vha->vp_idx;
2493 2513
2494 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun); 2514 int_to_scsilun(cmd->device->lun, &cmd_pkt->lun);
2495 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun, 2515 host_to_fcp_swap((uint8_t *)&cmd_pkt->lun,
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index ce42288049b5..6f67a9d4998b 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/slab.h> 11#include <linux/slab.h>
@@ -309,6 +310,28 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
309 "IDC failed to post ACK.\n"); 310 "IDC failed to post ACK.\n");
310} 311}
311 312
313#define LS_UNKNOWN 2
314char *
315qla2x00_get_link_speed_str(struct qla_hw_data *ha)
316{
317 static char *link_speeds[] = {"1", "2", "?", "4", "8", "16", "10"};
318 char *link_speed;
319 int fw_speed = ha->link_data_rate;
320
321 if (IS_QLA2100(ha) || IS_QLA2200(ha))
322 link_speed = link_speeds[0];
323 else if (fw_speed == 0x13)
324 link_speed = link_speeds[6];
325 else {
326 link_speed = link_speeds[LS_UNKNOWN];
327 if (fw_speed < 6)
328 link_speed =
329 link_speeds[fw_speed];
330 }
331
332 return link_speed;
333}
334
312/** 335/**
 313 * qla2x00_async_event() - Process asynchronous events. 336 * qla2x00_async_event() - Process asynchronous events.
314 * @ha: SCSI driver HA context 337 * @ha: SCSI driver HA context
@@ -317,9 +340,6 @@ qla81xx_idc_event(scsi_qla_host_t *vha, uint16_t aen, uint16_t descr)
317void 340void
318qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb) 341qla2x00_async_event(scsi_qla_host_t *vha, struct rsp_que *rsp, uint16_t *mb)
319{ 342{
320#define LS_UNKNOWN 2
321 static char *link_speeds[] = { "1", "2", "?", "4", "8", "16", "10" };
322 char *link_speed;
323 uint16_t handle_cnt; 343 uint16_t handle_cnt;
324 uint16_t cnt, mbx; 344 uint16_t cnt, mbx;
325 uint32_t handles[5]; 345 uint32_t handles[5];
@@ -454,8 +474,8 @@ skip_rio:
454 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */ 474 case MBA_WAKEUP_THRES: /* Request Queue Wake-up */
455 ql_dbg(ql_dbg_async, vha, 0x5008, 475 ql_dbg(ql_dbg_async, vha, 0x5008,
456 "Asynchronous WAKEUP_THRES.\n"); 476 "Asynchronous WAKEUP_THRES.\n");
457 break;
458 477
478 break;
459 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */ 479 case MBA_LIP_OCCURRED: /* Loop Initialization Procedure */
460 ql_dbg(ql_dbg_async, vha, 0x5009, 480 ql_dbg(ql_dbg_async, vha, 0x5009,
461 "LIP occurred (%x).\n", mb[1]); 481 "LIP occurred (%x).\n", mb[1]);
@@ -479,20 +499,14 @@ skip_rio:
479 break; 499 break;
480 500
481 case MBA_LOOP_UP: /* Loop Up Event */ 501 case MBA_LOOP_UP: /* Loop Up Event */
482 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 502 if (IS_QLA2100(ha) || IS_QLA2200(ha))
483 link_speed = link_speeds[0];
484 ha->link_data_rate = PORT_SPEED_1GB; 503 ha->link_data_rate = PORT_SPEED_1GB;
485 } else { 504 else
486 link_speed = link_speeds[LS_UNKNOWN];
487 if (mb[1] < 6)
488 link_speed = link_speeds[mb[1]];
489 else if (mb[1] == 0x13)
490 link_speed = link_speeds[6];
491 ha->link_data_rate = mb[1]; 505 ha->link_data_rate = mb[1];
492 }
493 506
494 ql_dbg(ql_dbg_async, vha, 0x500a, 507 ql_dbg(ql_dbg_async, vha, 0x500a,
495 "LOOP UP detected (%s Gbps).\n", link_speed); 508 "LOOP UP detected (%s Gbps).\n",
509 qla2x00_get_link_speed_str(ha));
496 510
497 vha->flags.management_server_logged_in = 0; 511 vha->flags.management_server_logged_in = 0;
498 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate); 512 qla2x00_post_aen_work(vha, FCH_EVT_LINKUP, ha->link_data_rate);
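The two hunks above replace the open-coded speed table in qla2x00_async_event() with the new qla2x00_get_link_speed_str() helper. A small standalone sketch of the same lookup is below; the function and parameter names are illustrative only, and the chip check is collapsed into a flag argument rather than the driver's IS_QLA2100/IS_QLA2200 macros.

/*
 * Firmware rate codes 0-5 index the speed table directly, 0x13 means
 * 10 Gbps, anything else reports as unknown; 2100/2200 parts are fixed
 * at 1 Gbps.
 */
#include <stdio.h>

static const char *link_speed_str(int fw_speed, int is_2100_or_2200)
{
    static const char *speeds[] = { "1", "2", "?", "4", "8", "16", "10" };

    if (is_2100_or_2200)
        return speeds[0];
    if (fw_speed == 0x13)
        return speeds[6];
    if (fw_speed >= 0 && fw_speed < 6)
        return speeds[fw_speed];
    return speeds[2];                     /* LS_UNKNOWN */
}

int main(void)
{
    printf("%s Gbps\n", link_speed_str(0x13, 0)); /* 10 */
    printf("%s Gbps\n", link_speed_str(4, 0));    /* 8  */
    printf("%s Gbps\n", link_speed_str(9, 0));    /* ?  */
    return 0;
}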
@@ -638,6 +652,8 @@ skip_rio:
638 ql_dbg(ql_dbg_async, vha, 0x5010, 652 ql_dbg(ql_dbg_async, vha, 0x5010,
639 "Port unavailable %04x %04x %04x.\n", 653 "Port unavailable %04x %04x %04x.\n",
640 mb[1], mb[2], mb[3]); 654 mb[1], mb[2], mb[3]);
655 ql_log(ql_log_warn, vha, 0x505e,
656 "Link is offline.\n");
641 657
642 if (atomic_read(&vha->loop_state) != LOOP_DOWN) { 658 if (atomic_read(&vha->loop_state) != LOOP_DOWN) {
643 atomic_set(&vha->loop_state, LOOP_DOWN); 659 atomic_set(&vha->loop_state, LOOP_DOWN);
@@ -670,12 +686,17 @@ skip_rio:
670 ql_dbg(ql_dbg_async, vha, 0x5011, 686 ql_dbg(ql_dbg_async, vha, 0x5011,
671 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n", 687 "Asynchronous PORT UPDATE ignored %04x/%04x/%04x.\n",
672 mb[1], mb[2], mb[3]); 688 mb[1], mb[2], mb[3]);
689
690 qlt_async_event(mb[0], vha, mb);
673 break; 691 break;
674 } 692 }
675 693
676 ql_dbg(ql_dbg_async, vha, 0x5012, 694 ql_dbg(ql_dbg_async, vha, 0x5012,
677 "Port database changed %04x %04x %04x.\n", 695 "Port database changed %04x %04x %04x.\n",
678 mb[1], mb[2], mb[3]); 696 mb[1], mb[2], mb[3]);
697 ql_log(ql_log_warn, vha, 0x505f,
698 "Link is operational (%s Gbps).\n",
699 qla2x00_get_link_speed_str(ha));
679 700
680 /* 701 /*
681 * Mark all devices as missing so we will login again. 702 * Mark all devices as missing so we will login again.
@@ -684,8 +705,13 @@ skip_rio:
684 705
685 qla2x00_mark_all_devices_lost(vha, 1); 706 qla2x00_mark_all_devices_lost(vha, 1);
686 707
708 if (vha->vp_idx == 0 && !qla_ini_mode_enabled(vha))
709 set_bit(SCR_PENDING, &vha->dpc_flags);
710
687 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags); 711 set_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags);
688 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags); 712 set_bit(LOCAL_LOOP_UPDATE, &vha->dpc_flags);
713
714 qlt_async_event(mb[0], vha, mb);
689 break; 715 break;
690 716
691 case MBA_RSCN_UPDATE: /* State Change Registration */ 717 case MBA_RSCN_UPDATE: /* State Change Registration */
@@ -807,6 +833,8 @@ skip_rio:
807 mb[0], mb[1], mb[2], mb[3]); 833 mb[0], mb[1], mb[2], mb[3]);
808 } 834 }
809 835
836 qlt_async_event(mb[0], vha, mb);
837
810 if (!vha->vp_idx && ha->num_vhosts) 838 if (!vha->vp_idx && ha->num_vhosts)
811 qla2x00_alert_all_vps(rsp, mb); 839 qla2x00_alert_all_vps(rsp, mb);
812} 840}
@@ -1172,6 +1200,9 @@ qla24xx_logio_entry(scsi_qla_host_t *vha, struct req_que *req,
1172 } else if (iop[0] & BIT_5) 1200 } else if (iop[0] & BIT_5)
1173 fcport->port_type = FCT_INITIATOR; 1201 fcport->port_type = FCT_INITIATOR;
1174 1202
1203 if (iop[0] & BIT_7)
1204 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1205
1175 if (logio->io_parameter[7] || logio->io_parameter[8]) 1206 if (logio->io_parameter[7] || logio->io_parameter[8])
1176 fcport->supported_classes |= FC_COS_CLASS2; 1207 fcport->supported_classes |= FC_COS_CLASS2;
1177 if (logio->io_parameter[9] || logio->io_parameter[10]) 1208 if (logio->io_parameter[9] || logio->io_parameter[10])
@@ -1986,6 +2017,9 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
1986 2017
1987 if (pkt->entry_status != 0) { 2018 if (pkt->entry_status != 0) {
1988 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt); 2019 qla2x00_error_entry(vha, rsp, (sts_entry_t *) pkt);
2020
2021 (void)qlt_24xx_process_response_error(vha, pkt);
2022
1989 ((response_t *)pkt)->signature = RESPONSE_PROCESSED; 2023 ((response_t *)pkt)->signature = RESPONSE_PROCESSED;
1990 wmb(); 2024 wmb();
1991 continue; 2025 continue;
@@ -2016,6 +2050,14 @@ void qla24xx_process_response_queue(struct scsi_qla_host *vha,
2016 case ELS_IOCB_TYPE: 2050 case ELS_IOCB_TYPE:
2017 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE); 2051 qla24xx_els_ct_entry(vha, rsp->req, pkt, ELS_IOCB_TYPE);
2018 break; 2052 break;
2053 case ABTS_RECV_24XX:
2054 /* ensure that the ATIO queue is empty */
2055 qlt_24xx_process_atio_queue(vha);
2056 case ABTS_RESP_24XX:
2057 case CTIO_TYPE7:
2058 case NOTIFY_ACK_TYPE:
2059 qlt_response_pkt_all_vps(vha, (response_t *)pkt);
2060 break;
2019 case MARKER_TYPE: 2061 case MARKER_TYPE:
2020 /* Do nothing in this case, this check is to prevent it 2062 /* Do nothing in this case, this check is to prevent it
2021 * from falling into default case 2063 * from falling into default case
@@ -2168,6 +2210,13 @@ qla24xx_intr_handler(int irq, void *dev_id)
2168 case 0x14: 2210 case 0x14:
2169 qla24xx_process_response_queue(vha, rsp); 2211 qla24xx_process_response_queue(vha, rsp);
2170 break; 2212 break;
2213 case 0x1C: /* ATIO queue updated */
2214 qlt_24xx_process_atio_queue(vha);
2215 break;
2216 case 0x1D: /* ATIO and response queues updated */
2217 qlt_24xx_process_atio_queue(vha);
2218 qla24xx_process_response_queue(vha, rsp);
2219 break;
2171 default: 2220 default:
2172 ql_dbg(ql_dbg_async, vha, 0x504f, 2221 ql_dbg(ql_dbg_async, vha, 0x504f,
2173 "Unrecognized interrupt type (%d).\n", stat * 0xff); 2222 "Unrecognized interrupt type (%d).\n", stat * 0xff);
@@ -2312,6 +2361,13 @@ qla24xx_msix_default(int irq, void *dev_id)
2312 case 0x14: 2361 case 0x14:
2313 qla24xx_process_response_queue(vha, rsp); 2362 qla24xx_process_response_queue(vha, rsp);
2314 break; 2363 break;
2364 case 0x1C: /* ATIO queue updated */
2365 qlt_24xx_process_atio_queue(vha);
2366 break;
2367 case 0x1D: /* ATIO and response queues updated */
2368 qlt_24xx_process_atio_queue(vha);
2369 qla24xx_process_response_queue(vha, rsp);
2370 break;
2315 default: 2371 default:
2316 ql_dbg(ql_dbg_async, vha, 0x5051, 2372 ql_dbg(ql_dbg_async, vha, 0x5051,
2317 "Unrecognized interrupt type (%d).\n", stat & 0xff); 2373 "Unrecognized interrupt type (%d).\n", stat & 0xff);
@@ -2564,7 +2620,15 @@ void
2564qla2x00_free_irqs(scsi_qla_host_t *vha) 2620qla2x00_free_irqs(scsi_qla_host_t *vha)
2565{ 2621{
2566 struct qla_hw_data *ha = vha->hw; 2622 struct qla_hw_data *ha = vha->hw;
2567 struct rsp_que *rsp = ha->rsp_q_map[0]; 2623 struct rsp_que *rsp;
2624
2625 /*
2626 * We need to check that ha->rsp_q_map is valid in case we are called
2627 * from a probe failure context.
2628 */
2629 if (!ha->rsp_q_map || !ha->rsp_q_map[0])
2630 return;
2631 rsp = ha->rsp_q_map[0];
2568 2632
2569 if (ha->flags.msix_enabled) 2633 if (ha->flags.msix_enabled)
2570 qla24xx_disable_msix(ha); 2634 qla24xx_disable_msix(ha);
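The guard added to qla2x00_free_irqs() is the usual defensive pattern for teardown that can be reached from a half-finished probe: confirm the response-queue map exists before dereferencing its first slot. A minimal sketch of the same idea, with made-up structures rather than the driver's qla_hw_data/rsp_que types:

#include <stddef.h>
#include <stdio.h>

struct rsp_queue { int id; };
struct hw_data   { struct rsp_queue **rsp_q_map; };

static void free_irqs(struct hw_data *ha)
{
    struct rsp_queue *rsp;

    if (!ha->rsp_q_map || !ha->rsp_q_map[0])
        return;                          /* probe failed early: nothing to free */
    rsp = ha->rsp_q_map[0];
    printf("freeing irq for rsp queue %d\n", rsp->id);
}

int main(void)
{
    struct rsp_queue q0 = { .id = 0 };
    struct rsp_queue *map[1] = { &q0 };
    struct hw_data failed = { .rsp_q_map = NULL };
    struct hw_data ok = { .rsp_q_map = map };

    free_irqs(&failed);                  /* safe no-op */
    free_irqs(&ok);                      /* normal teardown path */
    return 0;
}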
diff --git a/drivers/scsi/qla2xxx/qla_mbx.c b/drivers/scsi/qla2xxx/qla_mbx.c
index b4a23394a7bd..d5ce92c0a8fc 100644
--- a/drivers/scsi/qla2xxx/qla_mbx.c
+++ b/drivers/scsi/qla2xxx/qla_mbx.c
@@ -5,6 +5,7 @@
5 * See LICENSE.qla2xxx for copyright and licensing details. 5 * See LICENSE.qla2xxx for copyright and licensing details.
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_target.h"
8 9
9#include <linux/delay.h> 10#include <linux/delay.h>
10#include <linux/gfp.h> 11#include <linux/gfp.h>
@@ -270,11 +271,8 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
270 ictrl = RD_REG_WORD(&reg->isp.ictrl); 271 ictrl = RD_REG_WORD(&reg->isp.ictrl);
271 } 272 }
272 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119, 273 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1119,
273 "MBX Command timeout for cmd %x.\n", command); 274 "MBX Command timeout for cmd %x, iocontrol=%x jiffies=%lx "
274 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111a, 275 "mb[0]=0x%x\n", command, ictrl, jiffies, mb0);
275 "iocontrol=%x jiffies=%lx.\n", ictrl, jiffies);
276 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111b,
277 "mb[0] = 0x%x.\n", mb0);
278 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019); 276 ql_dump_regs(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1019);
279 277
280 /* 278 /*
@@ -320,7 +318,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
320 CRB_NIU_XG_PAUSE_CTL_P1); 318 CRB_NIU_XG_PAUSE_CTL_P1);
321 } 319 }
322 ql_log(ql_log_info, base_vha, 0x101c, 320 ql_log(ql_log_info, base_vha, 0x101c,
323 "Mailbox cmd timeout occured, cmd=0x%x, " 321 "Mailbox cmd timeout occurred, cmd=0x%x, "
324 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP " 322 "mb[0]=0x%x, eeh_busy=0x%x. Scheduling ISP "
325 "abort.\n", command, mcp->mb[0], 323 "abort.\n", command, mcp->mb[0],
326 ha->flags.eeh_busy); 324 ha->flags.eeh_busy);
@@ -345,7 +343,7 @@ qla2x00_mailbox_command(scsi_qla_host_t *vha, mbx_cmd_t *mcp)
345 CRB_NIU_XG_PAUSE_CTL_P1); 343 CRB_NIU_XG_PAUSE_CTL_P1);
346 } 344 }
347 ql_log(ql_log_info, base_vha, 0x101e, 345 ql_log(ql_log_info, base_vha, 0x101e,
348 "Mailbox cmd timeout occured, cmd=0x%x, " 346 "Mailbox cmd timeout occurred, cmd=0x%x, "
349 "mb[0]=0x%x. Scheduling ISP abort ", 347 "mb[0]=0x%x. Scheduling ISP abort ",
350 command, mcp->mb[0]); 348 command, mcp->mb[0]);
351 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags); 349 set_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
@@ -390,7 +388,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
390 mbx_cmd_t mc; 388 mbx_cmd_t mc;
391 mbx_cmd_t *mcp = &mc; 389 mbx_cmd_t *mcp = &mc;
392 390
393 ql_dbg(ql_dbg_mbx, vha, 0x1022, "Entered %s.\n", __func__); 391 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1022,
392 "Entered %s.\n", __func__);
394 393
395 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) { 394 if (MSW(risc_addr) || IS_FWI2_CAPABLE(ha)) {
396 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED; 395 mcp->mb[0] = MBC_LOAD_RISC_RAM_EXTENDED;
@@ -424,7 +423,8 @@ qla2x00_load_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t risc_addr,
424 ql_dbg(ql_dbg_mbx, vha, 0x1023, 423 ql_dbg(ql_dbg_mbx, vha, 0x1023,
425 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 424 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
426 } else { 425 } else {
427 ql_dbg(ql_dbg_mbx, vha, 0x1024, "Done %s.\n", __func__); 426 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1024,
427 "Done %s.\n", __func__);
428 } 428 }
429 429
430 return rval; 430 return rval;
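This is the first of many hunks in qla_mbx.c that move the routine "Entered %s" / "Done %s" trace lines from ql_dbg_mbx to ql_dbg_mbx + ql_dbg_verbose. Assuming the all-bits-must-match mask filter that such level bits suggest (a guess about the mechanism; the constants and helper below are invented for illustration, not the driver's), the effect is that this per-call chatter stays silent unless verbose tracing is explicitly enabled:

#include <stdarg.h>
#include <stdio.h>

#define DBG_MBX      0x01000000u
#define DBG_VERBOSE  0x80000000u

static unsigned int debug_mask = DBG_MBX;     /* mbx on, verbose off */

static void dbg(unsigned int level, const char *fmt, ...)
{
    va_list ap;

    if ((level & debug_mask) != level)
        return;                               /* some requested bit is off */
    va_start(ap, fmt);
    vprintf(fmt, ap);
    va_end(ap);
}

int main(void)
{
    dbg(DBG_MBX, "mailbox timeout\n");                    /* printed    */
    dbg(DBG_MBX | DBG_VERBOSE, "Entered %s.\n", "foo");   /* suppressed */
    return 0;
}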
@@ -454,7 +454,8 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
454 mbx_cmd_t mc; 454 mbx_cmd_t mc;
455 mbx_cmd_t *mcp = &mc; 455 mbx_cmd_t *mcp = &mc;
456 456
457 ql_dbg(ql_dbg_mbx, vha, 0x1025, "Entered %s.\n", __func__); 457 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1025,
458 "Entered %s.\n", __func__);
458 459
459 mcp->mb[0] = MBC_EXECUTE_FIRMWARE; 460 mcp->mb[0] = MBC_EXECUTE_FIRMWARE;
460 mcp->out_mb = MBX_0; 461 mcp->out_mb = MBX_0;
@@ -489,10 +490,11 @@ qla2x00_execute_fw(scsi_qla_host_t *vha, uint32_t risc_addr)
489 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 490 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
490 } else { 491 } else {
491 if (IS_FWI2_CAPABLE(ha)) { 492 if (IS_FWI2_CAPABLE(ha)) {
492 ql_dbg(ql_dbg_mbx, vha, 0x1027, 493 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1027,
493 "Done exchanges=%x.\n", mcp->mb[1]); 494 "Done exchanges=%x.\n", mcp->mb[1]);
494 } else { 495 } else {
495 ql_dbg(ql_dbg_mbx, vha, 0x1028, "Done %s.\n", __func__); 496 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1028,
497 "Done %s.\n", __func__);
496 } 498 }
497 } 499 }
498 500
@@ -523,7 +525,8 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
523 mbx_cmd_t *mcp = &mc; 525 mbx_cmd_t *mcp = &mc;
524 struct qla_hw_data *ha = vha->hw; 526 struct qla_hw_data *ha = vha->hw;
525 527
526 ql_dbg(ql_dbg_mbx, vha, 0x1029, "Entered %s.\n", __func__); 528 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1029,
529 "Entered %s.\n", __func__);
527 530
528 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION; 531 mcp->mb[0] = MBC_GET_FIRMWARE_VERSION;
529 mcp->out_mb = MBX_0; 532 mcp->out_mb = MBX_0;
@@ -561,11 +564,11 @@ qla2x00_get_fw_version(scsi_qla_host_t *vha)
561 ha->fw_attributes_h = mcp->mb[15]; 564 ha->fw_attributes_h = mcp->mb[15];
562 ha->fw_attributes_ext[0] = mcp->mb[16]; 565 ha->fw_attributes_ext[0] = mcp->mb[16];
563 ha->fw_attributes_ext[1] = mcp->mb[17]; 566 ha->fw_attributes_ext[1] = mcp->mb[17];
564 ql_dbg(ql_dbg_mbx, vha, 0x1139, 567 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1139,
565 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n", 568 "%s: FW_attributes Upper: 0x%x, Lower: 0x%x.\n",
566 __func__, mcp->mb[15], mcp->mb[6]); 569 __func__, mcp->mb[15], mcp->mb[6]);
567 } else 570 } else
568 ql_dbg(ql_dbg_mbx, vha, 0x112f, 571 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x112f,
569 "%s: FwAttributes [Upper] invalid, MB6:%04x\n", 572 "%s: FwAttributes [Upper] invalid, MB6:%04x\n",
570 __func__, mcp->mb[6]); 573 __func__, mcp->mb[6]);
571 } 574 }
@@ -576,7 +579,8 @@ failed:
576 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval); 579 ql_dbg(ql_dbg_mbx, vha, 0x102a, "Failed=%x.\n", rval);
577 } else { 580 } else {
578 /*EMPTY*/ 581 /*EMPTY*/
579 ql_dbg(ql_dbg_mbx, vha, 0x102b, "Done %s.\n", __func__); 582 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102b,
583 "Done %s.\n", __func__);
580 } 584 }
581 return rval; 585 return rval;
582} 586}
@@ -602,7 +606,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
602 mbx_cmd_t mc; 606 mbx_cmd_t mc;
603 mbx_cmd_t *mcp = &mc; 607 mbx_cmd_t *mcp = &mc;
604 608
605 ql_dbg(ql_dbg_mbx, vha, 0x102c, "Entered %s.\n", __func__); 609 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102c,
610 "Entered %s.\n", __func__);
606 611
607 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION; 612 mcp->mb[0] = MBC_GET_FIRMWARE_OPTION;
608 mcp->out_mb = MBX_0; 613 mcp->out_mb = MBX_0;
@@ -620,7 +625,8 @@ qla2x00_get_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
620 fwopts[2] = mcp->mb[2]; 625 fwopts[2] = mcp->mb[2];
621 fwopts[3] = mcp->mb[3]; 626 fwopts[3] = mcp->mb[3];
622 627
623 ql_dbg(ql_dbg_mbx, vha, 0x102e, "Done %s.\n", __func__); 628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102e,
629 "Done %s.\n", __func__);
624 } 630 }
625 631
626 return rval; 632 return rval;
@@ -648,7 +654,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
648 mbx_cmd_t mc; 654 mbx_cmd_t mc;
649 mbx_cmd_t *mcp = &mc; 655 mbx_cmd_t *mcp = &mc;
650 656
651 ql_dbg(ql_dbg_mbx, vha, 0x102f, "Entered %s.\n", __func__); 657 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x102f,
658 "Entered %s.\n", __func__);
652 659
653 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION; 660 mcp->mb[0] = MBC_SET_FIRMWARE_OPTION;
654 mcp->mb[1] = fwopts[1]; 661 mcp->mb[1] = fwopts[1];
@@ -676,7 +683,8 @@ qla2x00_set_fw_options(scsi_qla_host_t *vha, uint16_t *fwopts)
676 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]); 683 "Failed=%x (%x/%x).\n", rval, mcp->mb[0], mcp->mb[1]);
677 } else { 684 } else {
678 /*EMPTY*/ 685 /*EMPTY*/
679 ql_dbg(ql_dbg_mbx, vha, 0x1031, "Done %s.\n", __func__); 686 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1031,
687 "Done %s.\n", __func__);
680 } 688 }
681 689
682 return rval; 690 return rval;
@@ -704,7 +712,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
704 mbx_cmd_t mc; 712 mbx_cmd_t mc;
705 mbx_cmd_t *mcp = &mc; 713 mbx_cmd_t *mcp = &mc;
706 714
707 ql_dbg(ql_dbg_mbx, vha, 0x1032, "Entered %s.\n", __func__); 715 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1032,
716 "Entered %s.\n", __func__);
708 717
709 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST; 718 mcp->mb[0] = MBC_MAILBOX_REGISTER_TEST;
710 mcp->mb[1] = 0xAAAA; 719 mcp->mb[1] = 0xAAAA;
@@ -734,7 +743,8 @@ qla2x00_mbx_reg_test(scsi_qla_host_t *vha)
734 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval); 743 ql_dbg(ql_dbg_mbx, vha, 0x1033, "Failed=%x.\n", rval);
735 } else { 744 } else {
736 /*EMPTY*/ 745 /*EMPTY*/
737 ql_dbg(ql_dbg_mbx, vha, 0x1034, "Done %s.\n", __func__); 746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1034,
747 "Done %s.\n", __func__);
738 } 748 }
739 749
740 return rval; 750 return rval;
@@ -762,7 +772,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
762 mbx_cmd_t mc; 772 mbx_cmd_t mc;
763 mbx_cmd_t *mcp = &mc; 773 mbx_cmd_t *mcp = &mc;
764 774
765 ql_dbg(ql_dbg_mbx, vha, 0x1035, "Entered %s.\n", __func__); 775 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1035,
776 "Entered %s.\n", __func__);
766 777
767 mcp->mb[0] = MBC_VERIFY_CHECKSUM; 778 mcp->mb[0] = MBC_VERIFY_CHECKSUM;
768 mcp->out_mb = MBX_0; 779 mcp->out_mb = MBX_0;
@@ -787,7 +798,8 @@ qla2x00_verify_checksum(scsi_qla_host_t *vha, uint32_t risc_addr)
787 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ? 798 "Failed=%x chm sum=%x.\n", rval, IS_FWI2_CAPABLE(vha->hw) ?
788 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]); 799 (mcp->mb[2] << 16) | mcp->mb[1] : mcp->mb[1]);
789 } else { 800 } else {
790 ql_dbg(ql_dbg_mbx, vha, 0x1037, "Done %s.\n", __func__); 801 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1037,
802 "Done %s.\n", __func__);
791 } 803 }
792 804
793 return rval; 805 return rval;
@@ -819,7 +831,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
819 mbx_cmd_t mc; 831 mbx_cmd_t mc;
820 mbx_cmd_t *mcp = &mc; 832 mbx_cmd_t *mcp = &mc;
821 833
822 ql_dbg(ql_dbg_mbx, vha, 0x1038, "Entered %s.\n", __func__); 834 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1038,
835 "Entered %s.\n", __func__);
823 836
824 mcp->mb[0] = MBC_IOCB_COMMAND_A64; 837 mcp->mb[0] = MBC_IOCB_COMMAND_A64;
825 mcp->mb[1] = 0; 838 mcp->mb[1] = 0;
@@ -842,7 +855,8 @@ qla2x00_issue_iocb_timeout(scsi_qla_host_t *vha, void *buffer,
842 /* Mask reserved bits. */ 855 /* Mask reserved bits. */
843 sts_entry->entry_status &= 856 sts_entry->entry_status &=
844 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK; 857 IS_FWI2_CAPABLE(vha->hw) ? RF_MASK_24XX : RF_MASK;
845 ql_dbg(ql_dbg_mbx, vha, 0x103a, "Done %s.\n", __func__); 858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103a,
859 "Done %s.\n", __func__);
846 } 860 }
847 861
848 return rval; 862 return rval;
@@ -884,7 +898,8 @@ qla2x00_abort_command(srb_t *sp)
884 struct req_que *req = vha->req; 898 struct req_que *req = vha->req;
885 struct scsi_cmnd *cmd = GET_CMD_SP(sp); 899 struct scsi_cmnd *cmd = GET_CMD_SP(sp);
886 900
887 ql_dbg(ql_dbg_mbx, vha, 0x103b, "Entered %s.\n", __func__); 901 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103b,
902 "Entered %s.\n", __func__);
888 903
889 spin_lock_irqsave(&ha->hardware_lock, flags); 904 spin_lock_irqsave(&ha->hardware_lock, flags);
890 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 905 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -915,7 +930,8 @@ qla2x00_abort_command(srb_t *sp)
915 if (rval != QLA_SUCCESS) { 930 if (rval != QLA_SUCCESS) {
916 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval); 931 ql_dbg(ql_dbg_mbx, vha, 0x103c, "Failed=%x.\n", rval);
917 } else { 932 } else {
918 ql_dbg(ql_dbg_mbx, vha, 0x103d, "Done %s.\n", __func__); 933 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103d,
934 "Done %s.\n", __func__);
919 } 935 }
920 936
921 return rval; 937 return rval;
@@ -934,7 +950,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
934 l = l; 950 l = l;
935 vha = fcport->vha; 951 vha = fcport->vha;
936 952
937 ql_dbg(ql_dbg_mbx, vha, 0x103e, "Entered %s.\n", __func__); 953 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103e,
954 "Entered %s.\n", __func__);
938 955
939 req = vha->hw->req_q_map[0]; 956 req = vha->hw->req_q_map[0];
940 rsp = req->rsp; 957 rsp = req->rsp;
@@ -955,7 +972,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
955 mcp->flags = 0; 972 mcp->flags = 0;
956 rval = qla2x00_mailbox_command(vha, mcp); 973 rval = qla2x00_mailbox_command(vha, mcp);
957 if (rval != QLA_SUCCESS) { 974 if (rval != QLA_SUCCESS) {
958 ql_dbg(ql_dbg_mbx, vha, 0x103f, "Failed=%x.\n", rval); 975 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x103f,
976 "Failed=%x.\n", rval);
959 } 977 }
960 978
961 /* Issue marker IOCB. */ 979 /* Issue marker IOCB. */
@@ -965,7 +983,8 @@ qla2x00_abort_target(struct fc_port *fcport, unsigned int l, int tag)
965 ql_dbg(ql_dbg_mbx, vha, 0x1040, 983 ql_dbg(ql_dbg_mbx, vha, 0x1040,
966 "Failed to issue marker IOCB (%x).\n", rval2); 984 "Failed to issue marker IOCB (%x).\n", rval2);
967 } else { 985 } else {
968 ql_dbg(ql_dbg_mbx, vha, 0x1041, "Done %s.\n", __func__); 986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1041,
987 "Done %s.\n", __func__);
969 } 988 }
970 989
971 return rval; 990 return rval;
@@ -983,7 +1002,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
983 1002
984 vha = fcport->vha; 1003 vha = fcport->vha;
985 1004
986 ql_dbg(ql_dbg_mbx, vha, 0x1042, "Entered %s.\n", __func__); 1005 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1042,
1006 "Entered %s.\n", __func__);
987 1007
988 req = vha->hw->req_q_map[0]; 1008 req = vha->hw->req_q_map[0];
989 rsp = req->rsp; 1009 rsp = req->rsp;
@@ -1012,7 +1032,8 @@ qla2x00_lun_reset(struct fc_port *fcport, unsigned int l, int tag)
1012 ql_dbg(ql_dbg_mbx, vha, 0x1044, 1032 ql_dbg(ql_dbg_mbx, vha, 0x1044,
1013 "Failed to issue marker IOCB (%x).\n", rval2); 1033 "Failed to issue marker IOCB (%x).\n", rval2);
1014 } else { 1034 } else {
1015 ql_dbg(ql_dbg_mbx, vha, 0x1045, "Done %s.\n", __func__); 1035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1045,
1036 "Done %s.\n", __func__);
1016 } 1037 }
1017 1038
1018 return rval; 1039 return rval;
@@ -1046,7 +1067,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1046 mbx_cmd_t mc; 1067 mbx_cmd_t mc;
1047 mbx_cmd_t *mcp = &mc; 1068 mbx_cmd_t *mcp = &mc;
1048 1069
1049 ql_dbg(ql_dbg_mbx, vha, 0x1046, "Entered %s.\n", __func__); 1070 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1046,
1071 "Entered %s.\n", __func__);
1050 1072
1051 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID; 1073 mcp->mb[0] = MBC_GET_ADAPTER_LOOP_ID;
1052 mcp->mb[9] = vha->vp_idx; 1074 mcp->mb[9] = vha->vp_idx;
@@ -1074,7 +1096,8 @@ qla2x00_get_adapter_id(scsi_qla_host_t *vha, uint16_t *id, uint8_t *al_pa,
1074 /*EMPTY*/ 1096 /*EMPTY*/
1075 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval); 1097 ql_dbg(ql_dbg_mbx, vha, 0x1047, "Failed=%x.\n", rval);
1076 } else { 1098 } else {
1077 ql_dbg(ql_dbg_mbx, vha, 0x1048, "Done %s.\n", __func__); 1099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1048,
1100 "Done %s.\n", __func__);
1078 1101
1079 if (IS_CNA_CAPABLE(vha->hw)) { 1102 if (IS_CNA_CAPABLE(vha->hw)) {
1080 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff; 1103 vha->fcoe_vlan_id = mcp->mb[9] & 0xfff;
@@ -1115,7 +1138,8 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1115 mbx_cmd_t mc; 1138 mbx_cmd_t mc;
1116 mbx_cmd_t *mcp = &mc; 1139 mbx_cmd_t *mcp = &mc;
1117 1140
1118 ql_dbg(ql_dbg_mbx, vha, 0x1049, "Entered %s.\n", __func__); 1141 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1049,
1142 "Entered %s.\n", __func__);
1119 1143
1120 mcp->mb[0] = MBC_GET_RETRY_COUNT; 1144 mcp->mb[0] = MBC_GET_RETRY_COUNT;
1121 mcp->out_mb = MBX_0; 1145 mcp->out_mb = MBX_0;
@@ -1138,7 +1162,7 @@ qla2x00_get_retry_cnt(scsi_qla_host_t *vha, uint8_t *retry_cnt, uint8_t *tov,
1138 *tov = ratov; 1162 *tov = ratov;
1139 } 1163 }
1140 1164
1141 ql_dbg(ql_dbg_mbx, vha, 0x104b, 1165 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104b,
1142 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov); 1166 "Done %s mb3=%d ratov=%d.\n", __func__, mcp->mb[3], ratov);
1143 } 1167 }
1144 1168
@@ -1170,7 +1194,8 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1170 mbx_cmd_t *mcp = &mc; 1194 mbx_cmd_t *mcp = &mc;
1171 struct qla_hw_data *ha = vha->hw; 1195 struct qla_hw_data *ha = vha->hw;
1172 1196
1173 ql_dbg(ql_dbg_mbx, vha, 0x104c, "Entered %s.\n", __func__); 1197 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104c,
1198 "Entered %s.\n", __func__);
1174 1199
1175 if (IS_QLA82XX(ha) && ql2xdbwr) 1200 if (IS_QLA82XX(ha) && ql2xdbwr)
1176 qla82xx_wr_32(ha, ha->nxdb_wr_ptr, 1201 qla82xx_wr_32(ha, ha->nxdb_wr_ptr,
@@ -1213,9 +1238,100 @@ qla2x00_init_firmware(scsi_qla_host_t *vha, uint16_t size)
1213 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]); 1238 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], mcp->mb[3]);
1214 } else { 1239 } else {
1215 /*EMPTY*/ 1240 /*EMPTY*/
1216 ql_dbg(ql_dbg_mbx, vha, 0x104e, "Done %s.\n", __func__); 1241 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104e,
1242 "Done %s.\n", __func__);
1243 }
1244
1245 return rval;
1246}
1247
1248/*
1249 * qla2x00_get_node_name_list
1250 * Issue get node name list mailbox command, kmalloc()
1251 * and return the resulting list. Caller must kfree() it!
1252 *
1253 * Input:
1254 * ha = adapter state pointer.
1255 * out_data = resulting list
1256 * out_len = length of the resulting list
1257 *
1258 * Returns:
1259 * qla2x00 local function return status code.
1260 *
1261 * Context:
1262 * Kernel context.
1263 */
1264int
1265qla2x00_get_node_name_list(scsi_qla_host_t *vha, void **out_data, int *out_len)
1266{
1267 struct qla_hw_data *ha = vha->hw;
1268 struct qla_port_24xx_data *list = NULL;
1269 void *pmap;
1270 mbx_cmd_t mc;
1271 dma_addr_t pmap_dma;
1272 ulong dma_size;
1273 int rval, left;
1274
1275 left = 1;
1276 while (left > 0) {
1277 dma_size = left * sizeof(*list);
1278 pmap = dma_alloc_coherent(&ha->pdev->dev, dma_size,
1279 &pmap_dma, GFP_KERNEL);
1280 if (!pmap) {
1281 ql_log(ql_log_warn, vha, 0x113f,
1282 "%s(%ld): DMA Alloc failed of %ld\n",
1283 __func__, vha->host_no, dma_size);
1284 rval = QLA_MEMORY_ALLOC_FAILED;
1285 goto out;
1286 }
1287
1288 mc.mb[0] = MBC_PORT_NODE_NAME_LIST;
1289 mc.mb[1] = BIT_1 | BIT_3;
1290 mc.mb[2] = MSW(pmap_dma);
1291 mc.mb[3] = LSW(pmap_dma);
1292 mc.mb[6] = MSW(MSD(pmap_dma));
1293 mc.mb[7] = LSW(MSD(pmap_dma));
1294 mc.mb[8] = dma_size;
1295 mc.out_mb = MBX_0|MBX_1|MBX_2|MBX_3|MBX_6|MBX_7|MBX_8;
1296 mc.in_mb = MBX_0|MBX_1;
1297 mc.tov = 30;
1298 mc.flags = MBX_DMA_IN;
1299
1300 rval = qla2x00_mailbox_command(vha, &mc);
1301 if (rval != QLA_SUCCESS) {
1302 if ((mc.mb[0] == MBS_COMMAND_ERROR) &&
1303 (mc.mb[1] == 0xA)) {
1304 left += le16_to_cpu(mc.mb[2]) /
1305 sizeof(struct qla_port_24xx_data);
1306 goto restart;
1307 }
1308 goto out_free;
1309 }
1310
1311 left = 0;
1312
1313 list = kzalloc(dma_size, GFP_KERNEL);
1314 if (!list) {
1315 ql_log(ql_log_warn, vha, 0x1140,
1316 "%s(%ld): failed to allocate node names list "
1317 "structure.\n", __func__, vha->host_no);
1318 rval = QLA_MEMORY_ALLOC_FAILED;
1319 goto out_free;
1320 }
1321
1322 memcpy(list, pmap, dma_size);
1323restart:
1324 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1217 } 1325 }
1218 1326
1327 *out_data = list;
1328 *out_len = dma_size;
1329
1330out:
1331 return rval;
1332
1333out_free:
1334 dma_free_coherent(&ha->pdev->dev, dma_size, pmap, pmap_dma);
1219 return rval; 1335 return rval;
1220} 1336}
1221 1337
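The new qla2x00_get_node_name_list() grows its DMA buffer and retries when the firmware reports that the list did not fit. A userspace sketch of that grow-and-retry loop is below; the fake firmware callback, entry layout and all names are hypothetical stand-ins for the mailbox command and qla_port_24xx_data.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct entry { char node_name[8]; };

/* Pretend firmware: needs room for 5 entries, else reports the shortfall. */
static int issue_cmd(struct entry *buf, size_t entries, size_t *extra_bytes)
{
    const size_t needed = 5;

    if (entries < needed) {
        *extra_bytes = (needed - entries) * sizeof(*buf);
        return -1;                       /* "command error, buffer too small" */
    }
    memset(buf, 0xab, needed * sizeof(*buf));
    return 0;
}

int main(void)
{
    size_t left = 1, extra;
    struct entry *buf = NULL;

    while (left > 0) {
        buf = calloc(left, sizeof(*buf));
        if (!buf)
            return 1;
        if (issue_cmd(buf, left, &extra)) {
            left += extra / sizeof(*buf);  /* retry with a bigger buffer */
            free(buf);
            buf = NULL;
            continue;
        }
        left = 0;                          /* success: keep the buffer */
    }
    printf("got list of %zu bytes\n", 5 * sizeof(*buf));
    free(buf);
    return 0;
}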
@@ -1246,7 +1362,8 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1246 dma_addr_t pd_dma; 1362 dma_addr_t pd_dma;
1247 struct qla_hw_data *ha = vha->hw; 1363 struct qla_hw_data *ha = vha->hw;
1248 1364
1249 ql_dbg(ql_dbg_mbx, vha, 0x104f, "Entered %s.\n", __func__); 1365 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x104f,
1366 "Entered %s.\n", __func__);
1250 1367
1251 pd24 = NULL; 1368 pd24 = NULL;
1252 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma); 1369 pd = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pd_dma);
@@ -1326,6 +1443,13 @@ qla2x00_get_port_database(scsi_qla_host_t *vha, fc_port_t *fcport, uint8_t opt)
1326 fcport->port_type = FCT_INITIATOR; 1443 fcport->port_type = FCT_INITIATOR;
1327 else 1444 else
1328 fcport->port_type = FCT_TARGET; 1445 fcport->port_type = FCT_TARGET;
1446
1447 /* Passback COS information. */
1448 fcport->supported_classes = (pd24->flags & PDF_CLASS_2) ?
1449 FC_COS_CLASS2 : FC_COS_CLASS3;
1450
1451 if (pd24->prli_svc_param_word_3[0] & BIT_7)
1452 fcport->flags |= FCF_CONF_COMP_SUPPORTED;
1329 } else { 1453 } else {
1330 uint64_t zero = 0; 1454 uint64_t zero = 0;
1331 1455
@@ -1378,7 +1502,8 @@ gpd_error_out:
1378 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval, 1502 "Failed=%x mb[0]=%x mb[1]=%x.\n", rval,
1379 mcp->mb[0], mcp->mb[1]); 1503 mcp->mb[0], mcp->mb[1]);
1380 } else { 1504 } else {
1381 ql_dbg(ql_dbg_mbx, vha, 0x1053, "Done %s.\n", __func__); 1505 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1053,
1506 "Done %s.\n", __func__);
1382 } 1507 }
1383 1508
1384 return rval; 1509 return rval;
@@ -1407,7 +1532,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1407 mbx_cmd_t mc; 1532 mbx_cmd_t mc;
1408 mbx_cmd_t *mcp = &mc; 1533 mbx_cmd_t *mcp = &mc;
1409 1534
1410 ql_dbg(ql_dbg_mbx, vha, 0x1054, "Entered %s.\n", __func__); 1535 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1054,
1536 "Entered %s.\n", __func__);
1411 1537
1412 mcp->mb[0] = MBC_GET_FIRMWARE_STATE; 1538 mcp->mb[0] = MBC_GET_FIRMWARE_STATE;
1413 mcp->out_mb = MBX_0; 1539 mcp->out_mb = MBX_0;
@@ -1433,7 +1559,8 @@ qla2x00_get_firmware_state(scsi_qla_host_t *vha, uint16_t *states)
1433 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval); 1559 ql_dbg(ql_dbg_mbx, vha, 0x1055, "Failed=%x.\n", rval);
1434 } else { 1560 } else {
1435 /*EMPTY*/ 1561 /*EMPTY*/
1436 ql_dbg(ql_dbg_mbx, vha, 0x1056, "Done %s.\n", __func__); 1562 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1056,
1563 "Done %s.\n", __func__);
1437 } 1564 }
1438 1565
1439 return rval; 1566 return rval;
@@ -1465,7 +1592,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1465 mbx_cmd_t mc; 1592 mbx_cmd_t mc;
1466 mbx_cmd_t *mcp = &mc; 1593 mbx_cmd_t *mcp = &mc;
1467 1594
1468 ql_dbg(ql_dbg_mbx, vha, 0x1057, "Entered %s.\n", __func__); 1595 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1057,
1596 "Entered %s.\n", __func__);
1469 1597
1470 mcp->mb[0] = MBC_GET_PORT_NAME; 1598 mcp->mb[0] = MBC_GET_PORT_NAME;
1471 mcp->mb[9] = vha->vp_idx; 1599 mcp->mb[9] = vha->vp_idx;
@@ -1499,7 +1627,8 @@ qla2x00_get_port_name(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t *name,
1499 name[7] = LSB(mcp->mb[7]); 1627 name[7] = LSB(mcp->mb[7]);
1500 } 1628 }
1501 1629
1502 ql_dbg(ql_dbg_mbx, vha, 0x1059, "Done %s.\n", __func__); 1630 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1059,
1631 "Done %s.\n", __func__);
1503 } 1632 }
1504 1633
1505 return rval; 1634 return rval;
@@ -1527,7 +1656,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1527 mbx_cmd_t mc; 1656 mbx_cmd_t mc;
1528 mbx_cmd_t *mcp = &mc; 1657 mbx_cmd_t *mcp = &mc;
1529 1658
1530 ql_dbg(ql_dbg_mbx, vha, 0x105a, "Entered %s.\n", __func__); 1659 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105a,
1660 "Entered %s.\n", __func__);
1531 1661
1532 if (IS_CNA_CAPABLE(vha->hw)) { 1662 if (IS_CNA_CAPABLE(vha->hw)) {
1533 /* Logout across all FCFs. */ 1663 /* Logout across all FCFs. */
@@ -1564,7 +1694,8 @@ qla2x00_lip_reset(scsi_qla_host_t *vha)
1564 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval); 1694 ql_dbg(ql_dbg_mbx, vha, 0x105b, "Failed=%x.\n", rval);
1565 } else { 1695 } else {
1566 /*EMPTY*/ 1696 /*EMPTY*/
1567 ql_dbg(ql_dbg_mbx, vha, 0x105c, "Done %s.\n", __func__); 1697 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105c,
1698 "Done %s.\n", __func__);
1568 } 1699 }
1569 1700
1570 return rval; 1701 return rval;
@@ -1596,9 +1727,10 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1596 mbx_cmd_t mc; 1727 mbx_cmd_t mc;
1597 mbx_cmd_t *mcp = &mc; 1728 mbx_cmd_t *mcp = &mc;
1598 1729
1599 ql_dbg(ql_dbg_mbx, vha, 0x105d, "Entered %s.\n", __func__); 1730 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105d,
1731 "Entered %s.\n", __func__);
1600 1732
1601 ql_dbg(ql_dbg_mbx, vha, 0x105e, 1733 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x105e,
1602 "Retry cnt=%d ratov=%d total tov=%d.\n", 1734 "Retry cnt=%d ratov=%d total tov=%d.\n",
1603 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov); 1735 vha->hw->retry_count, vha->hw->login_timeout, mcp->tov);
1604 1736
@@ -1622,7 +1754,8 @@ qla2x00_send_sns(scsi_qla_host_t *vha, dma_addr_t sns_phys_address,
1622 rval, mcp->mb[0], mcp->mb[1]); 1754 rval, mcp->mb[0], mcp->mb[1]);
1623 } else { 1755 } else {
1624 /*EMPTY*/ 1756 /*EMPTY*/
1625 ql_dbg(ql_dbg_mbx, vha, 0x1060, "Done %s.\n", __func__); 1757 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1060,
1758 "Done %s.\n", __func__);
1626 } 1759 }
1627 1760
1628 return rval; 1761 return rval;
@@ -1641,7 +1774,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1641 struct req_que *req; 1774 struct req_que *req;
1642 struct rsp_que *rsp; 1775 struct rsp_que *rsp;
1643 1776
1644 ql_dbg(ql_dbg_mbx, vha, 0x1061, "Entered %s.\n", __func__); 1777 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1061,
1778 "Entered %s.\n", __func__);
1645 1779
1646 if (ha->flags.cpu_affinity_enabled) 1780 if (ha->flags.cpu_affinity_enabled)
1647 req = ha->req_q_map[0]; 1781 req = ha->req_q_map[0];
@@ -1715,7 +1849,8 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1715 break; 1849 break;
1716 } 1850 }
1717 } else { 1851 } else {
1718 ql_dbg(ql_dbg_mbx, vha, 0x1066, "Done %s.\n", __func__); 1852 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1066,
1853 "Done %s.\n", __func__);
1719 1854
1720 iop[0] = le32_to_cpu(lg->io_parameter[0]); 1855 iop[0] = le32_to_cpu(lg->io_parameter[0]);
1721 1856
@@ -1733,6 +1868,10 @@ qla24xx_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1733 mb[10] |= BIT_0; /* Class 2. */ 1868 mb[10] |= BIT_0; /* Class 2. */
1734 if (lg->io_parameter[9] || lg->io_parameter[10]) 1869 if (lg->io_parameter[9] || lg->io_parameter[10])
1735 mb[10] |= BIT_1; /* Class 3. */ 1870 mb[10] |= BIT_1; /* Class 3. */
1871 if (lg->io_parameter[0] & __constant_cpu_to_le32(BIT_7))
1872 mb[10] |= BIT_7; /* Confirmed Completion
1873 * Allowed
1874 */
1736 } 1875 }
1737 1876
1738 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 1877 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1770,7 +1909,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1770 mbx_cmd_t *mcp = &mc; 1909 mbx_cmd_t *mcp = &mc;
1771 struct qla_hw_data *ha = vha->hw; 1910 struct qla_hw_data *ha = vha->hw;
1772 1911
1773 ql_dbg(ql_dbg_mbx, vha, 0x1067, "Entered %s.\n", __func__); 1912 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1067,
1913 "Entered %s.\n", __func__);
1774 1914
1775 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT; 1915 mcp->mb[0] = MBC_LOGIN_FABRIC_PORT;
1776 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0; 1916 mcp->out_mb = MBX_3|MBX_2|MBX_1|MBX_0;
@@ -1818,7 +1958,8 @@ qla2x00_login_fabric(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1818 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 1958 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
1819 } else { 1959 } else {
1820 /*EMPTY*/ 1960 /*EMPTY*/
1821 ql_dbg(ql_dbg_mbx, vha, 0x1069, "Done %s.\n", __func__); 1961 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1069,
1962 "Done %s.\n", __func__);
1822 } 1963 }
1823 1964
1824 return rval; 1965 return rval;
@@ -1849,7 +1990,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1849 mbx_cmd_t *mcp = &mc; 1990 mbx_cmd_t *mcp = &mc;
1850 struct qla_hw_data *ha = vha->hw; 1991 struct qla_hw_data *ha = vha->hw;
1851 1992
1852 ql_dbg(ql_dbg_mbx, vha, 0x106a, "Entered %s.\n", __func__); 1993 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106a,
1994 "Entered %s.\n", __func__);
1853 1995
1854 if (IS_FWI2_CAPABLE(ha)) 1996 if (IS_FWI2_CAPABLE(ha))
1855 return qla24xx_login_fabric(vha, fcport->loop_id, 1997 return qla24xx_login_fabric(vha, fcport->loop_id,
@@ -1891,7 +2033,8 @@ qla2x00_login_local_device(scsi_qla_host_t *vha, fc_port_t *fcport,
1891 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]); 2033 rval, mcp->mb[0], mcp->mb[1], mcp->mb[6], mcp->mb[7]);
1892 } else { 2034 } else {
1893 /*EMPTY*/ 2035 /*EMPTY*/
1894 ql_dbg(ql_dbg_mbx, vha, 0x106c, "Done %s.\n", __func__); 2036 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106c,
2037 "Done %s.\n", __func__);
1895 } 2038 }
1896 2039
1897 return (rval); 2040 return (rval);
@@ -1908,7 +2051,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1908 struct req_que *req; 2051 struct req_que *req;
1909 struct rsp_que *rsp; 2052 struct rsp_que *rsp;
1910 2053
1911 ql_dbg(ql_dbg_mbx, vha, 0x106d, "Entered %s.\n", __func__); 2054 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x106d,
2055 "Entered %s.\n", __func__);
1912 2056
1913 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma); 2057 lg = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &lg_dma);
1914 if (lg == NULL) { 2058 if (lg == NULL) {
@@ -1952,7 +2096,8 @@ qla24xx_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1952 le32_to_cpu(lg->io_parameter[1])); 2096 le32_to_cpu(lg->io_parameter[1]));
1953 } else { 2097 } else {
1954 /*EMPTY*/ 2098 /*EMPTY*/
1955 ql_dbg(ql_dbg_mbx, vha, 0x1072, "Done %s.\n", __func__); 2099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1072,
2100 "Done %s.\n", __func__);
1956 } 2101 }
1957 2102
1958 dma_pool_free(ha->s_dma_pool, lg, lg_dma); 2103 dma_pool_free(ha->s_dma_pool, lg, lg_dma);
@@ -1984,7 +2129,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
1984 mbx_cmd_t mc; 2129 mbx_cmd_t mc;
1985 mbx_cmd_t *mcp = &mc; 2130 mbx_cmd_t *mcp = &mc;
1986 2131
1987 ql_dbg(ql_dbg_mbx, vha, 0x1073, "Entered %s.\n", __func__); 2132 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1073,
2133 "Entered %s.\n", __func__);
1988 2134
1989 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT; 2135 mcp->mb[0] = MBC_LOGOUT_FABRIC_PORT;
1990 mcp->out_mb = MBX_1|MBX_0; 2136 mcp->out_mb = MBX_1|MBX_0;
@@ -2007,7 +2153,8 @@ qla2x00_fabric_logout(scsi_qla_host_t *vha, uint16_t loop_id, uint8_t domain,
2007 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]); 2153 "Failed=%x mb[1]=%x.\n", rval, mcp->mb[1]);
2008 } else { 2154 } else {
2009 /*EMPTY*/ 2155 /*EMPTY*/
2010 ql_dbg(ql_dbg_mbx, vha, 0x1075, "Done %s.\n", __func__); 2156 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1075,
2157 "Done %s.\n", __func__);
2011 } 2158 }
2012 2159
2013 return rval; 2160 return rval;
@@ -2035,7 +2182,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2035 mbx_cmd_t mc; 2182 mbx_cmd_t mc;
2036 mbx_cmd_t *mcp = &mc; 2183 mbx_cmd_t *mcp = &mc;
2037 2184
2038 ql_dbg(ql_dbg_mbx, vha, 0x1076, "Entered %s.\n", __func__); 2185 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1076,
2186 "Entered %s.\n", __func__);
2039 2187
2040 mcp->mb[0] = MBC_LIP_FULL_LOGIN; 2188 mcp->mb[0] = MBC_LIP_FULL_LOGIN;
2041 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0; 2189 mcp->mb[1] = IS_FWI2_CAPABLE(vha->hw) ? BIT_3 : 0;
@@ -2052,7 +2200,8 @@ qla2x00_full_login_lip(scsi_qla_host_t *vha)
2052 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval); 2200 ql_dbg(ql_dbg_mbx, vha, 0x1077, "Failed=%x.\n", rval);
2053 } else { 2201 } else {
2054 /*EMPTY*/ 2202 /*EMPTY*/
2055 ql_dbg(ql_dbg_mbx, vha, 0x1078, "Done %s.\n", __func__); 2203 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1078,
2204 "Done %s.\n", __func__);
2056 } 2205 }
2057 2206
2058 return rval; 2207 return rval;
@@ -2078,7 +2227,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2078 mbx_cmd_t mc; 2227 mbx_cmd_t mc;
2079 mbx_cmd_t *mcp = &mc; 2228 mbx_cmd_t *mcp = &mc;
2080 2229
2081 ql_dbg(ql_dbg_mbx, vha, 0x1079, "Entered %s.\n", __func__); 2230 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1079,
2231 "Entered %s.\n", __func__);
2082 2232
2083 if (id_list == NULL) 2233 if (id_list == NULL)
2084 return QLA_FUNCTION_FAILED; 2234 return QLA_FUNCTION_FAILED;
@@ -2110,7 +2260,8 @@ qla2x00_get_id_list(scsi_qla_host_t *vha, void *id_list, dma_addr_t id_list_dma,
2110 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval); 2260 ql_dbg(ql_dbg_mbx, vha, 0x107a, "Failed=%x.\n", rval);
2111 } else { 2261 } else {
2112 *entries = mcp->mb[1]; 2262 *entries = mcp->mb[1];
2113 ql_dbg(ql_dbg_mbx, vha, 0x107b, "Done %s.\n", __func__); 2263 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107b,
2264 "Done %s.\n", __func__);
2114 } 2265 }
2115 2266
2116 return rval; 2267 return rval;
@@ -2138,7 +2289,8 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2138 mbx_cmd_t mc; 2289 mbx_cmd_t mc;
2139 mbx_cmd_t *mcp = &mc; 2290 mbx_cmd_t *mcp = &mc;
2140 2291
2141 ql_dbg(ql_dbg_mbx, vha, 0x107c, "Entered %s.\n", __func__); 2292 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107c,
2293 "Entered %s.\n", __func__);
2142 2294
2143 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS; 2295 mcp->mb[0] = MBC_GET_RESOURCE_COUNTS;
2144 mcp->out_mb = MBX_0; 2296 mcp->out_mb = MBX_0;
@@ -2154,7 +2306,7 @@ qla2x00_get_resource_cnts(scsi_qla_host_t *vha, uint16_t *cur_xchg_cnt,
2154 ql_dbg(ql_dbg_mbx, vha, 0x107d, 2306 ql_dbg(ql_dbg_mbx, vha, 0x107d,
2155 "Failed mb[0]=%x.\n", mcp->mb[0]); 2307 "Failed mb[0]=%x.\n", mcp->mb[0]);
2156 } else { 2308 } else {
2157 ql_dbg(ql_dbg_mbx, vha, 0x107e, 2309 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107e,
2158 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x " 2310 "Done %s mb1=%x mb2=%x mb3=%x mb6=%x mb7=%x mb10=%x "
2159 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2], 2311 "mb11=%x mb12=%x.\n", __func__, mcp->mb[1], mcp->mb[2],
2160 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10], 2312 mcp->mb[3], mcp->mb[6], mcp->mb[7], mcp->mb[10],
@@ -2201,7 +2353,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2201 dma_addr_t pmap_dma; 2353 dma_addr_t pmap_dma;
2202 struct qla_hw_data *ha = vha->hw; 2354 struct qla_hw_data *ha = vha->hw;
2203 2355
2204 ql_dbg(ql_dbg_mbx, vha, 0x107f, "Entered %s.\n", __func__); 2356 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x107f,
2357 "Entered %s.\n", __func__);
2205 2358
2206 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma); 2359 pmap = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &pmap_dma);
2207 if (pmap == NULL) { 2360 if (pmap == NULL) {
@@ -2224,7 +2377,7 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2224 rval = qla2x00_mailbox_command(vha, mcp); 2377 rval = qla2x00_mailbox_command(vha, mcp);
2225 2378
2226 if (rval == QLA_SUCCESS) { 2379 if (rval == QLA_SUCCESS) {
2227 ql_dbg(ql_dbg_mbx, vha, 0x1081, 2380 ql_dbg(ql_dbg_mbx + ql_dbg_buffer, vha, 0x1081,
2228 "mb0/mb1=%x/%X FC/AL position map size (%x).\n", 2381 "mb0/mb1=%x/%X FC/AL position map size (%x).\n",
2229 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]); 2382 mcp->mb[0], mcp->mb[1], (unsigned)pmap[0]);
2230 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d, 2383 ql_dump_buffer(ql_dbg_mbx + ql_dbg_buffer, vha, 0x111d,
@@ -2238,7 +2391,8 @@ qla2x00_get_fcal_position_map(scsi_qla_host_t *vha, char *pos_map)
2238 if (rval != QLA_SUCCESS) { 2391 if (rval != QLA_SUCCESS) {
2239 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval); 2392 ql_dbg(ql_dbg_mbx, vha, 0x1082, "Failed=%x.\n", rval);
2240 } else { 2393 } else {
2241 ql_dbg(ql_dbg_mbx, vha, 0x1083, "Done %s.\n", __func__); 2394 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1083,
2395 "Done %s.\n", __func__);
2242 } 2396 }
2243 2397
2244 return rval; 2398 return rval;
@@ -2267,7 +2421,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2267 uint32_t *siter, *diter, dwords; 2421 uint32_t *siter, *diter, dwords;
2268 struct qla_hw_data *ha = vha->hw; 2422 struct qla_hw_data *ha = vha->hw;
2269 2423
2270 ql_dbg(ql_dbg_mbx, vha, 0x1084, "Entered %s.\n", __func__); 2424 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1084,
2425 "Entered %s.\n", __func__);
2271 2426
2272 mcp->mb[0] = MBC_GET_LINK_STATUS; 2427 mcp->mb[0] = MBC_GET_LINK_STATUS;
2273 mcp->mb[2] = MSW(stats_dma); 2428 mcp->mb[2] = MSW(stats_dma);
@@ -2301,7 +2456,8 @@ qla2x00_get_link_status(scsi_qla_host_t *vha, uint16_t loop_id,
2301 rval = QLA_FUNCTION_FAILED; 2456 rval = QLA_FUNCTION_FAILED;
2302 } else { 2457 } else {
2303 /* Copy over data -- firmware data is LE. */ 2458 /* Copy over data -- firmware data is LE. */
2304 ql_dbg(ql_dbg_mbx, vha, 0x1086, "Done %s.\n", __func__); 2459 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1086,
2460 "Done %s.\n", __func__);
2305 dwords = offsetof(struct link_statistics, unused1) / 4; 2461 dwords = offsetof(struct link_statistics, unused1) / 4;
2306 siter = diter = &stats->link_fail_cnt; 2462 siter = diter = &stats->link_fail_cnt;
2307 while (dwords--) 2463 while (dwords--)
@@ -2324,7 +2480,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2324 mbx_cmd_t *mcp = &mc; 2480 mbx_cmd_t *mcp = &mc;
2325 uint32_t *siter, *diter, dwords; 2481 uint32_t *siter, *diter, dwords;
2326 2482
2327 ql_dbg(ql_dbg_mbx, vha, 0x1088, "Entered %s.\n", __func__); 2483 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1088,
2484 "Entered %s.\n", __func__);
2328 2485
2329 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS; 2486 mcp->mb[0] = MBC_GET_LINK_PRIV_STATS;
2330 mcp->mb[2] = MSW(stats_dma); 2487 mcp->mb[2] = MSW(stats_dma);
@@ -2346,7 +2503,8 @@ qla24xx_get_isp_stats(scsi_qla_host_t *vha, struct link_statistics *stats,
2346 "Failed mb[0]=%x.\n", mcp->mb[0]); 2503 "Failed mb[0]=%x.\n", mcp->mb[0]);
2347 rval = QLA_FUNCTION_FAILED; 2504 rval = QLA_FUNCTION_FAILED;
2348 } else { 2505 } else {
2349 ql_dbg(ql_dbg_mbx, vha, 0x108a, "Done %s.\n", __func__); 2506 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108a,
2507 "Done %s.\n", __func__);
2350 /* Copy over data -- firmware data is LE. */ 2508 /* Copy over data -- firmware data is LE. */
2351 dwords = sizeof(struct link_statistics) / 4; 2509 dwords = sizeof(struct link_statistics) / 4;
2352 siter = diter = &stats->link_fail_cnt; 2510 siter = diter = &stats->link_fail_cnt;
@@ -2375,7 +2533,8 @@ qla24xx_abort_command(srb_t *sp)
2375 struct qla_hw_data *ha = vha->hw; 2533 struct qla_hw_data *ha = vha->hw;
2376 struct req_que *req = vha->req; 2534 struct req_que *req = vha->req;
2377 2535
2378 ql_dbg(ql_dbg_mbx, vha, 0x108c, "Entered %s.\n", __func__); 2536 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x108c,
2537 "Entered %s.\n", __func__);
2379 2538
2380 spin_lock_irqsave(&ha->hardware_lock, flags); 2539 spin_lock_irqsave(&ha->hardware_lock, flags);
2381 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) { 2540 for (handle = 1; handle < MAX_OUTSTANDING_COMMANDS; handle++) {
@@ -2404,7 +2563,7 @@ qla24xx_abort_command(srb_t *sp)
2404 abt->port_id[0] = fcport->d_id.b.al_pa; 2563 abt->port_id[0] = fcport->d_id.b.al_pa;
2405 abt->port_id[1] = fcport->d_id.b.area; 2564 abt->port_id[1] = fcport->d_id.b.area;
2406 abt->port_id[2] = fcport->d_id.b.domain; 2565 abt->port_id[2] = fcport->d_id.b.domain;
2407 abt->vp_index = fcport->vp_idx; 2566 abt->vp_index = fcport->vha->vp_idx;
2408 2567
2409 abt->req_que_no = cpu_to_le16(req->id); 2568 abt->req_que_no = cpu_to_le16(req->id);
2410 2569
@@ -2423,7 +2582,8 @@ qla24xx_abort_command(srb_t *sp)
2423 le16_to_cpu(abt->nport_handle)); 2582 le16_to_cpu(abt->nport_handle));
2424 rval = QLA_FUNCTION_FAILED; 2583 rval = QLA_FUNCTION_FAILED;
2425 } else { 2584 } else {
2426 ql_dbg(ql_dbg_mbx, vha, 0x1091, "Done %s.\n", __func__); 2585 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1091,
2586 "Done %s.\n", __func__);
2427 } 2587 }
2428 2588
2429 dma_pool_free(ha->s_dma_pool, abt, abt_dma); 2589 dma_pool_free(ha->s_dma_pool, abt, abt_dma);
@@ -2455,7 +2615,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2455 ha = vha->hw; 2615 ha = vha->hw;
2456 req = vha->req; 2616 req = vha->req;
2457 2617
2458 ql_dbg(ql_dbg_mbx, vha, 0x1092, "Entered %s.\n", __func__); 2618 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1092,
2619 "Entered %s.\n", __func__);
2459 2620
2460 if (ha->flags.cpu_affinity_enabled) 2621 if (ha->flags.cpu_affinity_enabled)
2461 rsp = ha->rsp_q_map[tag + 1]; 2622 rsp = ha->rsp_q_map[tag + 1];
@@ -2478,7 +2639,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2478 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa; 2639 tsk->p.tsk.port_id[0] = fcport->d_id.b.al_pa;
2479 tsk->p.tsk.port_id[1] = fcport->d_id.b.area; 2640 tsk->p.tsk.port_id[1] = fcport->d_id.b.area;
2480 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain; 2641 tsk->p.tsk.port_id[2] = fcport->d_id.b.domain;
2481 tsk->p.tsk.vp_index = fcport->vp_idx; 2642 tsk->p.tsk.vp_index = fcport->vha->vp_idx;
2482 if (type == TCF_LUN_RESET) { 2643 if (type == TCF_LUN_RESET) {
2483 int_to_scsilun(l, &tsk->p.tsk.lun); 2644 int_to_scsilun(l, &tsk->p.tsk.lun);
2484 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun, 2645 host_to_fcp_swap((uint8_t *)&tsk->p.tsk.lun,
@@ -2504,7 +2665,7 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2504 } else if (le16_to_cpu(sts->scsi_status) & 2665 } else if (le16_to_cpu(sts->scsi_status) &
2505 SS_RESPONSE_INFO_LEN_VALID) { 2666 SS_RESPONSE_INFO_LEN_VALID) {
2506 if (le32_to_cpu(sts->rsp_data_len) < 4) { 2667 if (le32_to_cpu(sts->rsp_data_len) < 4) {
2507 ql_dbg(ql_dbg_mbx, vha, 0x1097, 2668 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1097,
2508 "Ignoring inconsistent data length -- not enough " 2669 "Ignoring inconsistent data length -- not enough "
2509 "response info (%d).\n", 2670 "response info (%d).\n",
2510 le32_to_cpu(sts->rsp_data_len)); 2671 le32_to_cpu(sts->rsp_data_len));
@@ -2523,7 +2684,8 @@ __qla24xx_issue_tmf(char *name, uint32_t type, struct fc_port *fcport,
2523 ql_dbg(ql_dbg_mbx, vha, 0x1099, 2684 ql_dbg(ql_dbg_mbx, vha, 0x1099,
2524 "Failed to issue marker IOCB (%x).\n", rval2); 2685 "Failed to issue marker IOCB (%x).\n", rval2);
2525 } else { 2686 } else {
2526 ql_dbg(ql_dbg_mbx, vha, 0x109a, "Done %s.\n", __func__); 2687 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109a,
2688 "Done %s.\n", __func__);
2527 } 2689 }
2528 2690
2529 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma); 2691 dma_pool_free(ha->s_dma_pool, tsk, tsk_dma);
@@ -2564,7 +2726,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2564 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha)) 2726 if (!IS_QLA23XX(ha) && !IS_FWI2_CAPABLE(ha))
2565 return QLA_FUNCTION_FAILED; 2727 return QLA_FUNCTION_FAILED;
2566 2728
2567 ql_dbg(ql_dbg_mbx, vha, 0x109b, "Entered %s.\n", __func__); 2729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109b,
2730 "Entered %s.\n", __func__);
2568 2731
2569 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR; 2732 mcp->mb[0] = MBC_GEN_SYSTEM_ERROR;
2570 mcp->out_mb = MBX_0; 2733 mcp->out_mb = MBX_0;
@@ -2576,7 +2739,8 @@ qla2x00_system_error(scsi_qla_host_t *vha)
2576 if (rval != QLA_SUCCESS) { 2739 if (rval != QLA_SUCCESS) {
2577 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval); 2740 ql_dbg(ql_dbg_mbx, vha, 0x109c, "Failed=%x.\n", rval);
2578 } else { 2741 } else {
2579 ql_dbg(ql_dbg_mbx, vha, 0x109d, "Done %s.\n", __func__); 2742 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109d,
2743 "Done %s.\n", __func__);
2580 } 2744 }
2581 2745
2582 return rval; 2746 return rval;
@@ -2596,7 +2760,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2596 mbx_cmd_t mc; 2760 mbx_cmd_t mc;
2597 mbx_cmd_t *mcp = &mc; 2761 mbx_cmd_t *mcp = &mc;
2598 2762
2599 ql_dbg(ql_dbg_mbx, vha, 0x109e, "Entered %s.\n", __func__); 2763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x109e,
2764 "Entered %s.\n", __func__);
2600 2765
2601 mcp->mb[0] = MBC_SERDES_PARAMS; 2766 mcp->mb[0] = MBC_SERDES_PARAMS;
2602 mcp->mb[1] = BIT_0; 2767 mcp->mb[1] = BIT_0;
@@ -2615,7 +2780,8 @@ qla2x00_set_serdes_params(scsi_qla_host_t *vha, uint16_t sw_em_1g,
2615 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 2780 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
2616 } else { 2781 } else {
2617 /*EMPTY*/ 2782 /*EMPTY*/
2618 ql_dbg(ql_dbg_mbx, vha, 0x10a0, "Done %s.\n", __func__); 2783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a0,
2784 "Done %s.\n", __func__);
2619 } 2785 }
2620 2786
2621 return rval; 2787 return rval;
@@ -2631,7 +2797,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2631 if (!IS_FWI2_CAPABLE(vha->hw)) 2797 if (!IS_FWI2_CAPABLE(vha->hw))
2632 return QLA_FUNCTION_FAILED; 2798 return QLA_FUNCTION_FAILED;
2633 2799
2634 ql_dbg(ql_dbg_mbx, vha, 0x10a1, "Entered %s.\n", __func__); 2800 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a1,
2801 "Entered %s.\n", __func__);
2635 2802
2636 mcp->mb[0] = MBC_STOP_FIRMWARE; 2803 mcp->mb[0] = MBC_STOP_FIRMWARE;
2637 mcp->mb[1] = 0; 2804 mcp->mb[1] = 0;
@@ -2646,7 +2813,8 @@ qla2x00_stop_firmware(scsi_qla_host_t *vha)
2646 if (mcp->mb[0] == MBS_INVALID_COMMAND) 2813 if (mcp->mb[0] == MBS_INVALID_COMMAND)
2647 rval = QLA_INVALID_COMMAND; 2814 rval = QLA_INVALID_COMMAND;
2648 } else { 2815 } else {
2649 ql_dbg(ql_dbg_mbx, vha, 0x10a3, "Done %s.\n", __func__); 2816 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a3,
2817 "Done %s.\n", __func__);
2650 } 2818 }
2651 2819
2652 return rval; 2820 return rval;
@@ -2660,7 +2828,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2660 mbx_cmd_t mc; 2828 mbx_cmd_t mc;
2661 mbx_cmd_t *mcp = &mc; 2829 mbx_cmd_t *mcp = &mc;
2662 2830
2663 ql_dbg(ql_dbg_mbx, vha, 0x10a4, "Entered %s.\n", __func__); 2831 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a4,
2832 "Entered %s.\n", __func__);
2664 2833
2665 if (!IS_FWI2_CAPABLE(vha->hw)) 2834 if (!IS_FWI2_CAPABLE(vha->hw))
2666 return QLA_FUNCTION_FAILED; 2835 return QLA_FUNCTION_FAILED;
@@ -2686,7 +2855,8 @@ qla2x00_enable_eft_trace(scsi_qla_host_t *vha, dma_addr_t eft_dma,
2686 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2855 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2687 rval, mcp->mb[0], mcp->mb[1]); 2856 rval, mcp->mb[0], mcp->mb[1]);
2688 } else { 2857 } else {
2689 ql_dbg(ql_dbg_mbx, vha, 0x10a6, "Done %s.\n", __func__); 2858 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a6,
2859 "Done %s.\n", __func__);
2690 } 2860 }
2691 2861
2692 return rval; 2862 return rval;
@@ -2699,7 +2869,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2699 mbx_cmd_t mc; 2869 mbx_cmd_t mc;
2700 mbx_cmd_t *mcp = &mc; 2870 mbx_cmd_t *mcp = &mc;
2701 2871
2702 ql_dbg(ql_dbg_mbx, vha, 0x10a7, "Entered %s.\n", __func__); 2872 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a7,
2873 "Entered %s.\n", __func__);
2703 2874
2704 if (!IS_FWI2_CAPABLE(vha->hw)) 2875 if (!IS_FWI2_CAPABLE(vha->hw))
2705 return QLA_FUNCTION_FAILED; 2876 return QLA_FUNCTION_FAILED;
@@ -2719,7 +2890,8 @@ qla2x00_disable_eft_trace(scsi_qla_host_t *vha)
2719 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2890 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2720 rval, mcp->mb[0], mcp->mb[1]); 2891 rval, mcp->mb[0], mcp->mb[1]);
2721 } else { 2892 } else {
2722 ql_dbg(ql_dbg_mbx, vha, 0x10a9, "Done %s.\n", __func__); 2893 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10a9,
2894 "Done %s.\n", __func__);
2723 } 2895 }
2724 2896
2725 return rval; 2897 return rval;
@@ -2733,7 +2905,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2733 mbx_cmd_t mc; 2905 mbx_cmd_t mc;
2734 mbx_cmd_t *mcp = &mc; 2906 mbx_cmd_t *mcp = &mc;
2735 2907
2736 ql_dbg(ql_dbg_mbx, vha, 0x10aa, "Entered %s.\n", __func__); 2908 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10aa,
2909 "Entered %s.\n", __func__);
2737 2910
2738 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) && 2911 if (!IS_QLA25XX(vha->hw) && !IS_QLA81XX(vha->hw) &&
2739 !IS_QLA83XX(vha->hw)) 2912 !IS_QLA83XX(vha->hw))
@@ -2764,7 +2937,8 @@ qla2x00_enable_fce_trace(scsi_qla_host_t *vha, dma_addr_t fce_dma,
2764 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2937 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2765 rval, mcp->mb[0], mcp->mb[1]); 2938 rval, mcp->mb[0], mcp->mb[1]);
2766 } else { 2939 } else {
2767 ql_dbg(ql_dbg_mbx, vha, 0x10ac, "Done %s.\n", __func__); 2940 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ac,
2941 "Done %s.\n", __func__);
2768 2942
2769 if (mb) 2943 if (mb)
2770 memcpy(mb, mcp->mb, 8 * sizeof(*mb)); 2944 memcpy(mb, mcp->mb, 8 * sizeof(*mb));
@@ -2782,7 +2956,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2782 mbx_cmd_t mc; 2956 mbx_cmd_t mc;
2783 mbx_cmd_t *mcp = &mc; 2957 mbx_cmd_t *mcp = &mc;
2784 2958
2785 ql_dbg(ql_dbg_mbx, vha, 0x10ad, "Entered %s.\n", __func__); 2959 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ad,
2960 "Entered %s.\n", __func__);
2786 2961
2787 if (!IS_FWI2_CAPABLE(vha->hw)) 2962 if (!IS_FWI2_CAPABLE(vha->hw))
2788 return QLA_FUNCTION_FAILED; 2963 return QLA_FUNCTION_FAILED;
@@ -2804,7 +2979,8 @@ qla2x00_disable_fce_trace(scsi_qla_host_t *vha, uint64_t *wr, uint64_t *rd)
2804 "Failed=%x mb[0]=%x mb[1]=%x.\n", 2979 "Failed=%x mb[0]=%x mb[1]=%x.\n",
2805 rval, mcp->mb[0], mcp->mb[1]); 2980 rval, mcp->mb[0], mcp->mb[1]);
2806 } else { 2981 } else {
2807 ql_dbg(ql_dbg_mbx, vha, 0x10af, "Done %s.\n", __func__); 2982 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10af,
2983 "Done %s.\n", __func__);
2808 2984
2809 if (wr) 2985 if (wr)
2810 *wr = (uint64_t) mcp->mb[5] << 48 | 2986 *wr = (uint64_t) mcp->mb[5] << 48 |
@@ -2829,7 +3005,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2829 mbx_cmd_t mc; 3005 mbx_cmd_t mc;
2830 mbx_cmd_t *mcp = &mc; 3006 mbx_cmd_t *mcp = &mc;
2831 3007
2832 ql_dbg(ql_dbg_mbx, vha, 0x10b0, "Entered %s.\n", __func__); 3008 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b0,
3009 "Entered %s.\n", __func__);
2833 3010
2834 if (!IS_IIDMA_CAPABLE(vha->hw)) 3011 if (!IS_IIDMA_CAPABLE(vha->hw))
2835 return QLA_FUNCTION_FAILED; 3012 return QLA_FUNCTION_FAILED;
@@ -2854,7 +3031,8 @@ qla2x00_get_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2854 if (rval != QLA_SUCCESS) { 3031 if (rval != QLA_SUCCESS) {
2855 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval); 3032 ql_dbg(ql_dbg_mbx, vha, 0x10b1, "Failed=%x.\n", rval);
2856 } else { 3033 } else {
2857 ql_dbg(ql_dbg_mbx, vha, 0x10b2, "Done %s.\n", __func__); 3034 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b2,
3035 "Done %s.\n", __func__);
2858 if (port_speed) 3036 if (port_speed)
2859 *port_speed = mcp->mb[3]; 3037 *port_speed = mcp->mb[3];
2860 } 3038 }
@@ -2870,7 +3048,8 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2870 mbx_cmd_t mc; 3048 mbx_cmd_t mc;
2871 mbx_cmd_t *mcp = &mc; 3049 mbx_cmd_t *mcp = &mc;
2872 3050
2873 ql_dbg(ql_dbg_mbx, vha, 0x10b3, "Entered %s.\n", __func__); 3051 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b3,
3052 "Entered %s.\n", __func__);
2874 3053
2875 if (!IS_IIDMA_CAPABLE(vha->hw)) 3054 if (!IS_IIDMA_CAPABLE(vha->hw))
2876 return QLA_FUNCTION_FAILED; 3055 return QLA_FUNCTION_FAILED;
@@ -2897,9 +3076,11 @@ qla2x00_set_idma_speed(scsi_qla_host_t *vha, uint16_t loop_id,
2897 } 3076 }
2898 3077
2899 if (rval != QLA_SUCCESS) { 3078 if (rval != QLA_SUCCESS) {
2900 ql_dbg(ql_dbg_mbx, vha, 0x10b4, "Failed=%x.\n", rval); 3079 ql_dbg(ql_dbg_mbx, vha, 0x10b4,
3080 "Failed=%x.\n", rval);
2901 } else { 3081 } else {
2902 ql_dbg(ql_dbg_mbx, vha, 0x10b5, "Done %s.\n", __func__); 3082 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b5,
3083 "Done %s.\n", __func__);
2903 } 3084 }
2904 3085
2905 return rval; 3086 return rval;
@@ -2915,24 +3096,25 @@ qla24xx_report_id_acquisition(scsi_qla_host_t *vha,
2915 scsi_qla_host_t *vp; 3096 scsi_qla_host_t *vp;
2916 unsigned long flags; 3097 unsigned long flags;
2917 3098
2918 ql_dbg(ql_dbg_mbx, vha, 0x10b6, "Entered %s.\n", __func__); 3099 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b6,
3100 "Entered %s.\n", __func__);
2919 3101
2920 if (rptid_entry->entry_status != 0) 3102 if (rptid_entry->entry_status != 0)
2921 return; 3103 return;
2922 3104
2923 if (rptid_entry->format == 0) { 3105 if (rptid_entry->format == 0) {
2924 ql_dbg(ql_dbg_mbx, vha, 0x10b7, 3106 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b7,
2925 "Format 0 : Number of VPs setup %d, number of " 3107 "Format 0 : Number of VPs setup %d, number of "
2926 "VPs acquired %d.\n", 3108 "VPs acquired %d.\n",
2927 MSB(le16_to_cpu(rptid_entry->vp_count)), 3109 MSB(le16_to_cpu(rptid_entry->vp_count)),
2928 LSB(le16_to_cpu(rptid_entry->vp_count))); 3110 LSB(le16_to_cpu(rptid_entry->vp_count)));
2929 ql_dbg(ql_dbg_mbx, vha, 0x10b8, 3111 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b8,
2930 "Primary port id %02x%02x%02x.\n", 3112 "Primary port id %02x%02x%02x.\n",
2931 rptid_entry->port_id[2], rptid_entry->port_id[1], 3113 rptid_entry->port_id[2], rptid_entry->port_id[1],
2932 rptid_entry->port_id[0]); 3114 rptid_entry->port_id[0]);
2933 } else if (rptid_entry->format == 1) { 3115 } else if (rptid_entry->format == 1) {
2934 vp_idx = LSB(stat); 3116 vp_idx = LSB(stat);
2935 ql_dbg(ql_dbg_mbx, vha, 0x10b9, 3117 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10b9,
2936 "Format 1: VP[%d] enabled - status %d - with " 3118 "Format 1: VP[%d] enabled - status %d - with "
2937 "port id %02x%02x%02x.\n", vp_idx, MSB(stat), 3119 "port id %02x%02x%02x.\n", vp_idx, MSB(stat),
2938 rptid_entry->port_id[2], rptid_entry->port_id[1], 3120 rptid_entry->port_id[2], rptid_entry->port_id[1],
@@ -2999,7 +3181,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
2999 3181
3000 /* This can be called by the parent */ 3182 /* This can be called by the parent */
3001 3183
3002 ql_dbg(ql_dbg_mbx, vha, 0x10bb, "Entered %s.\n", __func__); 3184 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10bb,
3185 "Entered %s.\n", __func__);
3003 3186
3004 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma); 3187 vpmod = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &vpmod_dma);
3005 if (!vpmod) { 3188 if (!vpmod) {
@@ -3015,6 +3198,9 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3015 vpmod->vp_count = 1; 3198 vpmod->vp_count = 1;
3016 vpmod->vp_index1 = vha->vp_idx; 3199 vpmod->vp_index1 = vha->vp_idx;
3017 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5; 3200 vpmod->options_idx1 = BIT_3|BIT_4|BIT_5;
3201
3202 qlt_modify_vp_config(vha, vpmod);
3203
3018 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE); 3204 memcpy(vpmod->node_name_idx1, vha->node_name, WWN_SIZE);
3019 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE); 3205 memcpy(vpmod->port_name_idx1, vha->port_name, WWN_SIZE);
3020 vpmod->entry_count = 1; 3206 vpmod->entry_count = 1;
@@ -3035,7 +3221,8 @@ qla24xx_modify_vp_config(scsi_qla_host_t *vha)
3035 rval = QLA_FUNCTION_FAILED; 3221 rval = QLA_FUNCTION_FAILED;
3036 } else { 3222 } else {
3037 /* EMPTY */ 3223 /* EMPTY */
3038 ql_dbg(ql_dbg_mbx, vha, 0x10c0, "Done %s.\n", __func__); 3224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c0,
3225 "Done %s.\n", __func__);
3039 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING); 3226 fc_vport_set_state(vha->fc_vport, FC_VPORT_INITIALIZING);
3040 } 3227 }
3041 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma); 3228 dma_pool_free(ha->s_dma_pool, vpmod, vpmod_dma);
@@ -3069,7 +3256,7 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3069 int vp_index = vha->vp_idx; 3256 int vp_index = vha->vp_idx;
3070 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); 3257 struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev);
3071 3258
3072 ql_dbg(ql_dbg_mbx, vha, 0x10c1, 3259 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c1,
3073 "Entered %s enabling index %d.\n", __func__, vp_index); 3260 "Entered %s enabling index %d.\n", __func__, vp_index);
3074 3261
3075 if (vp_index == 0 || vp_index >= ha->max_npiv_vports) 3262 if (vp_index == 0 || vp_index >= ha->max_npiv_vports)
@@ -3112,7 +3299,8 @@ qla24xx_control_vp(scsi_qla_host_t *vha, int cmd)
3112 le16_to_cpu(vce->comp_status)); 3299 le16_to_cpu(vce->comp_status));
3113 rval = QLA_FUNCTION_FAILED; 3300 rval = QLA_FUNCTION_FAILED;
3114 } else { 3301 } else {
3115 ql_dbg(ql_dbg_mbx, vha, 0x10c6, "Done %s.\n", __func__); 3302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c6,
3303 "Done %s.\n", __func__);
3116 } 3304 }
3117 3305
3118 dma_pool_free(ha->s_dma_pool, vce, vce_dma); 3306 dma_pool_free(ha->s_dma_pool, vce, vce_dma);
@@ -3149,14 +3337,8 @@ qla2x00_send_change_request(scsi_qla_host_t *vha, uint16_t format,
3149 mbx_cmd_t mc; 3337 mbx_cmd_t mc;
3150 mbx_cmd_t *mcp = &mc; 3338 mbx_cmd_t *mcp = &mc;
3151 3339
3152 ql_dbg(ql_dbg_mbx, vha, 0x10c7, "Entered %s.\n", __func__); 3340 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c7,
3153 3341 "Entered %s.\n", __func__);
3154 /*
3155 * This command is implicitly executed by firmware during login for the
3156 * physical hosts
3157 */
3158 if (vp_idx == 0)
3159 return QLA_FUNCTION_FAILED;
3160 3342
3161 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST; 3343 mcp->mb[0] = MBC_SEND_CHANGE_REQUEST;
3162 mcp->mb[1] = format; 3344 mcp->mb[1] = format;
@@ -3185,7 +3367,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3185 mbx_cmd_t mc; 3367 mbx_cmd_t mc;
3186 mbx_cmd_t *mcp = &mc; 3368 mbx_cmd_t *mcp = &mc;
3187 3369
3188 ql_dbg(ql_dbg_mbx, vha, 0x1009, "Entered %s.\n", __func__); 3370 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1009,
3371 "Entered %s.\n", __func__);
3189 3372
3190 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) { 3373 if (MSW(addr) || IS_FWI2_CAPABLE(vha->hw)) {
3191 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED; 3374 mcp->mb[0] = MBC_DUMP_RISC_RAM_EXTENDED;
@@ -3219,7 +3402,8 @@ qla2x00_dump_ram(scsi_qla_host_t *vha, dma_addr_t req_dma, uint32_t addr,
3219 ql_dbg(ql_dbg_mbx, vha, 0x1008, 3402 ql_dbg(ql_dbg_mbx, vha, 0x1008,
3220 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3403 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3221 } else { 3404 } else {
3222 ql_dbg(ql_dbg_mbx, vha, 0x1007, "Done %s.\n", __func__); 3405 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1007,
3406 "Done %s.\n", __func__);
3223 } 3407 }
3224 3408
3225 return rval; 3409 return rval;
@@ -3244,7 +3428,8 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3244 unsigned long flags; 3428 unsigned long flags;
3245 struct qla_hw_data *ha = vha->hw; 3429 struct qla_hw_data *ha = vha->hw;
3246 3430
3247 ql_dbg(ql_dbg_mbx, vha, 0x10c8, "Entered %s.\n", __func__); 3431 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10c8,
3432 "Entered %s.\n", __func__);
3248 3433
3249 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma); 3434 mn = dma_pool_alloc(ha->s_dma_pool, GFP_KERNEL, &mn_dma);
3250 if (mn == NULL) { 3435 if (mn == NULL) {
@@ -3285,7 +3470,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3285 status[0] = le16_to_cpu(mn->p.rsp.comp_status); 3470 status[0] = le16_to_cpu(mn->p.rsp.comp_status);
3286 status[1] = status[0] == CS_VCS_CHIP_FAILURE ? 3471 status[1] = status[0] == CS_VCS_CHIP_FAILURE ?
3287 le16_to_cpu(mn->p.rsp.failure_code) : 0; 3472 le16_to_cpu(mn->p.rsp.failure_code) : 0;
3288 ql_dbg(ql_dbg_mbx, vha, 0x10ce, 3473 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ce,
3289 "cs=%x fc=%x.\n", status[0], status[1]); 3474 "cs=%x fc=%x.\n", status[0], status[1]);
3290 3475
3291 if (status[0] != CS_COMPLETE) { 3476 if (status[0] != CS_COMPLETE) {
@@ -3299,7 +3484,7 @@ qla84xx_verify_chip(struct scsi_qla_host *vha, uint16_t *status)
3299 retry = 1; 3484 retry = 1;
3300 } 3485 }
3301 } else { 3486 } else {
3302 ql_dbg(ql_dbg_mbx, vha, 0x10d0, 3487 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d0,
3303 "Firmware updated to %x.\n", 3488 "Firmware updated to %x.\n",
3304 le32_to_cpu(mn->p.rsp.fw_ver)); 3489 le32_to_cpu(mn->p.rsp.fw_ver));
3305 3490
@@ -3316,9 +3501,11 @@ verify_done:
3316 dma_pool_free(ha->s_dma_pool, mn, mn_dma); 3501 dma_pool_free(ha->s_dma_pool, mn, mn_dma);
3317 3502
3318 if (rval != QLA_SUCCESS) { 3503 if (rval != QLA_SUCCESS) {
3319 ql_dbg(ql_dbg_mbx, vha, 0x10d1, "Failed=%x.\n", rval); 3504 ql_dbg(ql_dbg_mbx, vha, 0x10d1,
3505 "Failed=%x.\n", rval);
3320 } else { 3506 } else {
3321 ql_dbg(ql_dbg_mbx, vha, 0x10d2, "Done %s.\n", __func__); 3507 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d2,
3508 "Done %s.\n", __func__);
3322 } 3509 }
3323 3510
3324 return rval; 3511 return rval;
@@ -3334,7 +3521,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3334 struct device_reg_25xxmq __iomem *reg; 3521 struct device_reg_25xxmq __iomem *reg;
3335 struct qla_hw_data *ha = vha->hw; 3522 struct qla_hw_data *ha = vha->hw;
3336 3523
3337 ql_dbg(ql_dbg_mbx, vha, 0x10d3, "Entered %s.\n", __func__); 3524 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d3,
3525 "Entered %s.\n", __func__);
3338 3526
3339 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3527 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3340 mcp->mb[1] = req->options; 3528 mcp->mb[1] = req->options;
@@ -3388,7 +3576,8 @@ qla25xx_init_req_que(struct scsi_qla_host *vha, struct req_que *req)
3388 ql_dbg(ql_dbg_mbx, vha, 0x10d4, 3576 ql_dbg(ql_dbg_mbx, vha, 0x10d4,
3389 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3577 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3390 } else { 3578 } else {
3391 ql_dbg(ql_dbg_mbx, vha, 0x10d5, "Done %s.\n", __func__); 3579 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d5,
3580 "Done %s.\n", __func__);
3392 } 3581 }
3393 3582
3394 return rval; 3583 return rval;
@@ -3404,7 +3593,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3404 struct device_reg_25xxmq __iomem *reg; 3593 struct device_reg_25xxmq __iomem *reg;
3405 struct qla_hw_data *ha = vha->hw; 3594 struct qla_hw_data *ha = vha->hw;
3406 3595
3407 ql_dbg(ql_dbg_mbx, vha, 0x10d6, "Entered %s.\n", __func__); 3596 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d6,
3597 "Entered %s.\n", __func__);
3408 3598
3409 mcp->mb[0] = MBC_INITIALIZE_MULTIQ; 3599 mcp->mb[0] = MBC_INITIALIZE_MULTIQ;
3410 mcp->mb[1] = rsp->options; 3600 mcp->mb[1] = rsp->options;
@@ -3456,7 +3646,8 @@ qla25xx_init_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp)
3456 ql_dbg(ql_dbg_mbx, vha, 0x10d7, 3646 ql_dbg(ql_dbg_mbx, vha, 0x10d7,
3457 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3647 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3458 } else { 3648 } else {
3459 ql_dbg(ql_dbg_mbx, vha, 0x10d8, "Done %s.\n", __func__); 3649 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d8,
3650 "Done %s.\n", __func__);
3460 } 3651 }
3461 3652
3462 return rval; 3653 return rval;
@@ -3469,7 +3660,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3469 mbx_cmd_t mc; 3660 mbx_cmd_t mc;
3470 mbx_cmd_t *mcp = &mc; 3661 mbx_cmd_t *mcp = &mc;
3471 3662
3472 ql_dbg(ql_dbg_mbx, vha, 0x10d9, "Entered %s.\n", __func__); 3663 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10d9,
3664 "Entered %s.\n", __func__);
3473 3665
3474 mcp->mb[0] = MBC_IDC_ACK; 3666 mcp->mb[0] = MBC_IDC_ACK;
3475 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t)); 3667 memcpy(&mcp->mb[1], mb, QLA_IDC_ACK_REGS * sizeof(uint16_t));
@@ -3483,7 +3675,8 @@ qla81xx_idc_ack(scsi_qla_host_t *vha, uint16_t *mb)
3483 ql_dbg(ql_dbg_mbx, vha, 0x10da, 3675 ql_dbg(ql_dbg_mbx, vha, 0x10da,
3484 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3676 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3485 } else { 3677 } else {
3486 ql_dbg(ql_dbg_mbx, vha, 0x10db, "Done %s.\n", __func__); 3678 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10db,
3679 "Done %s.\n", __func__);
3487 } 3680 }
3488 3681
3489 return rval; 3682 return rval;
@@ -3496,7 +3689,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3496 mbx_cmd_t mc; 3689 mbx_cmd_t mc;
3497 mbx_cmd_t *mcp = &mc; 3690 mbx_cmd_t *mcp = &mc;
3498 3691
3499 ql_dbg(ql_dbg_mbx, vha, 0x10dc, "Entered %s.\n", __func__); 3692 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10dc,
3693 "Entered %s.\n", __func__);
3500 3694
3501 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3695 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3502 return QLA_FUNCTION_FAILED; 3696 return QLA_FUNCTION_FAILED;
@@ -3514,7 +3708,8 @@ qla81xx_fac_get_sector_size(scsi_qla_host_t *vha, uint32_t *sector_size)
3514 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3708 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3515 rval, mcp->mb[0], mcp->mb[1]); 3709 rval, mcp->mb[0], mcp->mb[1]);
3516 } else { 3710 } else {
3517 ql_dbg(ql_dbg_mbx, vha, 0x10de, "Done %s.\n", __func__); 3711 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10de,
3712 "Done %s.\n", __func__);
3518 *sector_size = mcp->mb[1]; 3713 *sector_size = mcp->mb[1];
3519 } 3714 }
3520 3715
@@ -3531,7 +3726,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3531 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3726 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3532 return QLA_FUNCTION_FAILED; 3727 return QLA_FUNCTION_FAILED;
3533 3728
3534 ql_dbg(ql_dbg_mbx, vha, 0x10df, "Entered %s.\n", __func__); 3729 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10df,
3730 "Entered %s.\n", __func__);
3535 3731
3536 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3732 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3537 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE : 3733 mcp->mb[1] = enable ? FAC_OPT_CMD_WRITE_ENABLE :
@@ -3547,7 +3743,8 @@ qla81xx_fac_do_write_enable(scsi_qla_host_t *vha, int enable)
3547 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3743 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3548 rval, mcp->mb[0], mcp->mb[1]); 3744 rval, mcp->mb[0], mcp->mb[1]);
3549 } else { 3745 } else {
3550 ql_dbg(ql_dbg_mbx, vha, 0x10e1, "Done %s.\n", __func__); 3746 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e1,
3747 "Done %s.\n", __func__);
3551 } 3748 }
3552 3749
3553 return rval; 3750 return rval;
@@ -3563,7 +3760,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3563 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw)) 3760 if (!IS_QLA81XX(vha->hw) && !IS_QLA83XX(vha->hw))
3564 return QLA_FUNCTION_FAILED; 3761 return QLA_FUNCTION_FAILED;
3565 3762
3566 ql_dbg(ql_dbg_mbx, vha, 0x10e2, "Entered %s.\n", __func__); 3763 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e2,
3764 "Entered %s.\n", __func__);
3567 3765
3568 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL; 3766 mcp->mb[0] = MBC_FLASH_ACCESS_CTRL;
3569 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR; 3767 mcp->mb[1] = FAC_OPT_CMD_ERASE_SECTOR;
@@ -3582,7 +3780,8 @@ qla81xx_fac_erase_sector(scsi_qla_host_t *vha, uint32_t start, uint32_t finish)
3582 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3780 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3583 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3781 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3584 } else { 3782 } else {
3585 ql_dbg(ql_dbg_mbx, vha, 0x10e4, "Done %s.\n", __func__); 3783 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e4,
3784 "Done %s.\n", __func__);
3586 } 3785 }
3587 3786
3588 return rval; 3787 return rval;
@@ -3595,7 +3794,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3595 mbx_cmd_t mc; 3794 mbx_cmd_t mc;
3596 mbx_cmd_t *mcp = &mc; 3795 mbx_cmd_t *mcp = &mc;
3597 3796
3598 ql_dbg(ql_dbg_mbx, vha, 0x10e5, "Entered %s.\n", __func__); 3797 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e5,
3798 "Entered %s.\n", __func__);
3599 3799
3600 mcp->mb[0] = MBC_RESTART_MPI_FW; 3800 mcp->mb[0] = MBC_RESTART_MPI_FW;
3601 mcp->out_mb = MBX_0; 3801 mcp->out_mb = MBX_0;
@@ -3609,7 +3809,8 @@ qla81xx_restart_mpi_firmware(scsi_qla_host_t *vha)
3609 "Failed=%x mb[0]=%x mb[1]=%x.\n", 3809 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3610 rval, mcp->mb[0], mcp->mb[1]); 3810 rval, mcp->mb[0], mcp->mb[1]);
3611 } else { 3811 } else {
3612 ql_dbg(ql_dbg_mbx, vha, 0x10e7, "Done %s.\n", __func__); 3812 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e7,
3813 "Done %s.\n", __func__);
3613 } 3814 }
3614 3815
3615 return rval; 3816 return rval;
@@ -3624,7 +3825,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3624 mbx_cmd_t *mcp = &mc; 3825 mbx_cmd_t *mcp = &mc;
3625 struct qla_hw_data *ha = vha->hw; 3826 struct qla_hw_data *ha = vha->hw;
3626 3827
3627 ql_dbg(ql_dbg_mbx, vha, 0x10e8, "Entered %s.\n", __func__); 3828 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10e8,
3829 "Entered %s.\n", __func__);
3628 3830
3629 if (!IS_FWI2_CAPABLE(ha)) 3831 if (!IS_FWI2_CAPABLE(ha))
3630 return QLA_FUNCTION_FAILED; 3832 return QLA_FUNCTION_FAILED;
@@ -3654,7 +3856,8 @@ qla2x00_read_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3654 ql_dbg(ql_dbg_mbx, vha, 0x10e9, 3856 ql_dbg(ql_dbg_mbx, vha, 0x10e9,
3655 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3857 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3656 } else { 3858 } else {
3657 ql_dbg(ql_dbg_mbx, vha, 0x10ea, "Done %s.\n", __func__); 3859 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ea,
3860 "Done %s.\n", __func__);
3658 } 3861 }
3659 3862
3660 return rval; 3863 return rval;
@@ -3669,7 +3872,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3669 mbx_cmd_t *mcp = &mc; 3872 mbx_cmd_t *mcp = &mc;
3670 struct qla_hw_data *ha = vha->hw; 3873 struct qla_hw_data *ha = vha->hw;
3671 3874
3672 ql_dbg(ql_dbg_mbx, vha, 0x10eb, "Entered %s.\n", __func__); 3875 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10eb,
3876 "Entered %s.\n", __func__);
3673 3877
3674 if (!IS_FWI2_CAPABLE(ha)) 3878 if (!IS_FWI2_CAPABLE(ha))
3675 return QLA_FUNCTION_FAILED; 3879 return QLA_FUNCTION_FAILED;
@@ -3699,7 +3903,8 @@ qla2x00_write_sfp(scsi_qla_host_t *vha, dma_addr_t sfp_dma, uint8_t *sfp,
3699 ql_dbg(ql_dbg_mbx, vha, 0x10ec, 3903 ql_dbg(ql_dbg_mbx, vha, 0x10ec,
3700 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 3904 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3701 } else { 3905 } else {
3702 ql_dbg(ql_dbg_mbx, vha, 0x10ed, "Done %s.\n", __func__); 3906 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ed,
3907 "Done %s.\n", __func__);
3703 } 3908 }
3704 3909
3705 return rval; 3910 return rval;
@@ -3713,7 +3918,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3713 mbx_cmd_t mc; 3918 mbx_cmd_t mc;
3714 mbx_cmd_t *mcp = &mc; 3919 mbx_cmd_t *mcp = &mc;
3715 3920
3716 ql_dbg(ql_dbg_mbx, vha, 0x10ee, "Entered %s.\n", __func__); 3921 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ee,
3922 "Entered %s.\n", __func__);
3717 3923
3718 if (!IS_CNA_CAPABLE(vha->hw)) 3924 if (!IS_CNA_CAPABLE(vha->hw))
3719 return QLA_FUNCTION_FAILED; 3925 return QLA_FUNCTION_FAILED;
@@ -3735,7 +3941,8 @@ qla2x00_get_xgmac_stats(scsi_qla_host_t *vha, dma_addr_t stats_dma,
3735 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3941 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3736 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3942 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3737 } else { 3943 } else {
3738 ql_dbg(ql_dbg_mbx, vha, 0x10f0, "Done %s.\n", __func__); 3944 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f0,
3945 "Done %s.\n", __func__);
3739 3946
3740 3947
3741 *actual_size = mcp->mb[2] << 2; 3948 *actual_size = mcp->mb[2] << 2;
@@ -3752,7 +3959,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3752 mbx_cmd_t mc; 3959 mbx_cmd_t mc;
3753 mbx_cmd_t *mcp = &mc; 3960 mbx_cmd_t *mcp = &mc;
3754 3961
3755 ql_dbg(ql_dbg_mbx, vha, 0x10f1, "Entered %s.\n", __func__); 3962 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f1,
3963 "Entered %s.\n", __func__);
3756 3964
3757 if (!IS_CNA_CAPABLE(vha->hw)) 3965 if (!IS_CNA_CAPABLE(vha->hw))
3758 return QLA_FUNCTION_FAILED; 3966 return QLA_FUNCTION_FAILED;
@@ -3775,7 +3983,8 @@ qla2x00_get_dcbx_params(scsi_qla_host_t *vha, dma_addr_t tlv_dma,
3775 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n", 3983 "Failed=%x mb[0]=%x mb[1]=%x mb[2]=%x.\n",
3776 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]); 3984 rval, mcp->mb[0], mcp->mb[1], mcp->mb[2]);
3777 } else { 3985 } else {
3778 ql_dbg(ql_dbg_mbx, vha, 0x10f3, "Done %s.\n", __func__); 3986 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f3,
3987 "Done %s.\n", __func__);
3779 } 3988 }
3780 3989
3781 return rval; 3990 return rval;
@@ -3788,7 +3997,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3788 mbx_cmd_t mc; 3997 mbx_cmd_t mc;
3789 mbx_cmd_t *mcp = &mc; 3998 mbx_cmd_t *mcp = &mc;
3790 3999
3791 ql_dbg(ql_dbg_mbx, vha, 0x10f4, "Entered %s.\n", __func__); 4000 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f4,
4001 "Entered %s.\n", __func__);
3792 4002
3793 if (!IS_FWI2_CAPABLE(vha->hw)) 4003 if (!IS_FWI2_CAPABLE(vha->hw))
3794 return QLA_FUNCTION_FAILED; 4004 return QLA_FUNCTION_FAILED;
@@ -3805,7 +4015,8 @@ qla2x00_read_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t *data)
3805 ql_dbg(ql_dbg_mbx, vha, 0x10f5, 4015 ql_dbg(ql_dbg_mbx, vha, 0x10f5,
3806 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4016 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3807 } else { 4017 } else {
3808 ql_dbg(ql_dbg_mbx, vha, 0x10f6, "Done %s.\n", __func__); 4018 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f6,
4019 "Done %s.\n", __func__);
3809 *data = mcp->mb[3] << 16 | mcp->mb[2]; 4020 *data = mcp->mb[3] << 16 | mcp->mb[2];
3810 } 4021 }
3811 4022
@@ -3821,7 +4032,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3821 mbx_cmd_t *mcp = &mc; 4032 mbx_cmd_t *mcp = &mc;
3822 uint32_t iter_cnt = 0x1; 4033 uint32_t iter_cnt = 0x1;
3823 4034
3824 ql_dbg(ql_dbg_mbx, vha, 0x10f7, "Entered %s.\n", __func__); 4035 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f7,
4036 "Entered %s.\n", __func__);
3825 4037
3826 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4038 memset(mcp->mb, 0 , sizeof(mcp->mb));
3827 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK; 4039 mcp->mb[0] = MBC_DIAGNOSTIC_LOOP_BACK;
@@ -3865,7 +4077,8 @@ qla2x00_loopback_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3865 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2], 4077 "mb[19]=%x.\n", rval, mcp->mb[0], mcp->mb[1], mcp->mb[2],
3866 mcp->mb[3], mcp->mb[18], mcp->mb[19]); 4078 mcp->mb[3], mcp->mb[18], mcp->mb[19]);
3867 } else { 4079 } else {
3868 ql_dbg(ql_dbg_mbx, vha, 0x10f9, "Done %s.\n", __func__); 4080 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10f9,
4081 "Done %s.\n", __func__);
3869 } 4082 }
3870 4083
3871 /* Copy mailbox information */ 4084 /* Copy mailbox information */
@@ -3882,7 +4095,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3882 mbx_cmd_t *mcp = &mc; 4095 mbx_cmd_t *mcp = &mc;
3883 struct qla_hw_data *ha = vha->hw; 4096 struct qla_hw_data *ha = vha->hw;
3884 4097
3885 ql_dbg(ql_dbg_mbx, vha, 0x10fa, "Entered %s.\n", __func__); 4098 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fa,
4099 "Entered %s.\n", __func__);
3886 4100
3887 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4101 memset(mcp->mb, 0 , sizeof(mcp->mb));
3888 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO; 4102 mcp->mb[0] = MBC_DIAGNOSTIC_ECHO;
@@ -3926,7 +4140,8 @@ qla2x00_echo_test(scsi_qla_host_t *vha, struct msg_echo_lb *mreq,
3926 "Failed=%x mb[0]=%x mb[1]=%x.\n", 4140 "Failed=%x mb[0]=%x mb[1]=%x.\n",
3927 rval, mcp->mb[0], mcp->mb[1]); 4141 rval, mcp->mb[0], mcp->mb[1]);
3928 } else { 4142 } else {
3929 ql_dbg(ql_dbg_mbx, vha, 0x10fc, "Done %s.\n", __func__); 4143 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fc,
4144 "Done %s.\n", __func__);
3930 } 4145 }
3931 4146
3932 /* Copy mailbox information */ 4147 /* Copy mailbox information */
@@ -3941,7 +4156,7 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3941 mbx_cmd_t mc; 4156 mbx_cmd_t mc;
3942 mbx_cmd_t *mcp = &mc; 4157 mbx_cmd_t *mcp = &mc;
3943 4158
3944 ql_dbg(ql_dbg_mbx, vha, 0x10fd, 4159 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10fd,
3945 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic); 4160 "Entered %s enable_diag=%d.\n", __func__, enable_diagnostic);
3946 4161
3947 mcp->mb[0] = MBC_ISP84XX_RESET; 4162 mcp->mb[0] = MBC_ISP84XX_RESET;
@@ -3955,7 +4170,8 @@ qla84xx_reset_chip(scsi_qla_host_t *vha, uint16_t enable_diagnostic)
3955 if (rval != QLA_SUCCESS) 4170 if (rval != QLA_SUCCESS)
3956 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval); 4171 ql_dbg(ql_dbg_mbx, vha, 0x10fe, "Failed=%x.\n", rval);
3957 else 4172 else
3958 ql_dbg(ql_dbg_mbx, vha, 0x10ff, "Done %s.\n", __func__); 4173 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ff,
4174 "Done %s.\n", __func__);
3959 4175
3960 return rval; 4176 return rval;
3961} 4177}
@@ -3967,7 +4183,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3967 mbx_cmd_t mc; 4183 mbx_cmd_t mc;
3968 mbx_cmd_t *mcp = &mc; 4184 mbx_cmd_t *mcp = &mc;
3969 4185
3970 ql_dbg(ql_dbg_mbx, vha, 0x1100, "Entered %s.\n", __func__); 4186 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1100,
4187 "Entered %s.\n", __func__);
3971 4188
3972 if (!IS_FWI2_CAPABLE(vha->hw)) 4189 if (!IS_FWI2_CAPABLE(vha->hw))
3973 return QLA_FUNCTION_FAILED; 4190 return QLA_FUNCTION_FAILED;
@@ -3986,7 +4203,8 @@ qla2x00_write_ram_word(scsi_qla_host_t *vha, uint32_t risc_addr, uint32_t data)
3986 ql_dbg(ql_dbg_mbx, vha, 0x1101, 4203 ql_dbg(ql_dbg_mbx, vha, 0x1101,
3987 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4204 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
3988 } else { 4205 } else {
3989 ql_dbg(ql_dbg_mbx, vha, 0x1102, "Done %s.\n", __func__); 4206 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1102,
4207 "Done %s.\n", __func__);
3990 } 4208 }
3991 4209
3992 return rval; 4210 return rval;
@@ -4003,7 +4221,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4003 4221
4004 rval = QLA_SUCCESS; 4222 rval = QLA_SUCCESS;
4005 4223
4006 ql_dbg(ql_dbg_mbx, vha, 0x1103, "Entered %s.\n", __func__); 4224 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1103,
4225 "Entered %s.\n", __func__);
4007 4226
4008 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags); 4227 clear_bit(MBX_INTERRUPT, &ha->mbx_cmd_flags);
4009 4228
@@ -4046,7 +4265,8 @@ qla81xx_write_mpi_register(scsi_qla_host_t *vha, uint16_t *mb)
4046 ql_dbg(ql_dbg_mbx, vha, 0x1104, 4265 ql_dbg(ql_dbg_mbx, vha, 0x1104,
4047 "Failed=%x mb[0]=%x.\n", rval, mb[0]); 4266 "Failed=%x mb[0]=%x.\n", rval, mb[0]);
4048 } else { 4267 } else {
4049 ql_dbg(ql_dbg_mbx, vha, 0x1105, "Done %s.\n", __func__); 4268 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1105,
4269 "Done %s.\n", __func__);
4050 } 4270 }
4051 4271
4052 return rval; 4272 return rval;
@@ -4060,7 +4280,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4060 mbx_cmd_t *mcp = &mc; 4280 mbx_cmd_t *mcp = &mc;
4061 struct qla_hw_data *ha = vha->hw; 4281 struct qla_hw_data *ha = vha->hw;
4062 4282
4063 ql_dbg(ql_dbg_mbx, vha, 0x1106, "Entered %s.\n", __func__); 4283 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1106,
4284 "Entered %s.\n", __func__);
4064 4285
4065 if (!IS_FWI2_CAPABLE(ha)) 4286 if (!IS_FWI2_CAPABLE(ha))
4066 return QLA_FUNCTION_FAILED; 4287 return QLA_FUNCTION_FAILED;
@@ -4078,7 +4299,8 @@ qla2x00_get_data_rate(scsi_qla_host_t *vha)
4078 ql_dbg(ql_dbg_mbx, vha, 0x1107, 4299 ql_dbg(ql_dbg_mbx, vha, 0x1107,
4079 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4300 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4080 } else { 4301 } else {
4081 ql_dbg(ql_dbg_mbx, vha, 0x1108, "Done %s.\n", __func__); 4302 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1108,
4303 "Done %s.\n", __func__);
4082 if (mcp->mb[1] != 0x7) 4304 if (mcp->mb[1] != 0x7)
4083 ha->link_data_rate = mcp->mb[1]; 4305 ha->link_data_rate = mcp->mb[1];
4084 } 4306 }
@@ -4094,7 +4316,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4094 mbx_cmd_t *mcp = &mc; 4316 mbx_cmd_t *mcp = &mc;
4095 struct qla_hw_data *ha = vha->hw; 4317 struct qla_hw_data *ha = vha->hw;
4096 4318
4097 ql_dbg(ql_dbg_mbx, vha, 0x1109, "Entered %s.\n", __func__); 4319 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1109,
4320 "Entered %s.\n", __func__);
4098 4321
4099 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha)) 4322 if (!IS_QLA81XX(ha) && !IS_QLA83XX(ha))
4100 return QLA_FUNCTION_FAILED; 4323 return QLA_FUNCTION_FAILED;
@@ -4113,7 +4336,8 @@ qla81xx_get_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4113 /* Copy all bits to preserve original value */ 4336 /* Copy all bits to preserve original value */
4114 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4); 4337 memcpy(mb, &mcp->mb[1], sizeof(uint16_t) * 4);
4115 4338
4116 ql_dbg(ql_dbg_mbx, vha, 0x110b, "Done %s.\n", __func__); 4339 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110b,
4340 "Done %s.\n", __func__);
4117 } 4341 }
4118 return rval; 4342 return rval;
4119} 4343}
@@ -4125,7 +4349,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4125 mbx_cmd_t mc; 4349 mbx_cmd_t mc;
4126 mbx_cmd_t *mcp = &mc; 4350 mbx_cmd_t *mcp = &mc;
4127 4351
4128 ql_dbg(ql_dbg_mbx, vha, 0x110c, "Entered %s.\n", __func__); 4352 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110c,
4353 "Entered %s.\n", __func__);
4129 4354
4130 mcp->mb[0] = MBC_SET_PORT_CONFIG; 4355 mcp->mb[0] = MBC_SET_PORT_CONFIG;
4131 /* Copy all bits to preserve original setting */ 4356 /* Copy all bits to preserve original setting */
@@ -4140,7 +4365,8 @@ qla81xx_set_port_config(scsi_qla_host_t *vha, uint16_t *mb)
4140 ql_dbg(ql_dbg_mbx, vha, 0x110d, 4365 ql_dbg(ql_dbg_mbx, vha, 0x110d,
4141 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4366 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4142 } else 4367 } else
4143 ql_dbg(ql_dbg_mbx, vha, 0x110e, "Done %s.\n", __func__); 4368 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110e,
4369 "Done %s.\n", __func__);
4144 4370
4145 return rval; 4371 return rval;
4146} 4372}
@@ -4155,7 +4381,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4155 mbx_cmd_t *mcp = &mc; 4381 mbx_cmd_t *mcp = &mc;
4156 struct qla_hw_data *ha = vha->hw; 4382 struct qla_hw_data *ha = vha->hw;
4157 4383
4158 ql_dbg(ql_dbg_mbx, vha, 0x110f, "Entered %s.\n", __func__); 4384 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x110f,
4385 "Entered %s.\n", __func__);
4159 4386
4160 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha)) 4387 if (!IS_QLA24XX_TYPE(ha) && !IS_QLA25XX(ha))
4161 return QLA_FUNCTION_FAILED; 4388 return QLA_FUNCTION_FAILED;
@@ -4183,7 +4410,8 @@ qla24xx_set_fcp_prio(scsi_qla_host_t *vha, uint16_t loop_id, uint16_t priority,
4183 if (rval != QLA_SUCCESS) { 4410 if (rval != QLA_SUCCESS) {
4184 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval); 4411 ql_dbg(ql_dbg_mbx, vha, 0x10cd, "Failed=%x.\n", rval);
4185 } else { 4412 } else {
4186 ql_dbg(ql_dbg_mbx, vha, 0x10cc, "Done %s.\n", __func__); 4413 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10cc,
4414 "Done %s.\n", __func__);
4187 } 4415 }
4188 4416
4189 return rval; 4417 return rval;
@@ -4196,7 +4424,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4196 uint8_t byte; 4424 uint8_t byte;
4197 struct qla_hw_data *ha = vha->hw; 4425 struct qla_hw_data *ha = vha->hw;
4198 4426
4199 ql_dbg(ql_dbg_mbx, vha, 0x10ca, "Entered %s.\n", __func__); 4427 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x10ca,
4428 "Entered %s.\n", __func__);
4200 4429
4201 /* Integer part */ 4430 /* Integer part */
4202 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0); 4431 rval = qla2x00_read_sfp(vha, 0, &byte, 0x98, 0x01, 1, BIT_13|BIT_0);
@@ -4216,7 +4445,8 @@ qla2x00_get_thermal_temp(scsi_qla_host_t *vha, uint16_t *temp, uint16_t *frac)
4216 } 4445 }
4217 *frac = (byte >> 6) * 25; 4446 *frac = (byte >> 6) * 25;
4218 4447
4219 ql_dbg(ql_dbg_mbx, vha, 0x1018, "Done %s.\n", __func__); 4448 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1018,
4449 "Done %s.\n", __func__);
4220fail: 4450fail:
4221 return rval; 4451 return rval;
4222} 4452}
@@ -4229,7 +4459,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4229 mbx_cmd_t mc; 4459 mbx_cmd_t mc;
4230 mbx_cmd_t *mcp = &mc; 4460 mbx_cmd_t *mcp = &mc;
4231 4461
4232 ql_dbg(ql_dbg_mbx, vha, 0x1017, "Entered %s.\n", __func__); 4462 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1017,
4463 "Entered %s.\n", __func__);
4233 4464
4234 if (!IS_FWI2_CAPABLE(ha)) 4465 if (!IS_FWI2_CAPABLE(ha))
4235 return QLA_FUNCTION_FAILED; 4466 return QLA_FUNCTION_FAILED;
@@ -4248,7 +4479,8 @@ qla82xx_mbx_intr_enable(scsi_qla_host_t *vha)
4248 ql_dbg(ql_dbg_mbx, vha, 0x1016, 4479 ql_dbg(ql_dbg_mbx, vha, 0x1016,
4249 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4480 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4250 } else { 4481 } else {
4251 ql_dbg(ql_dbg_mbx, vha, 0x100e, "Done %s.\n", __func__); 4482 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100e,
4483 "Done %s.\n", __func__);
4252 } 4484 }
4253 4485
4254 return rval; 4486 return rval;
@@ -4262,7 +4494,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4262 mbx_cmd_t mc; 4494 mbx_cmd_t mc;
4263 mbx_cmd_t *mcp = &mc; 4495 mbx_cmd_t *mcp = &mc;
4264 4496
4265 ql_dbg(ql_dbg_mbx, vha, 0x100d, "Entered %s.\n", __func__); 4497 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100d,
4498 "Entered %s.\n", __func__);
4266 4499
4267 if (!IS_QLA82XX(ha)) 4500 if (!IS_QLA82XX(ha))
4268 return QLA_FUNCTION_FAILED; 4501 return QLA_FUNCTION_FAILED;
@@ -4281,7 +4514,8 @@ qla82xx_mbx_intr_disable(scsi_qla_host_t *vha)
4281 ql_dbg(ql_dbg_mbx, vha, 0x100c, 4514 ql_dbg(ql_dbg_mbx, vha, 0x100c,
4282 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4515 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4283 } else { 4516 } else {
4284 ql_dbg(ql_dbg_mbx, vha, 0x100b, "Done %s.\n", __func__); 4517 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x100b,
4518 "Done %s.\n", __func__);
4285 } 4519 }
4286 4520
4287 return rval; 4521 return rval;
@@ -4295,7 +4529,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4295 mbx_cmd_t *mcp = &mc; 4529 mbx_cmd_t *mcp = &mc;
4296 int rval = QLA_FUNCTION_FAILED; 4530 int rval = QLA_FUNCTION_FAILED;
4297 4531
4298 ql_dbg(ql_dbg_mbx, vha, 0x111f, "Entered %s.\n", __func__); 4532 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x111f,
4533 "Entered %s.\n", __func__);
4299 4534
4300 memset(mcp->mb, 0 , sizeof(mcp->mb)); 4535 memset(mcp->mb, 0 , sizeof(mcp->mb));
4301 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE); 4536 mcp->mb[0] = LSW(MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE);
@@ -4318,7 +4553,8 @@ qla82xx_md_get_template_size(scsi_qla_host_t *vha)
4318 (mcp->mb[1] << 16) | mcp->mb[0], 4553 (mcp->mb[1] << 16) | mcp->mb[0],
4319 (mcp->mb[3] << 16) | mcp->mb[2]); 4554 (mcp->mb[3] << 16) | mcp->mb[2]);
4320 } else { 4555 } else {
4321 ql_dbg(ql_dbg_mbx, vha, 0x1121, "Done %s.\n", __func__); 4556 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1121,
4557 "Done %s.\n", __func__);
4322 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]); 4558 ha->md_template_size = ((mcp->mb[3] << 16) | mcp->mb[2]);
4323 if (!ha->md_template_size) { 4559 if (!ha->md_template_size) {
4324 ql_dbg(ql_dbg_mbx, vha, 0x1122, 4560 ql_dbg(ql_dbg_mbx, vha, 0x1122,
@@ -4337,7 +4573,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4337 mbx_cmd_t *mcp = &mc; 4573 mbx_cmd_t *mcp = &mc;
4338 int rval = QLA_FUNCTION_FAILED; 4574 int rval = QLA_FUNCTION_FAILED;
4339 4575
4340 ql_dbg(ql_dbg_mbx, vha, 0x1123, "Entered %s.\n", __func__); 4576 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1123,
4577 "Entered %s.\n", __func__);
4341 4578
4342 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev, 4579 ha->md_tmplt_hdr = dma_alloc_coherent(&ha->pdev->dev,
4343 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL); 4580 ha->md_template_size, &ha->md_tmplt_hdr_dma, GFP_KERNEL);
@@ -4372,7 +4609,8 @@ qla82xx_md_get_template(scsi_qla_host_t *vha)
4372 ((mcp->mb[1] << 16) | mcp->mb[0]), 4609 ((mcp->mb[1] << 16) | mcp->mb[0]),
4373 ((mcp->mb[3] << 16) | mcp->mb[2])); 4610 ((mcp->mb[3] << 16) | mcp->mb[2]));
4374 } else 4611 } else
4375 ql_dbg(ql_dbg_mbx, vha, 0x1126, "Done %s.\n", __func__); 4612 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1126,
4613 "Done %s.\n", __func__);
4376 return rval; 4614 return rval;
4377} 4615}
4378 4616
@@ -4387,7 +4625,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4387 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4625 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4388 return QLA_FUNCTION_FAILED; 4626 return QLA_FUNCTION_FAILED;
4389 4627
4390 ql_dbg(ql_dbg_mbx, vha, 0x1133, "Entered %s.\n", __func__); 4628 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1133,
4629 "Entered %s.\n", __func__);
4391 4630
4392 memset(mcp, 0, sizeof(mbx_cmd_t)); 4631 memset(mcp, 0, sizeof(mbx_cmd_t));
4393 mcp->mb[0] = MBC_SET_LED_CONFIG; 4632 mcp->mb[0] = MBC_SET_LED_CONFIG;
@@ -4412,7 +4651,8 @@ qla81xx_set_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4412 ql_dbg(ql_dbg_mbx, vha, 0x1134, 4651 ql_dbg(ql_dbg_mbx, vha, 0x1134,
4413 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4652 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4414 } else { 4653 } else {
4415 ql_dbg(ql_dbg_mbx, vha, 0x1135, "Done %s.\n", __func__); 4654 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1135,
4655 "Done %s.\n", __func__);
4416 } 4656 }
4417 4657
4418 return rval; 4658 return rval;
@@ -4429,7 +4669,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4429 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha)) 4669 if (!IS_QLA81XX(ha) && !IS_QLA8031(ha))
4430 return QLA_FUNCTION_FAILED; 4670 return QLA_FUNCTION_FAILED;
4431 4671
4432 ql_dbg(ql_dbg_mbx, vha, 0x1136, "Entered %s.\n", __func__); 4672 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1136,
4673 "Entered %s.\n", __func__);
4433 4674
4434 memset(mcp, 0, sizeof(mbx_cmd_t)); 4675 memset(mcp, 0, sizeof(mbx_cmd_t));
4435 mcp->mb[0] = MBC_GET_LED_CONFIG; 4676 mcp->mb[0] = MBC_GET_LED_CONFIG;
@@ -4454,7 +4695,8 @@ qla81xx_get_led_config(scsi_qla_host_t *vha, uint16_t *led_cfg)
4454 led_cfg[4] = mcp->mb[5]; 4695 led_cfg[4] = mcp->mb[5];
4455 led_cfg[5] = mcp->mb[6]; 4696 led_cfg[5] = mcp->mb[6];
4456 } 4697 }
4457 ql_dbg(ql_dbg_mbx, vha, 0x1138, "Done %s.\n", __func__); 4698 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1138,
4699 "Done %s.\n", __func__);
4458 } 4700 }
4459 4701
4460 return rval; 4702 return rval;
@@ -4471,7 +4713,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4471 if (!IS_QLA82XX(ha)) 4713 if (!IS_QLA82XX(ha))
4472 return QLA_FUNCTION_FAILED; 4714 return QLA_FUNCTION_FAILED;
4473 4715
4474 ql_dbg(ql_dbg_mbx, vha, 0x1127, 4716 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1127,
4475 "Entered %s.\n", __func__); 4717 "Entered %s.\n", __func__);
4476 4718
4477 memset(mcp, 0, sizeof(mbx_cmd_t)); 4719 memset(mcp, 0, sizeof(mbx_cmd_t));
@@ -4491,7 +4733,7 @@ qla82xx_mbx_beacon_ctl(scsi_qla_host_t *vha, int enable)
4491 ql_dbg(ql_dbg_mbx, vha, 0x1128, 4733 ql_dbg(ql_dbg_mbx, vha, 0x1128,
4492 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4734 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4493 } else { 4735 } else {
4494 ql_dbg(ql_dbg_mbx, vha, 0x1129, 4736 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1129,
4495 "Done %s.\n", __func__); 4737 "Done %s.\n", __func__);
4496 } 4738 }
4497 4739
@@ -4509,7 +4751,8 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4509 if (!IS_QLA83XX(ha)) 4751 if (!IS_QLA83XX(ha))
4510 return QLA_FUNCTION_FAILED; 4752 return QLA_FUNCTION_FAILED;
4511 4753
4512 ql_dbg(ql_dbg_mbx, vha, 0x1130, "Entered %s.\n", __func__); 4754 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1130,
4755 "Entered %s.\n", __func__);
4513 4756
4514 mcp->mb[0] = MBC_WRITE_REMOTE_REG; 4757 mcp->mb[0] = MBC_WRITE_REMOTE_REG;
4515 mcp->mb[1] = LSW(reg); 4758 mcp->mb[1] = LSW(reg);
@@ -4527,7 +4770,7 @@ qla83xx_write_remote_reg(scsi_qla_host_t *vha, uint32_t reg, uint32_t data)
4527 ql_dbg(ql_dbg_mbx, vha, 0x1131, 4770 ql_dbg(ql_dbg_mbx, vha, 0x1131,
4528 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4771 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4529 } else { 4772 } else {
4530 ql_dbg(ql_dbg_mbx, vha, 0x1132, 4773 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x1132,
4531 "Done %s.\n", __func__); 4774 "Done %s.\n", __func__);
4532 } 4775 }
4533 4776
@@ -4543,13 +4786,14 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4543 mbx_cmd_t *mcp = &mc; 4786 mbx_cmd_t *mcp = &mc;
4544 4787
4545 if (IS_QLA2100(ha) || IS_QLA2200(ha)) { 4788 if (IS_QLA2100(ha) || IS_QLA2200(ha)) {
4546 ql_dbg(ql_dbg_mbx, vha, 0x113b, 4789 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113b,
4547 "Implicit LOGO Unsupported.\n"); 4790 "Implicit LOGO Unsupported.\n");
4548 return QLA_FUNCTION_FAILED; 4791 return QLA_FUNCTION_FAILED;
4549 } 4792 }
4550 4793
4551 4794
4552 ql_dbg(ql_dbg_mbx, vha, 0x113c, "Done %s.\n", __func__); 4795 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113c,
4796 "Entering %s.\n", __func__);
4553 4797
4554 /* Perform Implicit LOGO. */ 4798 /* Perform Implicit LOGO. */
4555 mcp->mb[0] = MBC_PORT_LOGOUT; 4799 mcp->mb[0] = MBC_PORT_LOGOUT;
@@ -4564,7 +4808,8 @@ qla2x00_port_logout(scsi_qla_host_t *vha, struct fc_port *fcport)
4564 ql_dbg(ql_dbg_mbx, vha, 0x113d, 4808 ql_dbg(ql_dbg_mbx, vha, 0x113d,
4565 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]); 4809 "Failed=%x mb[0]=%x.\n", rval, mcp->mb[0]);
4566 else 4810 else
4567 ql_dbg(ql_dbg_mbx, vha, 0x113e, "Done %s.\n", __func__); 4811 ql_dbg(ql_dbg_mbx + ql_dbg_verbose, vha, 0x113e,
4812 "Done %s.\n", __func__);
4568 4813
4569 return rval; 4814 return rval;
4570} 4815}
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index aa062a1b0ca4..3e8b32419e68 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -6,6 +6,7 @@
6 */ 6 */
7#include "qla_def.h" 7#include "qla_def.h"
8#include "qla_gbl.h" 8#include "qla_gbl.h"
9#include "qla_target.h"
9 10
10#include <linux/moduleparam.h> 11#include <linux/moduleparam.h>
11#include <linux/vmalloc.h> 12#include <linux/vmalloc.h>
@@ -49,6 +50,9 @@ qla24xx_allocate_vp_id(scsi_qla_host_t *vha)
49 50
50 spin_lock_irqsave(&ha->vport_slock, flags); 51 spin_lock_irqsave(&ha->vport_slock, flags);
51 list_add_tail(&vha->list, &ha->vp_list); 52 list_add_tail(&vha->list, &ha->vp_list);
53
54 qlt_update_vp_map(vha, SET_VP_IDX);
55
52 spin_unlock_irqrestore(&ha->vport_slock, flags); 56 spin_unlock_irqrestore(&ha->vport_slock, flags);
53 57
54 mutex_unlock(&ha->vport_lock); 58 mutex_unlock(&ha->vport_lock);
@@ -79,6 +83,7 @@ qla24xx_deallocate_vp_id(scsi_qla_host_t *vha)
79 spin_lock_irqsave(&ha->vport_slock, flags); 83 spin_lock_irqsave(&ha->vport_slock, flags);
80 } 84 }
81 list_del(&vha->list); 85 list_del(&vha->list);
86 qlt_update_vp_map(vha, RESET_VP_IDX);
82 spin_unlock_irqrestore(&ha->vport_slock, flags); 87 spin_unlock_irqrestore(&ha->vport_slock, flags);
83 88
84 vp_id = vha->vp_idx; 89 vp_id = vha->vp_idx;
@@ -134,7 +139,7 @@ qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha)
134 list_for_each_entry(fcport, &vha->vp_fcports, list) { 139 list_for_each_entry(fcport, &vha->vp_fcports, list) {
135 ql_dbg(ql_dbg_vport, vha, 0xa001, 140 ql_dbg(ql_dbg_vport, vha, 0xa001,
136 "Marking port dead, loop_id=0x%04x : %x.\n", 141 "Marking port dead, loop_id=0x%04x : %x.\n",
137 fcport->loop_id, fcport->vp_idx); 142 fcport->loop_id, fcport->vha->vp_idx);
138 143
139 qla2x00_mark_device_lost(vha, fcport, 0, 0); 144 qla2x00_mark_device_lost(vha, fcport, 0, 0);
140 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); 145 qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED);
@@ -150,6 +155,9 @@ qla24xx_disable_vp(scsi_qla_host_t *vha)
150 atomic_set(&vha->loop_state, LOOP_DOWN); 155 atomic_set(&vha->loop_state, LOOP_DOWN);
151 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); 156 atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME);
152 157
158 /* Remove port id from vp target map */
159 qlt_update_vp_map(vha, RESET_AL_PA);
160
153 qla2x00_mark_vp_devices_dead(vha); 161 qla2x00_mark_vp_devices_dead(vha);
154 atomic_set(&vha->vp_state, VP_FAILED); 162 atomic_set(&vha->vp_state, VP_FAILED);
155 vha->flags.management_server_logged_in = 0; 163 vha->flags.management_server_logged_in = 0;
@@ -295,10 +303,8 @@ qla2x00_vp_abort_isp(scsi_qla_host_t *vha)
295static int 303static int
296qla2x00_do_dpc_vp(scsi_qla_host_t *vha) 304qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
297{ 305{
298 ql_dbg(ql_dbg_dpc, vha, 0x4012, 306 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x4012,
299 "Entering %s.\n", __func__); 307 "Entering %s vp_flags: 0x%lx.\n", __func__, vha->vp_flags);
300 ql_dbg(ql_dbg_dpc, vha, 0x4013,
301 "vp_flags: 0x%lx.\n", vha->vp_flags);
302 308
303 qla2x00_do_work(vha); 309 qla2x00_do_work(vha);
304 310
@@ -348,7 +354,7 @@ qla2x00_do_dpc_vp(scsi_qla_host_t *vha)
348 } 354 }
349 } 355 }
350 356
351 ql_dbg(ql_dbg_dpc, vha, 0x401c, 357 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, vha, 0x401c,
352 "Exiting %s.\n", __func__); 358 "Exiting %s.\n", __func__);
353 return 0; 359 return 0;
354} 360}
diff --git a/drivers/scsi/qla2xxx/qla_nx.c b/drivers/scsi/qla2xxx/qla_nx.c
index de722a933438..caf627ba7fa8 100644
--- a/drivers/scsi/qla2xxx/qla_nx.c
+++ b/drivers/scsi/qla2xxx/qla_nx.c
@@ -1190,12 +1190,12 @@ qla82xx_pinit_from_rom(scsi_qla_host_t *vha)
1190 } 1190 }
1191 1191
1192 /* Offset in flash = lower 16 bits 1192 /* Offset in flash = lower 16 bits
1193 * Number of enteries = upper 16 bits 1193 * Number of entries = upper 16 bits
1194 */ 1194 */
1195 offset = n & 0xffffU; 1195 offset = n & 0xffffU;
1196 n = (n >> 16) & 0xffffU; 1196 n = (n >> 16) & 0xffffU;
1197 1197
1198 /* number of addr/value pair should not exceed 1024 enteries */ 1198 /* number of addr/value pair should not exceed 1024 entries */
1199 if (n >= 1024) { 1199 if (n >= 1024) {
1200 ql_log(ql_log_fatal, vha, 0x0071, 1200 ql_log(ql_log_fatal, vha, 0x0071,
1201 "Card flash not initialized:n=0x%x.\n", n); 1201 "Card flash not initialized:n=0x%x.\n", n);
@@ -2050,7 +2050,7 @@ qla82xx_intr_handler(int irq, void *dev_id)
2050 2050
2051 rsp = (struct rsp_que *) dev_id; 2051 rsp = (struct rsp_que *) dev_id;
2052 if (!rsp) { 2052 if (!rsp) {
2053 ql_log(ql_log_info, NULL, 0xb054, 2053 ql_log(ql_log_info, NULL, 0xb053,
2054 "%s: NULL response queue pointer.\n", __func__); 2054 "%s: NULL response queue pointer.\n", __func__);
2055 return IRQ_NONE; 2055 return IRQ_NONE;
2056 } 2056 }
@@ -2446,7 +2446,7 @@ qla82xx_load_fw(scsi_qla_host_t *vha)
2446 2446
2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) { 2447 if (qla82xx_fw_load_from_flash(ha) == QLA_SUCCESS) {
2448 ql_log(ql_log_info, vha, 0x00a1, 2448 ql_log(ql_log_info, vha, 0x00a1,
2449 "Firmware loaded successully from flash.\n"); 2449 "Firmware loaded successfully from flash.\n");
2450 return QLA_SUCCESS; 2450 return QLA_SUCCESS;
2451 } else { 2451 } else {
2452 ql_log(ql_log_warn, vha, 0x0108, 2452 ql_log(ql_log_warn, vha, 0x0108,
@@ -2461,7 +2461,7 @@ try_blob_fw:
2461 blob = ha->hablob = qla2x00_request_firmware(vha); 2461 blob = ha->hablob = qla2x00_request_firmware(vha);
2462 if (!blob) { 2462 if (!blob) {
2463 ql_log(ql_log_fatal, vha, 0x00a3, 2463 ql_log(ql_log_fatal, vha, 0x00a3,
2464 "Firmware image not preset.\n"); 2464 "Firmware image not present.\n");
2465 goto fw_load_failed; 2465 goto fw_load_failed;
2466 } 2466 }
2467 2467
@@ -2689,7 +2689,7 @@ qla82xx_write_flash_data(struct scsi_qla_host *vha, uint32_t *dwptr,
2689 if (!optrom) { 2689 if (!optrom) {
2690 ql_log(ql_log_warn, vha, 0xb01b, 2690 ql_log(ql_log_warn, vha, 0xb01b,
2691 "Unable to allocate memory " 2691 "Unable to allocate memory "
2692 "for optron burst write (%x KB).\n", 2692 "for optrom burst write (%x KB).\n",
2693 OPTROM_BURST_SIZE / 1024); 2693 OPTROM_BURST_SIZE / 1024);
2694 } 2694 }
2695 } 2695 }
@@ -2960,9 +2960,8 @@ qla82xx_need_qsnt_handler(scsi_qla_host_t *vha)
2960 * changing the state to DEV_READY 2960 * changing the state to DEV_READY
2961 */ 2961 */
2962 ql_log(ql_log_info, vha, 0xb023, 2962 ql_log(ql_log_info, vha, 0xb023,
2963 "%s : QUIESCENT TIMEOUT.\n", QLA2XXX_DRIVER_NAME); 2963 "%s : QUIESCENT TIMEOUT DRV_ACTIVE:%d "
2964 ql_log(ql_log_info, vha, 0xb024, 2964 "DRV_STATE:%d.\n", QLA2XXX_DRIVER_NAME,
2965 "DRV_ACTIVE:%d DRV_STATE:%d.\n",
2966 drv_active, drv_state); 2965 drv_active, drv_state);
2967 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2966 qla82xx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2968 QLA82XX_DEV_READY); 2967 QLA82XX_DEV_READY);
@@ -3129,7 +3128,7 @@ qla82xx_need_reset_handler(scsi_qla_host_t *vha)
3129 if (ql2xmdenable) { 3128 if (ql2xmdenable) {
3130 if (qla82xx_md_collect(vha)) 3129 if (qla82xx_md_collect(vha))
3131 ql_log(ql_log_warn, vha, 0xb02c, 3130 ql_log(ql_log_warn, vha, 0xb02c,
3132 "Not able to collect minidump.\n"); 3131 "Minidump not collected.\n");
3133 } else 3132 } else
3134 ql_log(ql_log_warn, vha, 0xb04f, 3133 ql_log(ql_log_warn, vha, 0xb04f,
3135 "Minidump disabled.\n"); 3134 "Minidump disabled.\n");
@@ -3160,11 +3159,11 @@ qla82xx_check_md_needed(scsi_qla_host_t *vha)
3160 "Firmware version differs " 3159 "Firmware version differs "
3161 "Previous version: %d:%d:%d - " 3160 "Previous version: %d:%d:%d - "
3162 "New version: %d:%d:%d\n", 3161 "New version: %d:%d:%d\n",
3162 fw_major_version, fw_minor_version,
3163 fw_subminor_version,
3163 ha->fw_major_version, 3164 ha->fw_major_version,
3164 ha->fw_minor_version, 3165 ha->fw_minor_version,
3165 ha->fw_subminor_version, 3166 ha->fw_subminor_version);
3166 fw_major_version, fw_minor_version,
3167 fw_subminor_version);
3168 /* Release MiniDump resources */ 3167 /* Release MiniDump resources */
3169 qla82xx_md_free(vha); 3168 qla82xx_md_free(vha);
3170 /* ALlocate MiniDump resources */ 3169 /* ALlocate MiniDump resources */
@@ -3325,6 +3324,30 @@ exit:
3325 return rval; 3324 return rval;
3326} 3325}
3327 3326
3327static int qla82xx_check_temp(scsi_qla_host_t *vha)
3328{
3329 uint32_t temp, temp_state, temp_val;
3330 struct qla_hw_data *ha = vha->hw;
3331
3332 temp = qla82xx_rd_32(ha, CRB_TEMP_STATE);
3333 temp_state = qla82xx_get_temp_state(temp);
3334 temp_val = qla82xx_get_temp_val(temp);
3335
3336 if (temp_state == QLA82XX_TEMP_PANIC) {
3337 ql_log(ql_log_warn, vha, 0x600e,
3338 "Device temperature %d degrees C exceeds "
3339 " maximum allowed. Hardware has been shut down.\n",
3340 temp_val);
3341 return 1;
3342 } else if (temp_state == QLA82XX_TEMP_WARN) {
3343 ql_log(ql_log_warn, vha, 0x600f,
3344 "Device temperature %d degrees C exceeds "
3345 "operating range. Immediate action needed.\n",
3346 temp_val);
3347 }
3348 return 0;
3349}
3350
3328void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha) 3351void qla82xx_clear_pending_mbx(scsi_qla_host_t *vha)
3329{ 3352{
3330 struct qla_hw_data *ha = vha->hw; 3353 struct qla_hw_data *ha = vha->hw;
@@ -3347,18 +3370,20 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3347 /* don't poll if reset is going on */ 3370 /* don't poll if reset is going on */
3348 if (!ha->flags.isp82xx_reset_hdlr_active) { 3371 if (!ha->flags.isp82xx_reset_hdlr_active) {
3349 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 3372 dev_state = qla82xx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
3350 if (dev_state == QLA82XX_DEV_NEED_RESET && 3373 if (qla82xx_check_temp(vha)) {
3374 set_bit(ISP_UNRECOVERABLE, &vha->dpc_flags);
3375 ha->flags.isp82xx_fw_hung = 1;
3376 qla82xx_clear_pending_mbx(vha);
3377 } else if (dev_state == QLA82XX_DEV_NEED_RESET &&
3351 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) { 3378 !test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags)) {
3352 ql_log(ql_log_warn, vha, 0x6001, 3379 ql_log(ql_log_warn, vha, 0x6001,
3353 "Adapter reset needed.\n"); 3380 "Adapter reset needed.\n");
3354 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags); 3381 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
3355 qla2xxx_wake_dpc(vha);
3356 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT && 3382 } else if (dev_state == QLA82XX_DEV_NEED_QUIESCENT &&
3357 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) { 3383 !test_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags)) {
3358 ql_log(ql_log_warn, vha, 0x6002, 3384 ql_log(ql_log_warn, vha, 0x6002,
3359 "Quiescent needed.\n"); 3385 "Quiescent needed.\n");
3360 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags); 3386 set_bit(ISP_QUIESCE_NEEDED, &vha->dpc_flags);
3361 qla2xxx_wake_dpc(vha);
3362 } else { 3387 } else {
3363 if (qla82xx_check_fw_alive(vha)) { 3388 if (qla82xx_check_fw_alive(vha)) {
3364 ql_dbg(ql_dbg_timer, vha, 0x6011, 3389 ql_dbg(ql_dbg_timer, vha, 0x6011,
@@ -3398,7 +3423,6 @@ void qla82xx_watchdog(scsi_qla_host_t *vha)
3398 set_bit(ISP_ABORT_NEEDED, 3423 set_bit(ISP_ABORT_NEEDED,
3399 &vha->dpc_flags); 3424 &vha->dpc_flags);
3400 } 3425 }
3401 qla2xxx_wake_dpc(vha);
3402 ha->flags.isp82xx_fw_hung = 1; 3426 ha->flags.isp82xx_fw_hung = 1;
3403 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n"); 3427 ql_log(ql_log_warn, vha, 0x6007, "Firmware hung.\n");
3404 qla82xx_clear_pending_mbx(vha); 3428 qla82xx_clear_pending_mbx(vha);
@@ -4113,6 +4137,14 @@ qla82xx_md_collect(scsi_qla_host_t *vha)
4113 goto md_failed; 4137 goto md_failed;
4114 } 4138 }
4115 4139
4140 if (ha->flags.isp82xx_no_md_cap) {
4141 ql_log(ql_log_warn, vha, 0xb054,
4142 "Forced reset from application, "
4143 "ignore minidump capture\n");
4144 ha->flags.isp82xx_no_md_cap = 0;
4145 goto md_failed;
4146 }
4147
4116 if (qla82xx_validate_template_chksum(vha)) { 4148 if (qla82xx_validate_template_chksum(vha)) {
4117 ql_log(ql_log_info, vha, 0xb039, 4149 ql_log(ql_log_info, vha, 0xb039,
4118 "Template checksum validation error\n"); 4150 "Template checksum validation error\n");
diff --git a/drivers/scsi/qla2xxx/qla_nx.h b/drivers/scsi/qla2xxx/qla_nx.h
index 4ac50e274661..6eb210e3cc63 100644
--- a/drivers/scsi/qla2xxx/qla_nx.h
+++ b/drivers/scsi/qla2xxx/qla_nx.h
@@ -26,6 +26,7 @@
26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c) 26#define CRB_RCVPEG_STATE QLA82XX_REG(0x13c)
27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54) 27#define BOOT_LOADER_DIMM_STATUS QLA82XX_REG(0x54)
28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc) 28#define CRB_DMA_SHIFT QLA82XX_REG(0xcc)
29#define CRB_TEMP_STATE QLA82XX_REG(0x1b4)
29#define QLA82XX_DMA_SHIFT_VALUE 0x55555555 30#define QLA82XX_DMA_SHIFT_VALUE 0x55555555
30 31
31#define QLA82XX_HW_H0_CH_HUB_ADR 0x05 32#define QLA82XX_HW_H0_CH_HUB_ADR 0x05
@@ -561,7 +562,6 @@
561#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158)) 562#define QLA82XX_FW_VERSION_SUB (QLA82XX_CAM_RAM(0x158))
562#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg)) 563#define QLA82XX_PCIE_REG(reg) (QLA82XX_CRB_PCIE + (reg))
563 564
564#define PCIE_CHICKEN3 (0x120c8)
565#define PCIE_SETUP_FUNCTION (0x12040) 565#define PCIE_SETUP_FUNCTION (0x12040)
566#define PCIE_SETUP_FUNCTION2 (0x12048) 566#define PCIE_SETUP_FUNCTION2 (0x12048)
567 567
@@ -1178,4 +1178,16 @@ static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8, 0x410000AC,
1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1 1178#define CRB_NIU_XG_PAUSE_CTL_P0 0x1
1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8 1179#define CRB_NIU_XG_PAUSE_CTL_P1 0x8
1180 1180
1181#define qla82xx_get_temp_val(x) ((x) >> 16)
1182#define qla82xx_get_temp_state(x) ((x) & 0xffff)
1183#define qla82xx_encode_temp(val, state) (((val) << 16) | (state))
1184
1185/*
1186 * Temperature control.
1187 */
1188enum {
1189 QLA82XX_TEMP_NORMAL = 0x1, /* Normal operating range */
1190 QLA82XX_TEMP_WARN, /* Sound alert, temperature getting high */
1191 QLA82XX_TEMP_PANIC /* Fatal error, hardware has shut down. */
1192};
1181#endif 1193#endif
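For orientation, the CRB_TEMP_STATE word added above packs the temperature reading into the upper 16 bits and one of the QLA82XX_TEMP_* state codes into the lower 16 bits, which is what qla82xx_check_temp() relies on in the watchdog hunk earlier. A minimal user-space sketch of the decode follows; the raw value 0x002d0002 is a made-up sample (45 degrees C, QLA82XX_TEMP_WARN), not something read from hardware.

#include <stdio.h>
#include <stdint.h>

/* Same decode macros as the qla_nx.h additions above. */
#define qla82xx_get_temp_val(x)   ((x) >> 16)
#define qla82xx_get_temp_state(x) ((x) & 0xffff)

enum { QLA82XX_TEMP_NORMAL = 0x1, QLA82XX_TEMP_WARN, QLA82XX_TEMP_PANIC };

int main(void)
{
	uint32_t temp = 0x002d0002;	/* hypothetical CRB_TEMP_STATE read */

	printf("temperature=%u C, state=%u\n",
	    qla82xx_get_temp_val(temp), qla82xx_get_temp_state(temp));
	/* state 2 == QLA82XX_TEMP_WARN, state 3 == QLA82XX_TEMP_PANIC */
	return 0;
}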
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index c9c56a8427f3..6d1d873a20e2 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -13,12 +13,13 @@
13#include <linux/mutex.h> 13#include <linux/mutex.h>
14#include <linux/kobject.h> 14#include <linux/kobject.h>
15#include <linux/slab.h> 15#include <linux/slab.h>
16
17#include <scsi/scsi_tcq.h> 16#include <scsi/scsi_tcq.h>
18#include <scsi/scsicam.h> 17#include <scsi/scsicam.h>
19#include <scsi/scsi_transport.h> 18#include <scsi/scsi_transport.h>
20#include <scsi/scsi_transport_fc.h> 19#include <scsi/scsi_transport_fc.h>
21 20
21#include "qla_target.h"
22
22/* 23/*
23 * Driver version 24 * Driver version
24 */ 25 */
@@ -40,6 +41,12 @@ static struct kmem_cache *ctx_cachep;
40 */ 41 */
41int ql_errlev = ql_log_all; 42int ql_errlev = ql_log_all;
42 43
44int ql2xenableclass2;
45module_param(ql2xenableclass2, int, S_IRUGO|S_IRUSR);
46MODULE_PARM_DESC(ql2xenableclass2,
47 "Specify if Class 2 operations are supported from the very "
48 "beginning. Default is 0 - class 2 not supported.");
49
43int ql2xlogintimeout = 20; 50int ql2xlogintimeout = 20;
44module_param(ql2xlogintimeout, int, S_IRUGO); 51module_param(ql2xlogintimeout, int, S_IRUGO);
45MODULE_PARM_DESC(ql2xlogintimeout, 52MODULE_PARM_DESC(ql2xlogintimeout,
@@ -255,6 +262,8 @@ struct scsi_host_template qla2xxx_driver_template = {
255 262
256 .max_sectors = 0xFFFF, 263 .max_sectors = 0xFFFF,
257 .shost_attrs = qla2x00_host_attrs, 264 .shost_attrs = qla2x00_host_attrs,
265
266 .supported_mode = MODE_INITIATOR,
258}; 267};
259 268
260static struct scsi_transport_template *qla2xxx_transport_template = NULL; 269static struct scsi_transport_template *qla2xxx_transport_template = NULL;
@@ -306,7 +315,8 @@ static void qla2x00_free_fw_dump(struct qla_hw_data *);
306static void qla2x00_mem_free(struct qla_hw_data *); 315static void qla2x00_mem_free(struct qla_hw_data *);
307 316
308/* -------------------------------------------------------------------------- */ 317/* -------------------------------------------------------------------------- */
309static int qla2x00_alloc_queues(struct qla_hw_data *ha) 318static int qla2x00_alloc_queues(struct qla_hw_data *ha, struct req_que *req,
319 struct rsp_que *rsp)
310{ 320{
311 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev); 321 scsi_qla_host_t *vha = pci_get_drvdata(ha->pdev);
312 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues, 322 ha->req_q_map = kzalloc(sizeof(struct req_que *) * ha->max_req_queues,
@@ -324,6 +334,12 @@ static int qla2x00_alloc_queues(struct qla_hw_data *ha)
324 "Unable to allocate memory for response queue ptrs.\n"); 334 "Unable to allocate memory for response queue ptrs.\n");
325 goto fail_rsp_map; 335 goto fail_rsp_map;
326 } 336 }
337 /*
338 * Make sure we record at least the request and response queue zero in
339 * case we need to free them if part of the probe fails.
340 */
341 ha->rsp_q_map[0] = rsp;
342 ha->req_q_map[0] = req;
327 set_bit(0, ha->rsp_qid_map); 343 set_bit(0, ha->rsp_qid_map);
328 set_bit(0, ha->req_qid_map); 344 set_bit(0, ha->req_qid_map);
329 return 1; 345 return 1;
@@ -642,12 +658,12 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
642 658
643 if (ha->flags.eeh_busy) { 659 if (ha->flags.eeh_busy) {
644 if (ha->flags.pci_channel_io_perm_failure) { 660 if (ha->flags.pci_channel_io_perm_failure) {
645 ql_dbg(ql_dbg_io, vha, 0x3001, 661 ql_dbg(ql_dbg_aer, vha, 0x9010,
646 "PCI Channel IO permanent failure, exiting " 662 "PCI Channel IO permanent failure, exiting "
647 "cmd=%p.\n", cmd); 663 "cmd=%p.\n", cmd);
648 cmd->result = DID_NO_CONNECT << 16; 664 cmd->result = DID_NO_CONNECT << 16;
649 } else { 665 } else {
650 ql_dbg(ql_dbg_io, vha, 0x3002, 666 ql_dbg(ql_dbg_aer, vha, 0x9011,
651 "EEH_Busy, Requeuing the cmd=%p.\n", cmd); 667 "EEH_Busy, Requeuing the cmd=%p.\n", cmd);
652 cmd->result = DID_REQUEUE << 16; 668 cmd->result = DID_REQUEUE << 16;
653 } 669 }
@@ -657,7 +673,7 @@ qla2xxx_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
657 rval = fc_remote_port_chkready(rport); 673 rval = fc_remote_port_chkready(rport);
658 if (rval) { 674 if (rval) {
659 cmd->result = rval; 675 cmd->result = rval;
660 ql_dbg(ql_dbg_io, vha, 0x3003, 676 ql_dbg(ql_dbg_io + ql_dbg_verbose, vha, 0x3003,
661 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n", 677 "fc_remote_port_chkready failed for cmd=%p, rval=0x%x.\n",
662 cmd, rval); 678 cmd, rval);
663 goto qc24_fail_command; 679 goto qc24_fail_command;
@@ -1136,7 +1152,7 @@ qla2xxx_eh_bus_reset(struct scsi_cmnd *cmd)
1136 ret = FAILED; 1152 ret = FAILED;
1137 1153
1138 ql_log(ql_log_info, vha, 0x8012, 1154 ql_log(ql_log_info, vha, 0x8012,
1139 "BUS RESET ISSUED nexus=%ld:%d%d.\n", vha->host_no, id, lun); 1155 "BUS RESET ISSUED nexus=%ld:%d:%d.\n", vha->host_no, id, lun);
1140 1156
1141 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) { 1157 if (qla2x00_wait_for_hba_online(vha) != QLA_SUCCESS) {
1142 ql_log(ql_log_fatal, vha, 0x8013, 1158 ql_log(ql_log_fatal, vha, 0x8013,
@@ -2180,6 +2196,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2180 ql_dbg_pci(ql_dbg_init, pdev, 0x000a, 2196 ql_dbg_pci(ql_dbg_init, pdev, 0x000a,
2181 "Memory allocated for ha=%p.\n", ha); 2197 "Memory allocated for ha=%p.\n", ha);
2182 ha->pdev = pdev; 2198 ha->pdev = pdev;
2199 ha->tgt.enable_class_2 = ql2xenableclass2;
2183 2200
2184 /* Clear our data area */ 2201 /* Clear our data area */
2185 ha->bars = bars; 2202 ha->bars = bars;
@@ -2243,6 +2260,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2243 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2260 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2244 req_length = REQUEST_ENTRY_CNT_24XX; 2261 req_length = REQUEST_ENTRY_CNT_24XX;
2245 rsp_length = RESPONSE_ENTRY_CNT_2300; 2262 rsp_length = RESPONSE_ENTRY_CNT_2300;
2263 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2246 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2264 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2247 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2265 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2248 ha->gid_list_info_size = 8; 2266 ha->gid_list_info_size = 8;
@@ -2258,6 +2276,7 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2258 ha->mbx_count = MAILBOX_REGISTER_COUNT; 2276 ha->mbx_count = MAILBOX_REGISTER_COUNT;
2259 req_length = REQUEST_ENTRY_CNT_24XX; 2277 req_length = REQUEST_ENTRY_CNT_24XX;
2260 rsp_length = RESPONSE_ENTRY_CNT_2300; 2278 rsp_length = RESPONSE_ENTRY_CNT_2300;
2279 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
2261 ha->max_loop_id = SNS_LAST_LOOP_ID_2300; 2280 ha->max_loop_id = SNS_LAST_LOOP_ID_2300;
2262 ha->init_cb_size = sizeof(struct mid_init_cb_24xx); 2281 ha->init_cb_size = sizeof(struct mid_init_cb_24xx);
2263 ha->gid_list_info_size = 8; 2282 ha->gid_list_info_size = 8;
@@ -2417,6 +2436,17 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2417 host->max_cmd_len, host->max_channel, host->max_lun, 2436 host->max_cmd_len, host->max_channel, host->max_lun,
2418 host->transportt, sht->vendor_id); 2437 host->transportt, sht->vendor_id);
2419 2438
2439que_init:
2440 /* Alloc arrays of request and response ring ptrs */
2441 if (!qla2x00_alloc_queues(ha, req, rsp)) {
2442 ql_log(ql_log_fatal, base_vha, 0x003d,
2443 "Failed to allocate memory for queue pointers..."
2444 "aborting.\n");
2445 goto probe_init_failed;
2446 }
2447
2448 qlt_probe_one_stage1(base_vha, ha);
2449
2420 /* Set up the irqs */ 2450 /* Set up the irqs */
2421 ret = qla2x00_request_irqs(ha, rsp); 2451 ret = qla2x00_request_irqs(ha, rsp);
2422 if (ret) 2452 if (ret)
@@ -2424,20 +2454,10 @@ qla2x00_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
2424 2454
2425 pci_save_state(pdev); 2455 pci_save_state(pdev);
2426 2456
2427 /* Alloc arrays of request and response ring ptrs */ 2457 /* Assign back pointers */
2428que_init:
2429 if (!qla2x00_alloc_queues(ha)) {
2430 ql_log(ql_log_fatal, base_vha, 0x003d,
2431 "Failed to allocate memory for queue pointers.. aborting.\n");
2432 goto probe_init_failed;
2433 }
2434
2435 ha->rsp_q_map[0] = rsp;
2436 ha->req_q_map[0] = req;
2437 rsp->req = req; 2458 rsp->req = req;
2438 req->rsp = rsp; 2459 req->rsp = rsp;
2439 set_bit(0, ha->req_qid_map); 2460
2440 set_bit(0, ha->rsp_qid_map);
2441 /* FWI2-capable only. */ 2461 /* FWI2-capable only. */
2442 req->req_q_in = &ha->iobase->isp24.req_q_in; 2462 req->req_q_in = &ha->iobase->isp24.req_q_in;
2443 req->req_q_out = &ha->iobase->isp24.req_q_out; 2463 req->req_q_out = &ha->iobase->isp24.req_q_out;
@@ -2514,6 +2534,14 @@ que_init:
2514 ql_dbg(ql_dbg_init, base_vha, 0x00ee, 2534 ql_dbg(ql_dbg_init, base_vha, 0x00ee,
2515 "DPC thread started successfully.\n"); 2535 "DPC thread started successfully.\n");
2516 2536
2537 /*
2538 * If we're not coming up in initiator mode, we might sit for
2539 * a while without waking up the dpc thread, which leads to a
2540 * stuck process warning. So just kick the dpc once here and
2541 * let the kthread start (and go back to sleep in qla2x00_do_dpc).
2542 */
2543 qla2xxx_wake_dpc(base_vha);
2544
2517skip_dpc: 2545skip_dpc:
2518 list_add_tail(&base_vha->list, &ha->vp_list); 2546 list_add_tail(&base_vha->list, &ha->vp_list);
2519 base_vha->host->irq = ha->pdev->irq; 2547 base_vha->host->irq = ha->pdev->irq;
@@ -2559,7 +2587,11 @@ skip_dpc:
2559 ql_dbg(ql_dbg_init, base_vha, 0x00f2, 2587 ql_dbg(ql_dbg_init, base_vha, 0x00f2,
2560 "Init done and hba is online.\n"); 2588 "Init done and hba is online.\n");
2561 2589
2562 scsi_scan_host(host); 2590 if (qla_ini_mode_enabled(base_vha))
2591 scsi_scan_host(host);
2592 else
2593 ql_dbg(ql_dbg_init, base_vha, 0x0122,
2594 "skipping scsi_scan_host() for non-initiator port\n");
2563 2595
2564 qla2x00_alloc_sysfs_attr(base_vha); 2596 qla2x00_alloc_sysfs_attr(base_vha);
2565 2597
@@ -2577,11 +2609,17 @@ skip_dpc:
2577 base_vha->host_no, 2609 base_vha->host_no,
2578 ha->isp_ops->fw_version_str(base_vha, fw_str)); 2610 ha->isp_ops->fw_version_str(base_vha, fw_str));
2579 2611
2612 qlt_add_target(ha, base_vha);
2613
2580 return 0; 2614 return 0;
2581 2615
2582probe_init_failed: 2616probe_init_failed:
2583 qla2x00_free_req_que(ha, req); 2617 qla2x00_free_req_que(ha, req);
2618 ha->req_q_map[0] = NULL;
2619 clear_bit(0, ha->req_qid_map);
2584 qla2x00_free_rsp_que(ha, rsp); 2620 qla2x00_free_rsp_que(ha, rsp);
2621 ha->rsp_q_map[0] = NULL;
2622 clear_bit(0, ha->rsp_qid_map);
2585 ha->max_req_queues = ha->max_rsp_queues = 0; 2623 ha->max_req_queues = ha->max_rsp_queues = 0;
2586 2624
2587probe_failed: 2625probe_failed:
@@ -2621,6 +2659,22 @@ probe_out:
2621} 2659}
2622 2660
2623static void 2661static void
2662qla2x00_stop_dpc_thread(scsi_qla_host_t *vha)
2663{
2664 struct qla_hw_data *ha = vha->hw;
2665 struct task_struct *t = ha->dpc_thread;
2666
2667 if (ha->dpc_thread == NULL)
2668 return;
2669 /*
2670 * qla2xxx_wake_dpc checks for ->dpc_thread
2671 * so we need to zero it out.
2672 */
2673 ha->dpc_thread = NULL;
2674 kthread_stop(t);
2675}
2676
2677static void
2624qla2x00_shutdown(struct pci_dev *pdev) 2678qla2x00_shutdown(struct pci_dev *pdev)
2625{ 2679{
2626 scsi_qla_host_t *vha; 2680 scsi_qla_host_t *vha;
@@ -2663,9 +2717,18 @@ qla2x00_remove_one(struct pci_dev *pdev)
2663 struct qla_hw_data *ha; 2717 struct qla_hw_data *ha;
2664 unsigned long flags; 2718 unsigned long flags;
2665 2719
2720 /*
 2721	 * If the PCI device is disabled, that means the probe failed and any
 2722	 * resources should have been cleaned up on probe exit.
2723 */
2724 if (!atomic_read(&pdev->enable_cnt))
2725 return;
2726
2666 base_vha = pci_get_drvdata(pdev); 2727 base_vha = pci_get_drvdata(pdev);
2667 ha = base_vha->hw; 2728 ha = base_vha->hw;
2668 2729
2730 ha->flags.host_shutting_down = 1;
2731
2669 mutex_lock(&ha->vport_lock); 2732 mutex_lock(&ha->vport_lock);
2670 while (ha->cur_vport_count) { 2733 while (ha->cur_vport_count) {
2671 struct Scsi_Host *scsi_host; 2734 struct Scsi_Host *scsi_host;
@@ -2719,6 +2782,7 @@ qla2x00_remove_one(struct pci_dev *pdev)
2719 ha->dpc_thread = NULL; 2782 ha->dpc_thread = NULL;
2720 kthread_stop(t); 2783 kthread_stop(t);
2721 } 2784 }
2785 qlt_remove_target(ha, base_vha);
2722 2786
2723 qla2x00_free_sysfs_attr(base_vha); 2787 qla2x00_free_sysfs_attr(base_vha);
2724 2788
@@ -2770,17 +2834,7 @@ qla2x00_free_device(scsi_qla_host_t *vha)
2770 if (vha->timer_active) 2834 if (vha->timer_active)
2771 qla2x00_stop_timer(vha); 2835 qla2x00_stop_timer(vha);
2772 2836
2773 /* Kill the kernel thread for this host */ 2837 qla2x00_stop_dpc_thread(vha);
2774 if (ha->dpc_thread) {
2775 struct task_struct *t = ha->dpc_thread;
2776
2777 /*
2778 * qla2xxx_wake_dpc checks for ->dpc_thread
2779 * so we need to zero it out.
2780 */
2781 ha->dpc_thread = NULL;
2782 kthread_stop(t);
2783 }
2784 2838
2785 qla25xx_delete_queues(vha); 2839 qla25xx_delete_queues(vha);
2786 2840
@@ -2842,8 +2896,10 @@ qla2x00_schedule_rport_del(struct scsi_qla_host *vha, fc_port_t *fcport,
2842 spin_unlock_irqrestore(vha->host->host_lock, flags); 2896 spin_unlock_irqrestore(vha->host->host_lock, flags);
2843 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 2897 set_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
2844 qla2xxx_wake_dpc(base_vha); 2898 qla2xxx_wake_dpc(base_vha);
2845 } else 2899 } else {
2846 fc_remote_port_delete(rport); 2900 fc_remote_port_delete(rport);
2901 qlt_fc_port_deleted(vha, fcport);
2902 }
2847} 2903}
2848 2904
2849/* 2905/*
@@ -2859,7 +2915,7 @@ void qla2x00_mark_device_lost(scsi_qla_host_t *vha, fc_port_t *fcport,
2859 int do_login, int defer) 2915 int do_login, int defer)
2860{ 2916{
2861 if (atomic_read(&fcport->state) == FCS_ONLINE && 2917 if (atomic_read(&fcport->state) == FCS_ONLINE &&
2862 vha->vp_idx == fcport->vp_idx) { 2918 vha->vp_idx == fcport->vha->vp_idx) {
2863 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2919 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2864 qla2x00_schedule_rport_del(vha, fcport, defer); 2920 qla2x00_schedule_rport_del(vha, fcport, defer);
2865 } 2921 }
@@ -2908,7 +2964,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2908 fc_port_t *fcport; 2964 fc_port_t *fcport;
2909 2965
2910 list_for_each_entry(fcport, &vha->vp_fcports, list) { 2966 list_for_each_entry(fcport, &vha->vp_fcports, list) {
2911 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vp_idx) 2967 if (vha->vp_idx != 0 && vha->vp_idx != fcport->vha->vp_idx)
2912 continue; 2968 continue;
2913 2969
2914 /* 2970 /*
@@ -2921,7 +2977,7 @@ qla2x00_mark_all_devices_lost(scsi_qla_host_t *vha, int defer)
2921 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST); 2977 qla2x00_set_fcport_state(fcport, FCS_DEVICE_LOST);
2922 if (defer) 2978 if (defer)
2923 qla2x00_schedule_rport_del(vha, fcport, defer); 2979 qla2x00_schedule_rport_del(vha, fcport, defer);
2924 else if (vha->vp_idx == fcport->vp_idx) 2980 else if (vha->vp_idx == fcport->vha->vp_idx)
2925 qla2x00_schedule_rport_del(vha, fcport, defer); 2981 qla2x00_schedule_rport_del(vha, fcport, defer);
2926 } 2982 }
2927 } 2983 }
@@ -2946,10 +3002,13 @@ qla2x00_mem_alloc(struct qla_hw_data *ha, uint16_t req_len, uint16_t rsp_len,
2946 if (!ha->init_cb) 3002 if (!ha->init_cb)
2947 goto fail; 3003 goto fail;
2948 3004
3005 if (qlt_mem_alloc(ha) < 0)
3006 goto fail_free_init_cb;
3007
2949 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev, 3008 ha->gid_list = dma_alloc_coherent(&ha->pdev->dev,
2950 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL); 3009 qla2x00_gid_list_size(ha), &ha->gid_list_dma, GFP_KERNEL);
2951 if (!ha->gid_list) 3010 if (!ha->gid_list)
2952 goto fail_free_init_cb; 3011 goto fail_free_tgt_mem;
2953 3012
2954 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep); 3013 ha->srb_mempool = mempool_create_slab_pool(SRB_MIN_REQ, srb_cachep);
2955 if (!ha->srb_mempool) 3014 if (!ha->srb_mempool)
@@ -3167,6 +3226,8 @@ fail_free_gid_list:
3167 ha->gid_list_dma); 3226 ha->gid_list_dma);
3168 ha->gid_list = NULL; 3227 ha->gid_list = NULL;
3169 ha->gid_list_dma = 0; 3228 ha->gid_list_dma = 0;
3229fail_free_tgt_mem:
3230 qlt_mem_free(ha);
3170fail_free_init_cb: 3231fail_free_init_cb:
3171 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb, 3232 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, ha->init_cb,
3172 ha->init_cb_dma); 3233 ha->init_cb_dma);
@@ -3282,6 +3343,8 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3282 if (ha->ctx_mempool) 3343 if (ha->ctx_mempool)
3283 mempool_destroy(ha->ctx_mempool); 3344 mempool_destroy(ha->ctx_mempool);
3284 3345
3346 qlt_mem_free(ha);
3347
3285 if (ha->init_cb) 3348 if (ha->init_cb)
3286 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size, 3349 dma_free_coherent(&ha->pdev->dev, ha->init_cb_size,
3287 ha->init_cb, ha->init_cb_dma); 3350 ha->init_cb, ha->init_cb_dma);
@@ -3311,6 +3374,10 @@ qla2x00_mem_free(struct qla_hw_data *ha)
3311 3374
3312 ha->gid_list = NULL; 3375 ha->gid_list = NULL;
3313 ha->gid_list_dma = 0; 3376 ha->gid_list_dma = 0;
3377
3378 ha->tgt.atio_ring = NULL;
3379 ha->tgt.atio_dma = 0;
3380 ha->tgt.tgt_vp_map = NULL;
3314} 3381}
3315 3382
3316struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht, 3383struct scsi_qla_host *qla2x00_create_host(struct scsi_host_template *sht,
@@ -3671,10 +3738,9 @@ qla2x00_do_dpc(void *data)
3671 3738
3672 ha->dpc_active = 1; 3739 ha->dpc_active = 1;
3673 3740
3674 ql_dbg(ql_dbg_dpc, base_vha, 0x4001, 3741 ql_dbg(ql_dbg_dpc + ql_dbg_verbose, base_vha, 0x4001,
3675 "DPC handler waking up.\n"); 3742 "DPC handler waking up, dpc_flags=0x%lx.\n",
3676 ql_dbg(ql_dbg_dpc, base_vha, 0x4002, 3743 base_vha->dpc_flags);
3677 "dpc_flags=0x%lx.\n", base_vha->dpc_flags);
3678 3744
3679 qla2x00_do_work(base_vha); 3745 qla2x00_do_work(base_vha);
3680 3746
@@ -3740,6 +3806,16 @@ qla2x00_do_dpc(void *data)
3740 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags); 3806 clear_bit(FCPORT_UPDATE_NEEDED, &base_vha->dpc_flags);
3741 } 3807 }
3742 3808
3809 if (test_bit(SCR_PENDING, &base_vha->dpc_flags)) {
3810 int ret;
3811 ret = qla2x00_send_change_request(base_vha, 0x3, 0);
3812 if (ret != QLA_SUCCESS)
3813 ql_log(ql_log_warn, base_vha, 0x121,
3814 "Failed to enable receiving of RSCN "
3815 "requests: 0x%x.\n", ret);
3816 clear_bit(SCR_PENDING, &base_vha->dpc_flags);
3817 }
3818
3743 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) { 3819 if (test_bit(ISP_QUIESCE_NEEDED, &base_vha->dpc_flags)) {
3744 ql_dbg(ql_dbg_dpc, base_vha, 0x4009, 3820 ql_dbg(ql_dbg_dpc, base_vha, 0x4009,
3745 "Quiescence mode scheduled.\n"); 3821 "Quiescence mode scheduled.\n");
@@ -4457,6 +4533,21 @@ qla2x00_module_init(void)
4457 return -ENOMEM; 4533 return -ENOMEM;
4458 } 4534 }
4459 4535
4536 /* Initialize target kmem_cache and mem_pools */
4537 ret = qlt_init();
4538 if (ret < 0) {
4539 kmem_cache_destroy(srb_cachep);
4540 return ret;
4541 } else if (ret > 0) {
4542 /*
 4543	 * If initiator mode is explicitly disabled by qlt_init(),
4544 * prevent scsi_transport_fc.c:fc_scsi_scan_rport() from
4545 * performing scsi_scan_target() during LOOP UP event.
4546 */
4547 qla2xxx_transport_functions.disable_target_scan = 1;
4548 qla2xxx_transport_vport_functions.disable_target_scan = 1;
4549 }
4550
4460 /* Derive version string. */ 4551 /* Derive version string. */
4461 strcpy(qla2x00_version_str, QLA2XXX_VERSION); 4552 strcpy(qla2x00_version_str, QLA2XXX_VERSION);
4462 if (ql2xextended_error_logging) 4553 if (ql2xextended_error_logging)
@@ -4468,6 +4559,7 @@ qla2x00_module_init(void)
4468 kmem_cache_destroy(srb_cachep); 4559 kmem_cache_destroy(srb_cachep);
4469 ql_log(ql_log_fatal, NULL, 0x0002, 4560 ql_log(ql_log_fatal, NULL, 0x0002,
4470 "fc_attach_transport failed...Failing load!.\n"); 4561 "fc_attach_transport failed...Failing load!.\n");
4562 qlt_exit();
4471 return -ENODEV; 4563 return -ENODEV;
4472 } 4564 }
4473 4565
@@ -4481,6 +4573,7 @@ qla2x00_module_init(void)
4481 fc_attach_transport(&qla2xxx_transport_vport_functions); 4573 fc_attach_transport(&qla2xxx_transport_vport_functions);
4482 if (!qla2xxx_transport_vport_template) { 4574 if (!qla2xxx_transport_vport_template) {
4483 kmem_cache_destroy(srb_cachep); 4575 kmem_cache_destroy(srb_cachep);
4576 qlt_exit();
4484 fc_release_transport(qla2xxx_transport_template); 4577 fc_release_transport(qla2xxx_transport_template);
4485 ql_log(ql_log_fatal, NULL, 0x0004, 4578 ql_log(ql_log_fatal, NULL, 0x0004,
4486 "fc_attach_transport vport failed...Failing load!.\n"); 4579 "fc_attach_transport vport failed...Failing load!.\n");
@@ -4492,6 +4585,7 @@ qla2x00_module_init(void)
4492 ret = pci_register_driver(&qla2xxx_pci_driver); 4585 ret = pci_register_driver(&qla2xxx_pci_driver);
4493 if (ret) { 4586 if (ret) {
4494 kmem_cache_destroy(srb_cachep); 4587 kmem_cache_destroy(srb_cachep);
4588 qlt_exit();
4495 fc_release_transport(qla2xxx_transport_template); 4589 fc_release_transport(qla2xxx_transport_template);
4496 fc_release_transport(qla2xxx_transport_vport_template); 4590 fc_release_transport(qla2xxx_transport_vport_template);
4497 ql_log(ql_log_fatal, NULL, 0x0006, 4591 ql_log(ql_log_fatal, NULL, 0x0006,
@@ -4511,6 +4605,7 @@ qla2x00_module_exit(void)
4511 pci_unregister_driver(&qla2xxx_pci_driver); 4605 pci_unregister_driver(&qla2xxx_pci_driver);
4512 qla2x00_release_firmware(); 4606 qla2x00_release_firmware();
4513 kmem_cache_destroy(srb_cachep); 4607 kmem_cache_destroy(srb_cachep);
4608 qlt_exit();
4514 if (ctx_cachep) 4609 if (ctx_cachep)
4515 kmem_cache_destroy(ctx_cachep); 4610 kmem_cache_destroy(ctx_cachep);
4516 fc_release_transport(qla2xxx_transport_template); 4611 fc_release_transport(qla2xxx_transport_template);
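The qla_os.c hunks above thread target-mode setup through module init and teardown: qlt_init() runs right after the SRB cache is created, and every later failure path (fc_attach_transport(), the vport transport, pci_register_driver()) as well as qla2x00_module_exit() now calls qlt_exit(), so the unwind stays in reverse order of initialization. A generic, runnable sketch of that ordering pattern follows; the malloc()-backed stages and their names are stand-ins for illustration, not driver code.

#include <stdio.h>
#include <stdlib.h>

/* Each stage stands in for one facility from qla2x00_module_init(). */
static int init_all(void)
{
	void *srb_cache, *tgt_mem, *transport;

	srb_cache = malloc(16);		/* like kmem_cache_create()   */
	if (!srb_cache)
		return -1;

	tgt_mem = malloc(16);		/* like qlt_init()            */
	if (!tgt_mem) {
		free(srb_cache);
		return -1;
	}

	transport = malloc(16);		/* like fc_attach_transport() */
	if (!transport) {
		free(tgt_mem);		/* mirrors the added qlt_exit() calls */
		free(srb_cache);
		return -1;
	}

	/* Success: release in reverse order, as qla2x00_module_exit() does. */
	free(transport);
	free(tgt_mem);
	free(srb_cache);
	return 0;
}

int main(void)
{
	return init_all() ? 1 : 0;
}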
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
new file mode 100644
index 000000000000..6986552b47e6
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -0,0 +1,4972 @@
1/*
2 * qla_target.c SCSI LLD infrastructure for QLogic 22xx/23xx/24xx/25xx
3 *
4 * based on qla2x00t.c code:
5 *
6 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
7 * Copyright (C) 2004 - 2005 Leonid Stoljar
8 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
9 * Copyright (C) 2006 - 2010 ID7 Ltd.
10 *
11 * Forward port and refactoring to modern qla2xxx and target/configfs
12 *
13 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
14 *
15 * This program is free software; you can redistribute it and/or
16 * modify it under the terms of the GNU General Public License
17 * as published by the Free Software Foundation, version 2
18 * of the License.
19 *
20 * This program is distributed in the hope that it will be useful,
21 * but WITHOUT ANY WARRANTY; without even the implied warranty of
22 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
23 * GNU General Public License for more details.
24 */
25
26#include <linux/module.h>
27#include <linux/init.h>
28#include <linux/types.h>
29#include <linux/blkdev.h>
30#include <linux/interrupt.h>
31#include <linux/pci.h>
32#include <linux/delay.h>
33#include <linux/list.h>
34#include <linux/workqueue.h>
35#include <asm/unaligned.h>
36#include <scsi/scsi.h>
37#include <scsi/scsi_host.h>
38#include <scsi/scsi_tcq.h>
39#include <target/target_core_base.h>
40#include <target/target_core_fabric.h>
41
42#include "qla_def.h"
43#include "qla_target.h"
44
45static char *qlini_mode = QLA2XXX_INI_MODE_STR_ENABLED;
46module_param(qlini_mode, charp, S_IRUGO);
47MODULE_PARM_DESC(qlini_mode,
48 "Determines when initiator mode will be enabled. Possible values: "
49 "\"exclusive\" - initiator mode will be enabled on load, "
50 "disabled on enabling target mode and then on disabling target mode "
51 "enabled back; "
52 "\"disabled\" - initiator mode will never be enabled; "
53 "\"enabled\" (default) - initiator mode will always stay enabled.");
54
55static int ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
56
57/*
58 * From scsi/fc/fc_fcp.h
59 */
60enum fcp_resp_rsp_codes {
61 FCP_TMF_CMPL = 0,
62 FCP_DATA_LEN_INVALID = 1,
63 FCP_CMND_FIELDS_INVALID = 2,
64 FCP_DATA_PARAM_MISMATCH = 3,
65 FCP_TMF_REJECTED = 4,
66 FCP_TMF_FAILED = 5,
67 FCP_TMF_INVALID_LUN = 9,
68};
69
70/*
71 * fc_pri_ta from scsi/fc/fc_fcp.h
72 */
73#define FCP_PTA_SIMPLE 0 /* simple task attribute */
74#define FCP_PTA_HEADQ 1 /* head of queue task attribute */
75#define FCP_PTA_ORDERED 2 /* ordered task attribute */
 76#define FCP_PTA_ACA	4	/* auto. contingent allegiance */
77#define FCP_PTA_MASK 7 /* mask for task attribute field */
78#define FCP_PRI_SHIFT 3 /* priority field starts in bit 3 */
79#define FCP_PRI_RESVD_MASK 0x80 /* reserved bits in priority field */
80
81/*
82 * This driver calls qla2x00_alloc_iocbs() and qla2x00_issue_marker(), which
83 * must be called under HW lock and could unlock/lock it inside.
 84 * It isn't an issue, since in the current implementation, at the time when
85 * those functions are called:
86 *
87 * - Either context is IRQ and only IRQ handler can modify HW data,
88 * including rings related fields,
89 *
90 * - Or access to target mode variables from struct qla_tgt doesn't
 91 * cross those functions' boundaries, except tgt_stop, which is
 92 * additionally protected by irq_cmd_count.
93 */
94/* Predefs for callbacks handed to qla2xxx LLD */
95static void qlt_24xx_atio_pkt(struct scsi_qla_host *ha,
96 struct atio_from_isp *pkt);
97static void qlt_response_pkt(struct scsi_qla_host *ha, response_t *pkt);
98static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
99 int fn, void *iocb, int flags);
100static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
101 *cmd, struct atio_from_isp *atio, int ha_locked);
102static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
103 struct qla_tgt_srr_imm *imm, int ha_lock);
104/*
105 * Global Variables
106 */
107static struct kmem_cache *qla_tgt_cmd_cachep;
108static struct kmem_cache *qla_tgt_mgmt_cmd_cachep;
109static mempool_t *qla_tgt_mgmt_cmd_mempool;
110static struct workqueue_struct *qla_tgt_wq;
111static DEFINE_MUTEX(qla_tgt_mutex);
112static LIST_HEAD(qla_tgt_glist);
113
114/* ha->hardware_lock supposed to be held on entry (to protect tgt->sess_list) */
115static struct qla_tgt_sess *qlt_find_sess_by_port_name(
116 struct qla_tgt *tgt,
117 const uint8_t *port_name)
118{
119 struct qla_tgt_sess *sess;
120
121 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry) {
122 if (!memcmp(sess->port_name, port_name, WWN_SIZE))
123 return sess;
124 }
125
126 return NULL;
127}
128
 129/* Might release hw lock, then reacquire!! */
130static inline int qlt_issue_marker(struct scsi_qla_host *vha, int vha_locked)
131{
132 /* Send marker if required */
133 if (unlikely(vha->marker_needed != 0)) {
134 int rc = qla2x00_issue_marker(vha, vha_locked);
135 if (rc != QLA_SUCCESS) {
136 ql_dbg(ql_dbg_tgt, vha, 0xe03d,
137 "qla_target(%d): issue_marker() failed\n",
138 vha->vp_idx);
139 }
140 return rc;
141 }
142 return QLA_SUCCESS;
143}
144
145static inline
146struct scsi_qla_host *qlt_find_host_by_d_id(struct scsi_qla_host *vha,
147 uint8_t *d_id)
148{
149 struct qla_hw_data *ha = vha->hw;
150 uint8_t vp_idx;
151
152 if ((vha->d_id.b.area != d_id[1]) || (vha->d_id.b.domain != d_id[0]))
153 return NULL;
154
155 if (vha->d_id.b.al_pa == d_id[2])
156 return vha;
157
158 BUG_ON(ha->tgt.tgt_vp_map == NULL);
159 vp_idx = ha->tgt.tgt_vp_map[d_id[2]].idx;
160 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
161 return ha->tgt.tgt_vp_map[vp_idx].vha;
162
163 return NULL;
164}
165
166static inline
167struct scsi_qla_host *qlt_find_host_by_vp_idx(struct scsi_qla_host *vha,
168 uint16_t vp_idx)
169{
170 struct qla_hw_data *ha = vha->hw;
171
172 if (vha->vp_idx == vp_idx)
173 return vha;
174
175 BUG_ON(ha->tgt.tgt_vp_map == NULL);
176 if (likely(test_bit(vp_idx, ha->vp_idx_map)))
177 return ha->tgt.tgt_vp_map[vp_idx].vha;
178
179 return NULL;
180}
181
182void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *vha,
183 struct atio_from_isp *atio)
184{
185 switch (atio->u.raw.entry_type) {
186 case ATIO_TYPE7:
187 {
188 struct scsi_qla_host *host = qlt_find_host_by_d_id(vha,
189 atio->u.isp24.fcp_hdr.d_id);
190 if (unlikely(NULL == host)) {
191 ql_dbg(ql_dbg_tgt, vha, 0xe03e,
192 "qla_target(%d): Received ATIO_TYPE7 "
193 "with unknown d_id %x:%x:%x\n", vha->vp_idx,
194 atio->u.isp24.fcp_hdr.d_id[0],
195 atio->u.isp24.fcp_hdr.d_id[1],
196 atio->u.isp24.fcp_hdr.d_id[2]);
197 break;
198 }
199 qlt_24xx_atio_pkt(host, atio);
200 break;
201 }
202
203 case IMMED_NOTIFY_TYPE:
204 {
205 struct scsi_qla_host *host = vha;
206 struct imm_ntfy_from_isp *entry =
207 (struct imm_ntfy_from_isp *)atio;
208
209 if ((entry->u.isp24.vp_index != 0xFF) &&
210 (entry->u.isp24.nport_handle != 0xFFFF)) {
211 host = qlt_find_host_by_vp_idx(vha,
212 entry->u.isp24.vp_index);
213 if (unlikely(!host)) {
214 ql_dbg(ql_dbg_tgt, vha, 0xe03f,
215 "qla_target(%d): Received "
216 "ATIO (IMMED_NOTIFY_TYPE) "
217 "with unknown vp_index %d\n",
218 vha->vp_idx, entry->u.isp24.vp_index);
219 break;
220 }
221 }
222 qlt_24xx_atio_pkt(host, atio);
223 break;
224 }
225
226 default:
227 ql_dbg(ql_dbg_tgt, vha, 0xe040,
228 "qla_target(%d): Received unknown ATIO atio "
229 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
230 break;
231 }
232
233 return;
234}
235
236void qlt_response_pkt_all_vps(struct scsi_qla_host *vha, response_t *pkt)
237{
238 switch (pkt->entry_type) {
239 case CTIO_TYPE7:
240 {
241 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
242 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
243 entry->vp_index);
244 if (unlikely(!host)) {
245 ql_dbg(ql_dbg_tgt, vha, 0xe041,
246 "qla_target(%d): Response pkt (CTIO_TYPE7) "
247 "received, with unknown vp_index %d\n",
248 vha->vp_idx, entry->vp_index);
249 break;
250 }
251 qlt_response_pkt(host, pkt);
252 break;
253 }
254
255 case IMMED_NOTIFY_TYPE:
256 {
257 struct scsi_qla_host *host = vha;
258 struct imm_ntfy_from_isp *entry =
259 (struct imm_ntfy_from_isp *)pkt;
260
261 host = qlt_find_host_by_vp_idx(vha, entry->u.isp24.vp_index);
262 if (unlikely(!host)) {
263 ql_dbg(ql_dbg_tgt, vha, 0xe042,
264 "qla_target(%d): Response pkt (IMMED_NOTIFY_TYPE) "
265 "received, with unknown vp_index %d\n",
266 vha->vp_idx, entry->u.isp24.vp_index);
267 break;
268 }
269 qlt_response_pkt(host, pkt);
270 break;
271 }
272
273 case NOTIFY_ACK_TYPE:
274 {
275 struct scsi_qla_host *host = vha;
276 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
277
278 if (0xFF != entry->u.isp24.vp_index) {
279 host = qlt_find_host_by_vp_idx(vha,
280 entry->u.isp24.vp_index);
281 if (unlikely(!host)) {
282 ql_dbg(ql_dbg_tgt, vha, 0xe043,
283 "qla_target(%d): Response "
284 "pkt (NOTIFY_ACK_TYPE) "
285 "received, with unknown "
286 "vp_index %d\n", vha->vp_idx,
287 entry->u.isp24.vp_index);
288 break;
289 }
290 }
291 qlt_response_pkt(host, pkt);
292 break;
293 }
294
295 case ABTS_RECV_24XX:
296 {
297 struct abts_recv_from_24xx *entry =
298 (struct abts_recv_from_24xx *)pkt;
299 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
300 entry->vp_index);
301 if (unlikely(!host)) {
302 ql_dbg(ql_dbg_tgt, vha, 0xe044,
303 "qla_target(%d): Response pkt "
304 "(ABTS_RECV_24XX) received, with unknown "
305 "vp_index %d\n", vha->vp_idx, entry->vp_index);
306 break;
307 }
308 qlt_response_pkt(host, pkt);
309 break;
310 }
311
312 case ABTS_RESP_24XX:
313 {
314 struct abts_resp_to_24xx *entry =
315 (struct abts_resp_to_24xx *)pkt;
316 struct scsi_qla_host *host = qlt_find_host_by_vp_idx(vha,
317 entry->vp_index);
318 if (unlikely(!host)) {
319 ql_dbg(ql_dbg_tgt, vha, 0xe045,
320 "qla_target(%d): Response pkt "
321 "(ABTS_RECV_24XX) received, with unknown "
322 "vp_index %d\n", vha->vp_idx, entry->vp_index);
323 break;
324 }
325 qlt_response_pkt(host, pkt);
326 break;
327 }
328
329 default:
330 qlt_response_pkt(vha, pkt);
331 break;
332 }
333
334}
335
336static void qlt_free_session_done(struct work_struct *work)
337{
338 struct qla_tgt_sess *sess = container_of(work, struct qla_tgt_sess,
339 free_work);
340 struct qla_tgt *tgt = sess->tgt;
341 struct scsi_qla_host *vha = sess->vha;
342 struct qla_hw_data *ha = vha->hw;
343
344 BUG_ON(!tgt);
345 /*
346 * Release the target session for FC Nexus from fabric module code.
347 */
348 if (sess->se_sess != NULL)
349 ha->tgt.tgt_ops->free_session(sess);
350
351 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf001,
352 "Unregistration of sess %p finished\n", sess);
353
354 kfree(sess);
355 /*
 356	 * We need to protect against a race where tgt is freed before or
357 * inside wake_up()
358 */
359 tgt->sess_count--;
360 if (tgt->sess_count == 0)
361 wake_up_all(&tgt->waitQ);
362}
363
364/* ha->hardware_lock supposed to be held on entry */
365void qlt_unreg_sess(struct qla_tgt_sess *sess)
366{
367 struct scsi_qla_host *vha = sess->vha;
368
369 vha->hw->tgt.tgt_ops->clear_nacl_from_fcport_map(sess);
370
371 list_del(&sess->sess_list_entry);
372 if (sess->deleted)
373 list_del(&sess->del_list_entry);
374
375 INIT_WORK(&sess->free_work, qlt_free_session_done);
376 schedule_work(&sess->free_work);
377}
378EXPORT_SYMBOL(qlt_unreg_sess);
379
380/* ha->hardware_lock supposed to be held on entry */
381static int qlt_reset(struct scsi_qla_host *vha, void *iocb, int mcmd)
382{
383 struct qla_hw_data *ha = vha->hw;
384 struct qla_tgt_sess *sess = NULL;
385 uint32_t unpacked_lun, lun = 0;
386 uint16_t loop_id;
387 int res = 0;
388 struct imm_ntfy_from_isp *n = (struct imm_ntfy_from_isp *)iocb;
389 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
390
391 loop_id = le16_to_cpu(n->u.isp24.nport_handle);
392 if (loop_id == 0xFFFF) {
393#if 0 /* FIXME: Re-enable Global event handling.. */
394 /* Global event */
395 atomic_inc(&ha->tgt.qla_tgt->tgt_global_resets_count);
396 qlt_clear_tgt_db(ha->tgt.qla_tgt, 1);
397 if (!list_empty(&ha->tgt.qla_tgt->sess_list)) {
398 sess = list_entry(ha->tgt.qla_tgt->sess_list.next,
399 typeof(*sess), sess_list_entry);
400 switch (mcmd) {
401 case QLA_TGT_NEXUS_LOSS_SESS:
402 mcmd = QLA_TGT_NEXUS_LOSS;
403 break;
404 case QLA_TGT_ABORT_ALL_SESS:
405 mcmd = QLA_TGT_ABORT_ALL;
406 break;
407 case QLA_TGT_NEXUS_LOSS:
408 case QLA_TGT_ABORT_ALL:
409 break;
410 default:
411 ql_dbg(ql_dbg_tgt, vha, 0xe046,
412 "qla_target(%d): Not allowed "
413 "command %x in %s", vha->vp_idx,
414 mcmd, __func__);
415 sess = NULL;
416 break;
417 }
418 } else
419 sess = NULL;
420#endif
421 } else {
422 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
423 }
424
425 ql_dbg(ql_dbg_tgt, vha, 0xe000,
426 "Using sess for qla_tgt_reset: %p\n", sess);
427 if (!sess) {
428 res = -ESRCH;
429 return res;
430 }
431
432 ql_dbg(ql_dbg_tgt, vha, 0xe047,
433 "scsi(%ld): resetting (session %p from port "
434 "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x, "
435 "mcmd %x, loop_id %d)\n", vha->host_no, sess,
436 sess->port_name[0], sess->port_name[1],
437 sess->port_name[2], sess->port_name[3],
438 sess->port_name[4], sess->port_name[5],
439 sess->port_name[6], sess->port_name[7],
440 mcmd, loop_id);
441
442 lun = a->u.isp24.fcp_cmnd.lun;
443 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
444
445 return qlt_issue_task_mgmt(sess, unpacked_lun, mcmd,
446 iocb, QLA24XX_MGMT_SEND_NACK);
447}
448
449/* ha->hardware_lock supposed to be held on entry */
450static void qlt_schedule_sess_for_deletion(struct qla_tgt_sess *sess,
451 bool immediate)
452{
453 struct qla_tgt *tgt = sess->tgt;
454 uint32_t dev_loss_tmo = tgt->ha->port_down_retry_count + 5;
455
456 if (sess->deleted)
457 return;
458
459 ql_dbg(ql_dbg_tgt, sess->vha, 0xe001,
460 "Scheduling sess %p for deletion\n", sess);
461 list_add_tail(&sess->del_list_entry, &tgt->del_sess_list);
462 sess->deleted = 1;
463
464 if (immediate)
465 dev_loss_tmo = 0;
466
467 sess->expires = jiffies + dev_loss_tmo * HZ;
468
469 ql_dbg(ql_dbg_tgt, sess->vha, 0xe048,
470 "qla_target(%d): session for port %02x:%02x:%02x:"
471 "%02x:%02x:%02x:%02x:%02x (loop ID %d) scheduled for "
472 "deletion in %u secs (expires: %lu) immed: %d\n",
473 sess->vha->vp_idx,
474 sess->port_name[0], sess->port_name[1],
475 sess->port_name[2], sess->port_name[3],
476 sess->port_name[4], sess->port_name[5],
477 sess->port_name[6], sess->port_name[7],
478 sess->loop_id, dev_loss_tmo, sess->expires, immediate);
479
480 if (immediate)
481 schedule_delayed_work(&tgt->sess_del_work, 0);
482 else
483 schedule_delayed_work(&tgt->sess_del_work,
484 jiffies - sess->expires);
485}
486
487/* ha->hardware_lock supposed to be held on entry */
488static void qlt_clear_tgt_db(struct qla_tgt *tgt, bool local_only)
489{
490 struct qla_tgt_sess *sess;
491
492 list_for_each_entry(sess, &tgt->sess_list, sess_list_entry)
493 qlt_schedule_sess_for_deletion(sess, true);
494
495 /* At this point tgt could be already dead */
496}
497
498static int qla24xx_get_loop_id(struct scsi_qla_host *vha, const uint8_t *s_id,
499 uint16_t *loop_id)
500{
501 struct qla_hw_data *ha = vha->hw;
502 dma_addr_t gid_list_dma;
503 struct gid_list_info *gid_list;
504 char *id_iter;
505 int res, rc, i;
506 uint16_t entries;
507
508 gid_list = dma_alloc_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
509 &gid_list_dma, GFP_KERNEL);
510 if (!gid_list) {
511 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf044,
512 "qla_target(%d): DMA Alloc failed of %u\n",
513 vha->vp_idx, qla2x00_gid_list_size(ha));
514 return -ENOMEM;
515 }
516
517 /* Get list of logged in devices */
518 rc = qla2x00_get_id_list(vha, gid_list, gid_list_dma, &entries);
519 if (rc != QLA_SUCCESS) {
520 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf045,
521 "qla_target(%d): get_id_list() failed: %x\n",
522 vha->vp_idx, rc);
523 res = -1;
524 goto out_free_id_list;
525 }
526
527 id_iter = (char *)gid_list;
528 res = -1;
529 for (i = 0; i < entries; i++) {
530 struct gid_list_info *gid = (struct gid_list_info *)id_iter;
531 if ((gid->al_pa == s_id[2]) &&
532 (gid->area == s_id[1]) &&
533 (gid->domain == s_id[0])) {
534 *loop_id = le16_to_cpu(gid->loop_id);
535 res = 0;
536 break;
537 }
538 id_iter += ha->gid_list_info_size;
539 }
540
541out_free_id_list:
542 dma_free_coherent(&ha->pdev->dev, qla2x00_gid_list_size(ha),
543 gid_list, gid_list_dma);
544 return res;
545}
546
547static bool qlt_check_fcport_exist(struct scsi_qla_host *vha,
548 struct qla_tgt_sess *sess)
549{
550 struct qla_hw_data *ha = vha->hw;
551 struct qla_port_24xx_data *pmap24;
552 bool res, found = false;
553 int rc, i;
554 uint16_t loop_id = 0xFFFF; /* to eliminate compiler's warning */
555 uint16_t entries;
556 void *pmap;
557 int pmap_len;
558 fc_port_t *fcport;
559 int global_resets;
560
561retry:
562 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
563
564 rc = qla2x00_get_node_name_list(vha, &pmap, &pmap_len);
565 if (rc != QLA_SUCCESS) {
566 res = false;
567 goto out;
568 }
569
570 pmap24 = pmap;
571 entries = pmap_len/sizeof(*pmap24);
572
573 for (i = 0; i < entries; ++i) {
574 if (!memcmp(sess->port_name, pmap24[i].port_name, WWN_SIZE)) {
575 loop_id = le16_to_cpu(pmap24[i].loop_id);
576 found = true;
577 break;
578 }
579 }
580
581 kfree(pmap);
582
583 if (!found) {
584 res = false;
585 goto out;
586 }
587
588 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf046,
589 "qlt_check_fcport_exist(): loop_id %d", loop_id);
590
591 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
592 if (fcport == NULL) {
593 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf047,
594 "qla_target(%d): Allocation of tmp FC port failed",
595 vha->vp_idx);
596 res = false;
597 goto out;
598 }
599
600 fcport->loop_id = loop_id;
601
602 rc = qla2x00_get_port_database(vha, fcport, 0);
603 if (rc != QLA_SUCCESS) {
604 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf048,
605 "qla_target(%d): Failed to retrieve fcport "
606 "information -- get_port_database() returned %x "
607 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
608 res = false;
609 goto out_free_fcport;
610 }
611
612 if (global_resets !=
613 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf002,
615 "qla_target(%d): global reset during session discovery"
616 " (counter was %d, new %d), retrying",
617 vha->vp_idx, global_resets,
618 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
619 goto retry;
620 }
621
622 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf003,
623 "Updating sess %p s_id %x:%x:%x, loop_id %d) to d_id %x:%x:%x, "
624 "loop_id %d", sess, sess->s_id.b.domain, sess->s_id.b.al_pa,
625 sess->s_id.b.area, sess->loop_id, fcport->d_id.b.domain,
626 fcport->d_id.b.al_pa, fcport->d_id.b.area, fcport->loop_id);
627
628 sess->s_id = fcport->d_id;
629 sess->loop_id = fcport->loop_id;
630 sess->conf_compl_supported = !!(fcport->flags &
631 FCF_CONF_COMP_SUPPORTED);
632
633 res = true;
634
635out_free_fcport:
636 kfree(fcport);
637
638out:
639 return res;
640}
641
642/* ha->hardware_lock supposed to be held on entry */
643static void qlt_undelete_sess(struct qla_tgt_sess *sess)
644{
645 BUG_ON(!sess->deleted);
646
647 list_del(&sess->del_list_entry);
648 sess->deleted = 0;
649}
650
651static void qlt_del_sess_work_fn(struct delayed_work *work)
652{
653 struct qla_tgt *tgt = container_of(work, struct qla_tgt,
654 sess_del_work);
655 struct scsi_qla_host *vha = tgt->vha;
656 struct qla_hw_data *ha = vha->hw;
657 struct qla_tgt_sess *sess;
658 unsigned long flags;
659
660 spin_lock_irqsave(&ha->hardware_lock, flags);
661 while (!list_empty(&tgt->del_sess_list)) {
662 sess = list_entry(tgt->del_sess_list.next, typeof(*sess),
663 del_list_entry);
664 if (time_after_eq(jiffies, sess->expires)) {
665 bool cancel;
666
667 qlt_undelete_sess(sess);
668
669 spin_unlock_irqrestore(&ha->hardware_lock, flags);
670 cancel = qlt_check_fcport_exist(vha, sess);
671
672 if (cancel) {
673 if (sess->deleted) {
674 /*
675 * sess was again deleted while we were
676 * discovering it
677 */
678 spin_lock_irqsave(&ha->hardware_lock,
679 flags);
680 continue;
681 }
682
683 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf049,
684 "qla_target(%d): cancel deletion of "
685 "session for port %02x:%02x:%02x:%02x:%02x:"
686 "%02x:%02x:%02x (loop ID %d), because "
687 " it isn't deleted by firmware",
688 vha->vp_idx, sess->port_name[0],
689 sess->port_name[1], sess->port_name[2],
690 sess->port_name[3], sess->port_name[4],
691 sess->port_name[5], sess->port_name[6],
692 sess->port_name[7], sess->loop_id);
693 } else {
694 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf004,
695 "Timeout: sess %p about to be deleted\n",
696 sess);
697 ha->tgt.tgt_ops->shutdown_sess(sess);
698 ha->tgt.tgt_ops->put_sess(sess);
699 }
700
701 spin_lock_irqsave(&ha->hardware_lock, flags);
702 } else {
703 schedule_delayed_work(&tgt->sess_del_work,
704 jiffies - sess->expires);
705 break;
706 }
707 }
708 spin_unlock_irqrestore(&ha->hardware_lock, flags);
709}
710
711/*
 712 * Adds an extra ref to allow dropping the hw lock after adding sess to the list.
713 * Caller must put it.
714 */
715static struct qla_tgt_sess *qlt_create_sess(
716 struct scsi_qla_host *vha,
717 fc_port_t *fcport,
718 bool local)
719{
720 struct qla_hw_data *ha = vha->hw;
721 struct qla_tgt_sess *sess;
722 unsigned long flags;
723 unsigned char be_sid[3];
724
725 /* Check to avoid double sessions */
726 spin_lock_irqsave(&ha->hardware_lock, flags);
727 list_for_each_entry(sess, &ha->tgt.qla_tgt->sess_list,
728 sess_list_entry) {
729 if (!memcmp(sess->port_name, fcport->port_name, WWN_SIZE)) {
730 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf005,
731 "Double sess %p found (s_id %x:%x:%x, "
732 "loop_id %d), updating to d_id %x:%x:%x, "
733 "loop_id %d", sess, sess->s_id.b.domain,
734 sess->s_id.b.al_pa, sess->s_id.b.area,
735 sess->loop_id, fcport->d_id.b.domain,
736 fcport->d_id.b.al_pa, fcport->d_id.b.area,
737 fcport->loop_id);
738
739 if (sess->deleted)
740 qlt_undelete_sess(sess);
741
742 kref_get(&sess->se_sess->sess_kref);
743 sess->s_id = fcport->d_id;
744 sess->loop_id = fcport->loop_id;
745 sess->conf_compl_supported = !!(fcport->flags &
746 FCF_CONF_COMP_SUPPORTED);
747 if (sess->local && !local)
748 sess->local = 0;
749 spin_unlock_irqrestore(&ha->hardware_lock, flags);
750
751 return sess;
752 }
753 }
754 spin_unlock_irqrestore(&ha->hardware_lock, flags);
755
756 sess = kzalloc(sizeof(*sess), GFP_KERNEL);
757 if (!sess) {
758 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04a,
759 "qla_target(%u): session allocation failed, "
760 "all commands from port %02x:%02x:%02x:%02x:"
761 "%02x:%02x:%02x:%02x will be refused", vha->vp_idx,
762 fcport->port_name[0], fcport->port_name[1],
763 fcport->port_name[2], fcport->port_name[3],
764 fcport->port_name[4], fcport->port_name[5],
765 fcport->port_name[6], fcport->port_name[7]);
766
767 return NULL;
768 }
769 sess->tgt = ha->tgt.qla_tgt;
770 sess->vha = vha;
771 sess->s_id = fcport->d_id;
772 sess->loop_id = fcport->loop_id;
773 sess->local = local;
774
775 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf006,
776 "Adding sess %p to tgt %p via ->check_initiator_node_acl()\n",
777 sess, ha->tgt.qla_tgt);
778
779 be_sid[0] = sess->s_id.b.domain;
780 be_sid[1] = sess->s_id.b.area;
781 be_sid[2] = sess->s_id.b.al_pa;
782 /*
783 * Determine if this fc_port->port_name is allowed to access
 784	 * target mode using explicit NodeACLs+MappedLUNs, or using
785 * TPG demo mode. If this is successful a target mode FC nexus
786 * is created.
787 */
788 if (ha->tgt.tgt_ops->check_initiator_node_acl(vha,
789 &fcport->port_name[0], sess, &be_sid[0], fcport->loop_id) < 0) {
790 kfree(sess);
791 return NULL;
792 }
793 /*
794 * Take an extra reference to ->sess_kref here to handle qla_tgt_sess
 795	 * access across ->hardware_lock reacquire.
796 */
797 kref_get(&sess->se_sess->sess_kref);
798
799 sess->conf_compl_supported = !!(fcport->flags &
800 FCF_CONF_COMP_SUPPORTED);
801 BUILD_BUG_ON(sizeof(sess->port_name) != sizeof(fcport->port_name));
802 memcpy(sess->port_name, fcport->port_name, sizeof(sess->port_name));
803
804 spin_lock_irqsave(&ha->hardware_lock, flags);
805 list_add_tail(&sess->sess_list_entry, &ha->tgt.qla_tgt->sess_list);
806 ha->tgt.qla_tgt->sess_count++;
807 spin_unlock_irqrestore(&ha->hardware_lock, flags);
808
809 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04b,
810 "qla_target(%d): %ssession for wwn %02x:%02x:%02x:%02x:"
811 "%02x:%02x:%02x:%02x (loop_id %d, s_id %x:%x:%x, confirmed"
812 " completion %ssupported) added\n",
813 vha->vp_idx, local ? "local " : "", fcport->port_name[0],
814 fcport->port_name[1], fcport->port_name[2], fcport->port_name[3],
815 fcport->port_name[4], fcport->port_name[5], fcport->port_name[6],
816 fcport->port_name[7], fcport->loop_id, sess->s_id.b.domain,
817 sess->s_id.b.area, sess->s_id.b.al_pa, sess->conf_compl_supported ?
818 "" : "not ");
819
820 return sess;
821}
822
823/*
824 * Called from drivers/scsi/qla2xxx/qla_init.c:qla2x00_reg_remote_port()
825 */
826void qlt_fc_port_added(struct scsi_qla_host *vha, fc_port_t *fcport)
827{
828 struct qla_hw_data *ha = vha->hw;
829 struct qla_tgt *tgt = ha->tgt.qla_tgt;
830 struct qla_tgt_sess *sess;
831 unsigned long flags;
832
833 if (!vha->hw->tgt.tgt_ops)
834 return;
835
836 if (!tgt || (fcport->port_type != FCT_INITIATOR))
837 return;
838
839 spin_lock_irqsave(&ha->hardware_lock, flags);
840 if (tgt->tgt_stop) {
841 spin_unlock_irqrestore(&ha->hardware_lock, flags);
842 return;
843 }
844 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
845 if (!sess) {
846 spin_unlock_irqrestore(&ha->hardware_lock, flags);
847
848 mutex_lock(&ha->tgt.tgt_mutex);
849 sess = qlt_create_sess(vha, fcport, false);
850 mutex_unlock(&ha->tgt.tgt_mutex);
851
852 spin_lock_irqsave(&ha->hardware_lock, flags);
853 } else {
854 kref_get(&sess->se_sess->sess_kref);
855
856 if (sess->deleted) {
857 qlt_undelete_sess(sess);
858
859 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04c,
860 "qla_target(%u): %ssession for port %02x:"
861 "%02x:%02x:%02x:%02x:%02x:%02x:%02x (loop ID %d) "
862 "reappeared\n", vha->vp_idx, sess->local ? "local "
863 : "", sess->port_name[0], sess->port_name[1],
864 sess->port_name[2], sess->port_name[3],
865 sess->port_name[4], sess->port_name[5],
866 sess->port_name[6], sess->port_name[7],
867 sess->loop_id);
868
869 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf007,
870 "Reappeared sess %p\n", sess);
871 }
872 sess->s_id = fcport->d_id;
873 sess->loop_id = fcport->loop_id;
874 sess->conf_compl_supported = !!(fcport->flags &
875 FCF_CONF_COMP_SUPPORTED);
876 }
877
878 if (sess && sess->local) {
879 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04d,
880 "qla_target(%u): local session for "
881 "port %02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x "
882 "(loop ID %d) became global\n", vha->vp_idx,
883 fcport->port_name[0], fcport->port_name[1],
884 fcport->port_name[2], fcport->port_name[3],
885 fcport->port_name[4], fcport->port_name[5],
886 fcport->port_name[6], fcport->port_name[7],
887 sess->loop_id);
888 sess->local = 0;
889 }
890 spin_unlock_irqrestore(&ha->hardware_lock, flags);
891
892 ha->tgt.tgt_ops->put_sess(sess);
893}
894
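/*
 * Counterpart of qlt_fc_port_added(): mark the initiator's session as local
 * and schedule it for delayed deletion once its fc_port goes away.
 */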
895void qlt_fc_port_deleted(struct scsi_qla_host *vha, fc_port_t *fcport)
896{
897 struct qla_hw_data *ha = vha->hw;
898 struct qla_tgt *tgt = ha->tgt.qla_tgt;
899 struct qla_tgt_sess *sess;
900 unsigned long flags;
901
902 if (!vha->hw->tgt.tgt_ops)
903 return;
904
905 if (!tgt || (fcport->port_type != FCT_INITIATOR))
906 return;
907
908 spin_lock_irqsave(&ha->hardware_lock, flags);
909 if (tgt->tgt_stop) {
910 spin_unlock_irqrestore(&ha->hardware_lock, flags);
911 return;
912 }
913 sess = qlt_find_sess_by_port_name(tgt, fcport->port_name);
914 if (!sess) {
915 spin_unlock_irqrestore(&ha->hardware_lock, flags);
916 return;
917 }
918
919 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf008, "qla_tgt_fc_port_deleted %p", sess);
920
921 sess->local = 1;
922 qlt_schedule_sess_for_deletion(sess, false);
923 spin_unlock_irqrestore(&ha->hardware_lock, flags);
924}
925
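/* Return non-zero once every session of this target has been released. */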
926static inline int test_tgt_sess_count(struct qla_tgt *tgt)
927{
928 struct qla_hw_data *ha = tgt->ha;
929 unsigned long flags;
930 int res;
931 /*
 932 	 * We need to protect against a race where tgt is freed before or
 933 	 * inside wake_up().
934 */
935 spin_lock_irqsave(&ha->hardware_lock, flags);
936 ql_dbg(ql_dbg_tgt, tgt->vha, 0xe002,
937 "tgt %p, empty(sess_list)=%d sess_count=%d\n",
938 tgt, list_empty(&tgt->sess_list), tgt->sess_count);
939 res = (tgt->sess_count == 0);
940 spin_unlock_irqrestore(&ha->hardware_lock, flags);
941
942 return res;
943}
944
945/* Called by tcm_qla2xxx configfs code */
946void qlt_stop_phase1(struct qla_tgt *tgt)
947{
948 struct scsi_qla_host *vha = tgt->vha;
949 struct qla_hw_data *ha = tgt->ha;
950 unsigned long flags;
951
952 if (tgt->tgt_stop || tgt->tgt_stopped) {
953 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf04e,
954 "Already in tgt->tgt_stop or tgt_stopped state\n");
955 dump_stack();
956 return;
957 }
958
959 ql_dbg(ql_dbg_tgt, vha, 0xe003, "Stopping target for host %ld(%p)\n",
960 vha->host_no, vha);
961 /*
962 * Mutex needed to sync with qla_tgt_fc_port_[added,deleted].
 963 	 * Lock is needed, because we can still get an incoming packet.
964 */
965 mutex_lock(&ha->tgt.tgt_mutex);
966 spin_lock_irqsave(&ha->hardware_lock, flags);
967 tgt->tgt_stop = 1;
968 qlt_clear_tgt_db(tgt, true);
969 spin_unlock_irqrestore(&ha->hardware_lock, flags);
970 mutex_unlock(&ha->tgt.tgt_mutex);
971
972 flush_delayed_work_sync(&tgt->sess_del_work);
973
974 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf009,
975 "Waiting for sess works (tgt %p)", tgt);
976 spin_lock_irqsave(&tgt->sess_work_lock, flags);
977 while (!list_empty(&tgt->sess_works_list)) {
978 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
979 flush_scheduled_work();
980 spin_lock_irqsave(&tgt->sess_work_lock, flags);
981 }
982 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
983
984 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00a,
985 "Waiting for tgt %p: list_empty(sess_list)=%d "
986 "sess_count=%d\n", tgt, list_empty(&tgt->sess_list),
987 tgt->sess_count);
988
989 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
990
991 /* Big hammer */
992 if (!ha->flags.host_shutting_down && qla_tgt_mode_enabled(vha))
993 qlt_disable_vha(vha);
994
995 /* Wait for sessions to clear out (just in case) */
996 wait_event(tgt->waitQ, test_tgt_sess_count(tgt));
997}
998EXPORT_SYMBOL(qlt_stop_phase1);
999
1000/* Called by tcm_qla2xxx configfs code */
1001void qlt_stop_phase2(struct qla_tgt *tgt)
1002{
1003 struct qla_hw_data *ha = tgt->ha;
1004 unsigned long flags;
1005
1006 if (tgt->tgt_stopped) {
1007 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf04f,
1008 "Already in tgt->tgt_stopped state\n");
1009 dump_stack();
1010 return;
1011 }
1012
1013 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00b,
1014 "Waiting for %d IRQ commands to complete (tgt %p)",
1015 tgt->irq_cmd_count, tgt);
1016
1017 mutex_lock(&ha->tgt.tgt_mutex);
1018 spin_lock_irqsave(&ha->hardware_lock, flags);
1019 while (tgt->irq_cmd_count != 0) {
1020 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1021 udelay(2);
1022 spin_lock_irqsave(&ha->hardware_lock, flags);
1023 }
1024 tgt->tgt_stop = 0;
1025 tgt->tgt_stopped = 1;
1026 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1027 mutex_unlock(&ha->tgt.tgt_mutex);
1028
1029 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00c, "Stop of tgt %p finished",
1030 tgt);
1031}
1032EXPORT_SYMBOL(qlt_stop_phase2);
1033
1034/* Called from qlt_remove_target() -> qla2x00_remove_one() */
1035void qlt_release(struct qla_tgt *tgt)
1036{
1037 struct qla_hw_data *ha = tgt->ha;
1038
1039 if ((ha->tgt.qla_tgt != NULL) && !tgt->tgt_stopped)
1040 qlt_stop_phase2(tgt);
1041
1042 ha->tgt.qla_tgt = NULL;
1043
1044 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00d,
1045 "Release of tgt %p finished\n", tgt);
1046
1047 kfree(tgt);
1048}
1049
1050/* ha->hardware_lock supposed to be held on entry */
1051static int qlt_sched_sess_work(struct qla_tgt *tgt, int type,
1052 const void *param, unsigned int param_size)
1053{
1054 struct qla_tgt_sess_work_param *prm;
1055 unsigned long flags;
1056
1057 prm = kzalloc(sizeof(*prm), GFP_ATOMIC);
1058 if (!prm) {
1059 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf050,
1060 "qla_target(%d): Unable to create session "
1061 "work, command will be refused", 0);
1062 return -ENOMEM;
1063 }
1064
1065 ql_dbg(ql_dbg_tgt_mgt, tgt->vha, 0xf00e,
1066 "Scheduling work (type %d, prm %p)"
1067 " to find session for param %p (size %d, tgt %p)\n",
1068 type, prm, param, param_size, tgt);
1069
1070 prm->type = type;
1071 memcpy(&prm->tm_iocb, param, param_size);
1072
1073 spin_lock_irqsave(&tgt->sess_work_lock, flags);
1074 list_add_tail(&prm->sess_works_list_entry, &tgt->sess_works_list);
1075 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
1076
1077 schedule_work(&tgt->sess_work);
1078
1079 return 0;
1080}
1081
1082/*
1083 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1084 */
1085static void qlt_send_notify_ack(struct scsi_qla_host *vha,
1086 struct imm_ntfy_from_isp *ntfy,
1087 uint32_t add_flags, uint16_t resp_code, int resp_code_valid,
1088 uint16_t srr_flags, uint16_t srr_reject_code, uint8_t srr_explan)
1089{
1090 struct qla_hw_data *ha = vha->hw;
1091 request_t *pkt;
1092 struct nack_to_isp *nack;
1093
1094 ql_dbg(ql_dbg_tgt, vha, 0xe004, "Sending NOTIFY_ACK (ha=%p)\n", ha);
1095
1096 /* Send marker if required */
1097 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1098 return;
1099
1100 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
1101 if (!pkt) {
1102 ql_dbg(ql_dbg_tgt, vha, 0xe049,
1103 "qla_target(%d): %s failed: unable to allocate "
1104 "request packet\n", vha->vp_idx, __func__);
1105 return;
1106 }
1107
1108 if (ha->tgt.qla_tgt != NULL)
1109 ha->tgt.qla_tgt->notify_ack_expected++;
1110
1111 pkt->entry_type = NOTIFY_ACK_TYPE;
1112 pkt->entry_count = 1;
1113
1114 nack = (struct nack_to_isp *)pkt;
1115 nack->ox_id = ntfy->ox_id;
1116
1117 nack->u.isp24.nport_handle = ntfy->u.isp24.nport_handle;
1118 if (le16_to_cpu(ntfy->u.isp24.status) == IMM_NTFY_ELS) {
1119 nack->u.isp24.flags = ntfy->u.isp24.flags &
1120 __constant_cpu_to_le32(NOTIFY24XX_FLAGS_PUREX_IOCB);
1121 }
1122 nack->u.isp24.srr_rx_id = ntfy->u.isp24.srr_rx_id;
1123 nack->u.isp24.status = ntfy->u.isp24.status;
1124 nack->u.isp24.status_subcode = ntfy->u.isp24.status_subcode;
1125 nack->u.isp24.exchange_address = ntfy->u.isp24.exchange_address;
1126 nack->u.isp24.srr_rel_offs = ntfy->u.isp24.srr_rel_offs;
1127 nack->u.isp24.srr_ui = ntfy->u.isp24.srr_ui;
1128 nack->u.isp24.srr_flags = cpu_to_le16(srr_flags);
1129 nack->u.isp24.srr_reject_code = srr_reject_code;
1130 nack->u.isp24.srr_reject_code_expl = srr_explan;
1131 nack->u.isp24.vp_index = ntfy->u.isp24.vp_index;
1132
1133 ql_dbg(ql_dbg_tgt, vha, 0xe005,
1134 "qla_target(%d): Sending 24xx Notify Ack %d\n",
1135 vha->vp_idx, nack->u.isp24.status);
1136
1137 qla2x00_start_iocbs(vha, vha->req);
1138}
1139
1140/*
1141 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1142 */
1143static void qlt_24xx_send_abts_resp(struct scsi_qla_host *vha,
1144 struct abts_recv_from_24xx *abts, uint32_t status,
1145 bool ids_reversed)
1146{
1147 struct qla_hw_data *ha = vha->hw;
1148 struct abts_resp_to_24xx *resp;
1149 uint32_t f_ctl;
1150 uint8_t *p;
1151
1152 ql_dbg(ql_dbg_tgt, vha, 0xe006,
1153 	    "Sending task mgmt ABTS response (ha=%p, atio=%p, status=%x)\n",
1154 ha, abts, status);
1155
1156 /* Send marker if required */
1157 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1158 return;
1159
1160 resp = (struct abts_resp_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
1161 if (!resp) {
1162 ql_dbg(ql_dbg_tgt, vha, 0xe04a,
1163 "qla_target(%d): %s failed: unable to allocate "
1164 "request packet", vha->vp_idx, __func__);
1165 return;
1166 }
1167
1168 resp->entry_type = ABTS_RESP_24XX;
1169 resp->entry_count = 1;
1170 resp->nport_handle = abts->nport_handle;
1171 resp->vp_index = vha->vp_idx;
1172 resp->sof_type = abts->sof_type;
1173 resp->exchange_address = abts->exchange_address;
1174 resp->fcp_hdr_le = abts->fcp_hdr_le;
1175 f_ctl = __constant_cpu_to_le32(F_CTL_EXCH_CONTEXT_RESP |
1176 F_CTL_LAST_SEQ | F_CTL_END_SEQ |
1177 F_CTL_SEQ_INITIATIVE);
1178 p = (uint8_t *)&f_ctl;
1179 resp->fcp_hdr_le.f_ctl[0] = *p++;
1180 resp->fcp_hdr_le.f_ctl[1] = *p++;
1181 resp->fcp_hdr_le.f_ctl[2] = *p;
1182 if (ids_reversed) {
1183 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.d_id[0];
1184 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.d_id[1];
1185 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.d_id[2];
1186 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.s_id[0];
1187 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.s_id[1];
1188 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.s_id[2];
1189 } else {
1190 resp->fcp_hdr_le.d_id[0] = abts->fcp_hdr_le.s_id[0];
1191 resp->fcp_hdr_le.d_id[1] = abts->fcp_hdr_le.s_id[1];
1192 resp->fcp_hdr_le.d_id[2] = abts->fcp_hdr_le.s_id[2];
1193 resp->fcp_hdr_le.s_id[0] = abts->fcp_hdr_le.d_id[0];
1194 resp->fcp_hdr_le.s_id[1] = abts->fcp_hdr_le.d_id[1];
1195 resp->fcp_hdr_le.s_id[2] = abts->fcp_hdr_le.d_id[2];
1196 }
1197 resp->exchange_addr_to_abort = abts->exchange_addr_to_abort;
1198 if (status == FCP_TMF_CMPL) {
1199 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_ACC;
1200 resp->payload.ba_acct.seq_id_valid = SEQ_ID_INVALID;
1201 resp->payload.ba_acct.low_seq_cnt = 0x0000;
1202 resp->payload.ba_acct.high_seq_cnt = 0xFFFF;
1203 resp->payload.ba_acct.ox_id = abts->fcp_hdr_le.ox_id;
1204 resp->payload.ba_acct.rx_id = abts->fcp_hdr_le.rx_id;
1205 } else {
1206 resp->fcp_hdr_le.r_ctl = R_CTL_BASIC_LINK_SERV | R_CTL_B_RJT;
1207 resp->payload.ba_rjt.reason_code =
1208 BA_RJT_REASON_CODE_UNABLE_TO_PERFORM;
1209 /* Other bytes are zero */
1210 }
1211
1212 ha->tgt.qla_tgt->abts_resp_expected++;
1213
1214 qla2x00_start_iocbs(vha, vha->req);
1215}
1216
1217/*
1218 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1219 */
1220static void qlt_24xx_retry_term_exchange(struct scsi_qla_host *vha,
1221 struct abts_resp_from_24xx_fw *entry)
1222{
1223 struct ctio7_to_24xx *ctio;
1224
1225 ql_dbg(ql_dbg_tgt, vha, 0xe007,
1226 "Sending retry TERM EXCH CTIO7 (ha=%p)\n", vha->hw);
1227 /* Send marker if required */
1228 if (qlt_issue_marker(vha, 1) != QLA_SUCCESS)
1229 return;
1230
1231 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(vha, NULL);
1232 if (ctio == NULL) {
1233 ql_dbg(ql_dbg_tgt, vha, 0xe04b,
1234 "qla_target(%d): %s failed: unable to allocate "
1235 "request packet\n", vha->vp_idx, __func__);
1236 return;
1237 }
1238
1239 /*
1240 	 * This is the firmware's response to the ABTS response that we
1241 	 * generated, so the ID fields in it are reversed.
1242 */
1243
1244 ctio->entry_type = CTIO_TYPE7;
1245 ctio->entry_count = 1;
1246 ctio->nport_handle = entry->nport_handle;
1247 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1248 ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1249 ctio->vp_index = vha->vp_idx;
1250 ctio->initiator_id[0] = entry->fcp_hdr_le.d_id[0];
1251 ctio->initiator_id[1] = entry->fcp_hdr_le.d_id[1];
1252 ctio->initiator_id[2] = entry->fcp_hdr_le.d_id[2];
1253 ctio->exchange_addr = entry->exchange_addr_to_abort;
1254 ctio->u.status1.flags =
1255 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1256 CTIO7_FLAGS_TERMINATE);
1257 ctio->u.status1.ox_id = entry->fcp_hdr_le.ox_id;
1258
1259 qla2x00_start_iocbs(vha, vha->req);
1260
1261 qlt_24xx_send_abts_resp(vha, (struct abts_recv_from_24xx *)entry,
1262 FCP_TMF_CMPL, true);
1263}
1264
1265/* ha->hardware_lock supposed to be held on entry */
1266static int __qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1267 struct abts_recv_from_24xx *abts, struct qla_tgt_sess *sess)
1268{
1269 struct qla_hw_data *ha = vha->hw;
1270 struct qla_tgt_mgmt_cmd *mcmd;
1271 int rc;
1272
1273 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf00f,
1274 "qla_target(%d): task abort (tag=%d)\n",
1275 vha->vp_idx, abts->exchange_addr_to_abort);
1276
1277 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
1278 if (mcmd == NULL) {
1279 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf051,
1280 "qla_target(%d): %s: Allocation of ABORT cmd failed",
1281 vha->vp_idx, __func__);
1282 return -ENOMEM;
1283 }
1284 memset(mcmd, 0, sizeof(*mcmd));
1285
1286 mcmd->sess = sess;
1287 memcpy(&mcmd->orig_iocb.abts, abts, sizeof(mcmd->orig_iocb.abts));
1288
1289 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, 0, TMR_ABORT_TASK,
1290 abts->exchange_addr_to_abort);
1291 if (rc != 0) {
1292 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf052,
1293 "qla_target(%d): tgt_ops->handle_tmr()"
1294 " failed: %d", vha->vp_idx, rc);
1295 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1296 return -EFAULT;
1297 }
1298
1299 return 0;
1300}
1301
1302/*
1303 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1304 */
1305static void qlt_24xx_handle_abts(struct scsi_qla_host *vha,
1306 struct abts_recv_from_24xx *abts)
1307{
1308 struct qla_hw_data *ha = vha->hw;
1309 struct qla_tgt_sess *sess;
1310 uint32_t tag = abts->exchange_addr_to_abort;
1311 uint8_t s_id[3];
1312 int rc;
1313
1314 if (le32_to_cpu(abts->fcp_hdr_le.parameter) & ABTS_PARAM_ABORT_SEQ) {
1315 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf053,
1316 "qla_target(%d): ABTS: Abort Sequence not "
1317 "supported\n", vha->vp_idx);
1318 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1319 return;
1320 }
1321
1322 if (tag == ATIO_EXCHANGE_ADDRESS_UNKNOWN) {
1323 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf010,
1324 "qla_target(%d): ABTS: Unknown Exchange "
1325 "Address received\n", vha->vp_idx);
1326 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1327 return;
1328 }
1329
1330 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf011,
1331 "qla_target(%d): task abort (s_id=%x:%x:%x, "
1332 "tag=%d, param=%x)\n", vha->vp_idx, abts->fcp_hdr_le.s_id[2],
1333 abts->fcp_hdr_le.s_id[1], abts->fcp_hdr_le.s_id[0], tag,
1334 le32_to_cpu(abts->fcp_hdr_le.parameter));
1335
1336 s_id[0] = abts->fcp_hdr_le.s_id[2];
1337 s_id[1] = abts->fcp_hdr_le.s_id[1];
1338 s_id[2] = abts->fcp_hdr_le.s_id[0];
1339
1340 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
1341 if (!sess) {
1342 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf012,
1343 		    "qla_target(%d): task abort for non-existent session\n",
1344 vha->vp_idx);
1345 rc = qlt_sched_sess_work(ha->tgt.qla_tgt,
1346 QLA_TGT_SESS_WORK_ABORT, abts, sizeof(*abts));
1347 if (rc != 0) {
1348 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED,
1349 false);
1350 }
1351 return;
1352 }
1353
1354 rc = __qlt_24xx_handle_abts(vha, abts, sess);
1355 if (rc != 0) {
1356 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf054,
1357 "qla_target(%d): __qlt_24xx_handle_abts() failed: %d\n",
1358 vha->vp_idx, rc);
1359 qlt_24xx_send_abts_resp(vha, abts, FCP_TMF_REJECTED, false);
1360 return;
1361 }
1362}
1363
1364/*
1365 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1366 */
1367static void qlt_24xx_send_task_mgmt_ctio(struct scsi_qla_host *ha,
1368 struct qla_tgt_mgmt_cmd *mcmd, uint32_t resp_code)
1369{
1370 struct atio_from_isp *atio = &mcmd->orig_iocb.atio;
1371 struct ctio7_to_24xx *ctio;
1372
1373 ql_dbg(ql_dbg_tgt, ha, 0xe008,
1374 	    "Sending task mgmt CTIO7 (ha=%p, atio=%p, resp_code=%x)\n",
1375 ha, atio, resp_code);
1376
1377 /* Send marker if required */
1378 if (qlt_issue_marker(ha, 1) != QLA_SUCCESS)
1379 return;
1380
1381 ctio = (struct ctio7_to_24xx *)qla2x00_alloc_iocbs(ha, NULL);
1382 if (ctio == NULL) {
1383 ql_dbg(ql_dbg_tgt, ha, 0xe04c,
1384 "qla_target(%d): %s failed: unable to allocate "
1385 "request packet\n", ha->vp_idx, __func__);
1386 return;
1387 }
1388
1389 ctio->entry_type = CTIO_TYPE7;
1390 ctio->entry_count = 1;
1391 ctio->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
1392 ctio->nport_handle = mcmd->sess->loop_id;
1393 ctio->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1394 ctio->vp_index = ha->vp_idx;
1395 ctio->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1396 ctio->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1397 ctio->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1398 ctio->exchange_addr = atio->u.isp24.exchange_addr;
1399 ctio->u.status1.flags = (atio->u.isp24.attr << 9) |
1400 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
1401 CTIO7_FLAGS_SEND_STATUS);
1402 ctio->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
1403 ctio->u.status1.scsi_status =
1404 __constant_cpu_to_le16(SS_RESPONSE_INFO_LEN_VALID);
1405 ctio->u.status1.response_len = __constant_cpu_to_le16(8);
1406 ((uint32_t *)ctio->u.status1.sense_data)[0] = cpu_to_be32(resp_code);
1407
1408 qla2x00_start_iocbs(ha, ha->req);
1409}
1410
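/* Return a task-management command descriptor to its mempool. */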
1411void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
1412{
1413 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
1414}
1415EXPORT_SYMBOL(qlt_free_mcmd);
1416
1417/* callback from target fabric module code */
1418void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *mcmd)
1419{
1420 struct scsi_qla_host *vha = mcmd->sess->vha;
1421 struct qla_hw_data *ha = vha->hw;
1422 unsigned long flags;
1423
1424 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf013,
1425 "TM response mcmd (%p) status %#x state %#x",
1426 mcmd, mcmd->fc_tm_rsp, mcmd->flags);
1427
1428 spin_lock_irqsave(&ha->hardware_lock, flags);
1429 if (mcmd->flags == QLA24XX_MGMT_SEND_NACK)
1430 qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
1431 0, 0, 0, 0, 0, 0);
1432 else {
1433 if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
1434 qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
1435 mcmd->fc_tm_rsp, false);
1436 else
1437 qlt_24xx_send_task_mgmt_ctio(vha, mcmd,
1438 mcmd->fc_tm_rsp);
1439 }
1440 /*
1441 * Make the callback for ->free_mcmd() to queue_work() and invoke
1442 * target_put_sess_cmd() to drop cmd_kref to 1. The final
1443 * target_put_sess_cmd() call will be made from TFO->check_stop_free()
1444 * -> tcm_qla2xxx_check_stop_free() to release the TMR associated se_cmd
1445 * descriptor after TFO->queue_tm_rsp() -> tcm_qla2xxx_queue_tm_rsp() ->
1446 * qlt_xmit_tm_rsp() returns here..
1447 */
1448 ha->tgt.tgt_ops->free_mcmd(mcmd);
1449 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1450}
1451EXPORT_SYMBOL(qlt_xmit_tm_rsp);
1452
1453/* No locks */
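/*
 * DMA-map the command's scatterlist and work out how many request-ring
 * entries (command plus continuation IOCBs) the transfer will need.
 */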
1454static int qlt_pci_map_calc_cnt(struct qla_tgt_prm *prm)
1455{
1456 struct qla_tgt_cmd *cmd = prm->cmd;
1457
1458 BUG_ON(cmd->sg_cnt == 0);
1459
1460 prm->sg = (struct scatterlist *)cmd->sg;
1461 prm->seg_cnt = pci_map_sg(prm->tgt->ha->pdev, cmd->sg,
1462 cmd->sg_cnt, cmd->dma_data_direction);
1463 if (unlikely(prm->seg_cnt == 0))
1464 goto out_err;
1465
1466 prm->cmd->sg_mapped = 1;
1467
1468 /*
1469 * If greater than four sg entries then we need to allocate
1470 * the continuation entries
1471 */
1472 if (prm->seg_cnt > prm->tgt->datasegs_per_cmd)
1473 prm->req_cnt += DIV_ROUND_UP(prm->seg_cnt -
1474 prm->tgt->datasegs_per_cmd, prm->tgt->datasegs_per_cont);
1475
1476 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe009, "seg_cnt=%d, req_cnt=%d\n",
1477 prm->seg_cnt, prm->req_cnt);
1478 return 0;
1479
1480out_err:
1481 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe04d,
1482 "qla_target(%d): PCI mapping failed: sg_cnt=%d",
1483 0, prm->cmd->sg_cnt);
1484 return -1;
1485}
1486
1487static inline void qlt_unmap_sg(struct scsi_qla_host *vha,
1488 struct qla_tgt_cmd *cmd)
1489{
1490 struct qla_hw_data *ha = vha->hw;
1491
1492 BUG_ON(!cmd->sg_mapped);
1493 pci_unmap_sg(ha->pdev, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
1494 cmd->sg_mapped = 0;
1495}
1496
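/*
 * Reserve req_cnt entries on the request ring, refreshing the cached free
 * count from the hardware out-pointer if it looks exhausted; returns
 * -EAGAIN when there is still no room.
 */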
1497static int qlt_check_reserve_free_req(struct scsi_qla_host *vha,
1498 uint32_t req_cnt)
1499{
1500 struct qla_hw_data *ha = vha->hw;
1501 device_reg_t __iomem *reg = ha->iobase;
1502 uint32_t cnt;
1503
1504 if (vha->req->cnt < (req_cnt + 2)) {
1505 cnt = (uint16_t)RD_REG_DWORD(&reg->isp24.req_q_out);
1506
1507 ql_dbg(ql_dbg_tgt, vha, 0xe00a,
1508 		    "Request ring circled: cnt=%d, vha->req->ring_index=%d, "
1509 "vha->req->cnt=%d, req_cnt=%d\n", cnt,
1510 vha->req->ring_index, vha->req->cnt, req_cnt);
1511 if (vha->req->ring_index < cnt)
1512 vha->req->cnt = cnt - vha->req->ring_index;
1513 else
1514 vha->req->cnt = vha->req->length -
1515 (vha->req->ring_index - cnt);
1516 }
1517
1518 if (unlikely(vha->req->cnt < (req_cnt + 2))) {
1519 ql_dbg(ql_dbg_tgt, vha, 0xe00b,
1520 "qla_target(%d): There is no room in the "
1521 "request ring: vha->req->ring_index=%d, vha->req->cnt=%d, "
1522 "req_cnt=%d\n", vha->vp_idx, vha->req->ring_index,
1523 vha->req->cnt, req_cnt);
1524 return -EAGAIN;
1525 }
1526 vha->req->cnt -= req_cnt;
1527
1528 return 0;
1529}
1530
1531/*
1532 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
1533 */
1534static inline void *qlt_get_req_pkt(struct scsi_qla_host *vha)
1535{
1536 /* Adjust ring index. */
1537 vha->req->ring_index++;
1538 if (vha->req->ring_index == vha->req->length) {
1539 vha->req->ring_index = 0;
1540 vha->req->ring_ptr = vha->req->ring;
1541 } else {
1542 vha->req->ring_ptr++;
1543 }
1544 return (cont_entry_t *)vha->req->ring_ptr;
1545}
1546
1547/* ha->hardware_lock supposed to be held on entry */
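/*
 * Pick the next free command handle in 1..MAX_OUTSTANDING_COMMANDS
 * (0 is QLA_TGT_NULL_HANDLE); returns QLA_TGT_NULL_HANDLE if every
 * slot is already in use.
 */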
1548static inline uint32_t qlt_make_handle(struct scsi_qla_host *vha)
1549{
1550 struct qla_hw_data *ha = vha->hw;
1551 uint32_t h;
1552
1553 h = ha->tgt.current_handle;
1554 /* always increment cmd handle */
1555 do {
1556 ++h;
1557 if (h > MAX_OUTSTANDING_COMMANDS)
1558 h = 1; /* 0 is QLA_TGT_NULL_HANDLE */
1559 if (h == ha->tgt.current_handle) {
1560 ql_dbg(ql_dbg_tgt, vha, 0xe04e,
1561 "qla_target(%d): Ran out of "
1562 "empty cmd slots in ha %p\n", vha->vp_idx, ha);
1563 h = QLA_TGT_NULL_HANDLE;
1564 break;
1565 }
1566 } while ((h == QLA_TGT_NULL_HANDLE) ||
1567 (h == QLA_TGT_SKIP_HANDLE) ||
1568 (ha->tgt.cmds[h-1] != NULL));
1569
1570 if (h != QLA_TGT_NULL_HANDLE)
1571 ha->tgt.current_handle = h;
1572
1573 return h;
1574}
1575
1576/* ha->hardware_lock supposed to be held on entry */
1577static int qlt_24xx_build_ctio_pkt(struct qla_tgt_prm *prm,
1578 struct scsi_qla_host *vha)
1579{
1580 uint32_t h;
1581 struct ctio7_to_24xx *pkt;
1582 struct qla_hw_data *ha = vha->hw;
1583 struct atio_from_isp *atio = &prm->cmd->atio;
1584
1585 pkt = (struct ctio7_to_24xx *)vha->req->ring_ptr;
1586 prm->pkt = pkt;
1587 memset(pkt, 0, sizeof(*pkt));
1588
1589 pkt->entry_type = CTIO_TYPE7;
1590 pkt->entry_count = (uint8_t)prm->req_cnt;
1591 pkt->vp_index = vha->vp_idx;
1592
1593 h = qlt_make_handle(vha);
1594 if (unlikely(h == QLA_TGT_NULL_HANDLE)) {
1595 /*
1596 * CTIO type 7 from the firmware doesn't provide a way to
1597 		 * know the initiator's LOOP ID, hence we can't find
1598 		 * the session and, therefore, the command.
1599 */
1600 return -EAGAIN;
1601 } else
1602 ha->tgt.cmds[h-1] = prm->cmd;
1603
1604 pkt->handle = h | CTIO_COMPLETION_HANDLE_MARK;
1605 pkt->nport_handle = prm->cmd->loop_id;
1606 pkt->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
1607 pkt->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
1608 pkt->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
1609 pkt->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
1610 pkt->exchange_addr = atio->u.isp24.exchange_addr;
1611 pkt->u.status0.flags |= (atio->u.isp24.attr << 9);
1612 pkt->u.status0.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
1613 pkt->u.status0.relative_offset = cpu_to_le32(prm->cmd->offset);
1614
1615 ql_dbg(ql_dbg_tgt, vha, 0xe00c,
1616 "qla_target(%d): handle(cmd) -> %08x, timeout %d, ox_id %#x\n",
1617 vha->vp_idx, pkt->handle, QLA_TGT_TIMEOUT,
1618 le16_to_cpu(pkt->u.status0.ox_id));
1619 return 0;
1620}
1621
1622/*
1623 * ha->hardware_lock supposed to be held on entry. We have already made sure
1624 * that there are enough request entries that the lock need not be dropped.
1625 */
1626static void qlt_load_cont_data_segments(struct qla_tgt_prm *prm,
1627 struct scsi_qla_host *vha)
1628{
1629 int cnt;
1630 uint32_t *dword_ptr;
1631 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1632
1633 /* Build continuation packets */
1634 while (prm->seg_cnt > 0) {
1635 cont_a64_entry_t *cont_pkt64 =
1636 (cont_a64_entry_t *)qlt_get_req_pkt(vha);
1637
1638 /*
1639 			 * Make sure that none of the 64-bit specific
1640 			 * fields of cont_pkt64 are used for 32-bit
1641 			 * addressing; cast to (cont_entry_t *) for
1642 			 * that.
1643 */
1644
1645 memset(cont_pkt64, 0, sizeof(*cont_pkt64));
1646
1647 cont_pkt64->entry_count = 1;
1648 cont_pkt64->sys_define = 0;
1649
1650 if (enable_64bit_addressing) {
1651 cont_pkt64->entry_type = CONTINUE_A64_TYPE;
1652 dword_ptr =
1653 (uint32_t *)&cont_pkt64->dseg_0_address;
1654 } else {
1655 cont_pkt64->entry_type = CONTINUE_TYPE;
1656 dword_ptr =
1657 (uint32_t *)&((cont_entry_t *)
1658 cont_pkt64)->dseg_0_address;
1659 }
1660
1661 /* Load continuation entry data segments */
1662 for (cnt = 0;
1663 cnt < prm->tgt->datasegs_per_cont && prm->seg_cnt;
1664 cnt++, prm->seg_cnt--) {
1665 *dword_ptr++ =
1666 cpu_to_le32(pci_dma_lo32
1667 (sg_dma_address(prm->sg)));
1668 if (enable_64bit_addressing) {
1669 *dword_ptr++ =
1670 cpu_to_le32(pci_dma_hi32
1671 (sg_dma_address
1672 (prm->sg)));
1673 }
1674 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1675
1676 ql_dbg(ql_dbg_tgt, vha, 0xe00d,
1677 "S/G Segment Cont. phys_addr=%llx:%llx, len=%d\n",
1678 (long long unsigned int)
1679 pci_dma_hi32(sg_dma_address(prm->sg)),
1680 (long long unsigned int)
1681 pci_dma_lo32(sg_dma_address(prm->sg)),
1682 (int)sg_dma_len(prm->sg));
1683
1684 prm->sg = sg_next(prm->sg);
1685 }
1686 }
1687}
1688
1689/*
1690 * ha->hardware_lock supposed to be held on entry. We have already made sure
1691 * that there are enough request entries that the lock need not be dropped.
1692 */
1693static void qlt_load_data_segments(struct qla_tgt_prm *prm,
1694 struct scsi_qla_host *vha)
1695{
1696 int cnt;
1697 uint32_t *dword_ptr;
1698 int enable_64bit_addressing = prm->tgt->tgt_enable_64bit_addr;
1699 struct ctio7_to_24xx *pkt24 = (struct ctio7_to_24xx *)prm->pkt;
1700
1701 ql_dbg(ql_dbg_tgt, vha, 0xe00e,
1702 "iocb->scsi_status=%x, iocb->flags=%x\n",
1703 le16_to_cpu(pkt24->u.status0.scsi_status),
1704 le16_to_cpu(pkt24->u.status0.flags));
1705
1706 pkt24->u.status0.transfer_length = cpu_to_le32(prm->cmd->bufflen);
1707
1708 /* Setup packet address segment pointer */
1709 dword_ptr = pkt24->u.status0.dseg_0_address;
1710
1711 /* Set total data segment count */
1712 if (prm->seg_cnt)
1713 pkt24->dseg_count = cpu_to_le16(prm->seg_cnt);
1714
1715 if (prm->seg_cnt == 0) {
1716 /* No data transfer */
1717 *dword_ptr++ = 0;
1718 *dword_ptr = 0;
1719 return;
1720 }
1721
1722 /* If scatter gather */
1723 ql_dbg(ql_dbg_tgt, vha, 0xe00f, "%s", "Building S/G data segments...");
1724
1725 /* Load command entry data segments */
1726 for (cnt = 0;
1727 (cnt < prm->tgt->datasegs_per_cmd) && prm->seg_cnt;
1728 cnt++, prm->seg_cnt--) {
1729 *dword_ptr++ =
1730 cpu_to_le32(pci_dma_lo32(sg_dma_address(prm->sg)));
1731 if (enable_64bit_addressing) {
1732 *dword_ptr++ =
1733 cpu_to_le32(pci_dma_hi32(
1734 sg_dma_address(prm->sg)));
1735 }
1736 *dword_ptr++ = cpu_to_le32(sg_dma_len(prm->sg));
1737
1738 ql_dbg(ql_dbg_tgt, vha, 0xe010,
1739 "S/G Segment phys_addr=%llx:%llx, len=%d\n",
1740 (long long unsigned int)pci_dma_hi32(sg_dma_address(
1741 prm->sg)),
1742 (long long unsigned int)pci_dma_lo32(sg_dma_address(
1743 prm->sg)),
1744 (int)sg_dma_len(prm->sg));
1745
1746 prm->sg = sg_next(prm->sg);
1747 }
1748
1749 qlt_load_cont_data_segments(prm, vha);
1750}
1751
1752static inline int qlt_has_data(struct qla_tgt_cmd *cmd)
1753{
1754 return cmd->bufflen > 0;
1755}
1756
1757/*
1758 * Called without ha->hardware_lock held
1759 */
1760static int qlt_pre_xmit_response(struct qla_tgt_cmd *cmd,
1761 struct qla_tgt_prm *prm, int xmit_type, uint8_t scsi_status,
1762 uint32_t *full_req_cnt)
1763{
1764 struct qla_tgt *tgt = cmd->tgt;
1765 struct scsi_qla_host *vha = tgt->vha;
1766 struct qla_hw_data *ha = vha->hw;
1767 struct se_cmd *se_cmd = &cmd->se_cmd;
1768
1769 if (unlikely(cmd->aborted)) {
1770 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
1771 "qla_target(%d): terminating exchange "
1772 "for aborted cmd=%p (se_cmd=%p, tag=%d)", vha->vp_idx, cmd,
1773 se_cmd, cmd->tag);
1774
1775 cmd->state = QLA_TGT_STATE_ABORTED;
1776
1777 qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
1778
1779 /* !! At this point cmd could be already freed !! */
1780 return QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED;
1781 }
1782
1783 ql_dbg(ql_dbg_tgt, vha, 0xe011, "qla_target(%d): tag=%u\n",
1784 vha->vp_idx, cmd->tag);
1785
1786 prm->cmd = cmd;
1787 prm->tgt = tgt;
1788 prm->rq_result = scsi_status;
1789 prm->sense_buffer = &cmd->sense_buffer[0];
1790 prm->sense_buffer_len = TRANSPORT_SENSE_BUFFER;
1791 prm->sg = NULL;
1792 prm->seg_cnt = -1;
1793 prm->req_cnt = 1;
1794 prm->add_status_pkt = 0;
1795
1796 ql_dbg(ql_dbg_tgt, vha, 0xe012, "rq_result=%x, xmit_type=%x\n",
1797 prm->rq_result, xmit_type);
1798
1799 /* Send marker if required */
1800 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
1801 return -EFAULT;
1802
1803 ql_dbg(ql_dbg_tgt, vha, 0xe013, "CTIO start: vha(%d)\n", vha->vp_idx);
1804
1805 if ((xmit_type & QLA_TGT_XMIT_DATA) && qlt_has_data(cmd)) {
1806 if (qlt_pci_map_calc_cnt(prm) != 0)
1807 return -EAGAIN;
1808 }
1809
1810 *full_req_cnt = prm->req_cnt;
1811
1812 if (se_cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
1813 prm->residual = se_cmd->residual_count;
1814 ql_dbg(ql_dbg_tgt, vha, 0xe014,
1815 "Residual underflow: %d (tag %d, "
1816 "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1817 cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1818 cmd->bufflen, prm->rq_result);
1819 prm->rq_result |= SS_RESIDUAL_UNDER;
1820 } else if (se_cmd->se_cmd_flags & SCF_OVERFLOW_BIT) {
1821 prm->residual = se_cmd->residual_count;
1822 ql_dbg(ql_dbg_tgt, vha, 0xe015,
1823 "Residual overflow: %d (tag %d, "
1824 "op %x, bufflen %d, rq_result %x)\n", prm->residual,
1825 cmd->tag, se_cmd->t_task_cdb ? se_cmd->t_task_cdb[0] : 0,
1826 cmd->bufflen, prm->rq_result);
1827 prm->rq_result |= SS_RESIDUAL_OVER;
1828 }
1829
1830 if (xmit_type & QLA_TGT_XMIT_STATUS) {
1831 /*
1832 * If QLA_TGT_XMIT_DATA is not set, add_status_pkt will be
1833 * ignored in *xmit_response() below
1834 */
1835 if (qlt_has_data(cmd)) {
1836 if (QLA_TGT_SENSE_VALID(prm->sense_buffer) ||
1837 (IS_FWI2_CAPABLE(ha) &&
1838 (prm->rq_result != 0))) {
1839 prm->add_status_pkt = 1;
1840 (*full_req_cnt)++;
1841 }
1842 }
1843 }
1844
1845 ql_dbg(ql_dbg_tgt, vha, 0xe016,
1846 "req_cnt=%d, full_req_cnt=%d, add_status_pkt=%d\n",
1847 prm->req_cnt, *full_req_cnt, prm->add_status_pkt);
1848
1849 return 0;
1850}
1851
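/*
 * Decide whether explicit confirmation should be requested for this CTIO:
 * never when class 2 is enabled; when sending sense it depends only on the
 * initiator's support, otherwise the explicit-conf setting must be on too.
 */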
1852static inline int qlt_need_explicit_conf(struct qla_hw_data *ha,
1853 struct qla_tgt_cmd *cmd, int sending_sense)
1854{
1855 if (ha->tgt.enable_class_2)
1856 return 0;
1857
1858 if (sending_sense)
1859 return cmd->conf_compl_supported;
1860 else
1861 return ha->tgt.enable_explicit_conf &&
1862 cmd->conf_compl_supported;
1863}
1864
1865#ifdef CONFIG_QLA_TGT_DEBUG_SRR
1866/*
1867 * Originally taken from the XFS code
1868 */
1869static unsigned long qlt_srr_random(void)
1870{
1871 static int Inited;
1872 static unsigned long RandomValue;
1873 static DEFINE_SPINLOCK(lock);
1874 /* cycles pseudo-randomly through all values between 1 and 2^31 - 2 */
1875 register long rv;
1876 register long lo;
1877 register long hi;
1878 unsigned long flags;
1879
1880 spin_lock_irqsave(&lock, flags);
1881 if (!Inited) {
1882 RandomValue = jiffies;
1883 Inited = 1;
1884 }
1885 rv = RandomValue;
1886 hi = rv / 127773;
1887 lo = rv % 127773;
1888 rv = 16807 * lo - 2836 * hi;
1889 if (rv <= 0)
1890 rv += 2147483647;
1891 RandomValue = rv;
1892 spin_unlock_irqrestore(&lock, flags);
1893 return rv;
1894}
1895
1896static void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1897{
1898#if 0 /* This is not a real lost status packet, so it won't lead to an SRR */
1899 if ((*xmit_type & QLA_TGT_XMIT_STATUS) && (qlt_srr_random() % 200)
1900 == 50) {
1901 *xmit_type &= ~QLA_TGT_XMIT_STATUS;
1902 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf015,
1903 "Dropping cmd %p (tag %d) status", cmd, cmd->tag);
1904 }
1905#endif
1906 /*
1907 * It's currently not possible to simulate SRRs for FCP_WRITE without
1908 	 * a physical link layer failure, so don't even try here.
1909 */
1910 if (cmd->dma_data_direction != DMA_FROM_DEVICE)
1911 return;
1912
1913 if (qlt_has_data(cmd) && (cmd->sg_cnt > 1) &&
1914 ((qlt_srr_random() % 100) == 20)) {
1915 int i, leave = 0;
1916 unsigned int tot_len = 0;
1917
1918 while (leave == 0)
1919 leave = qlt_srr_random() % cmd->sg_cnt;
1920
1921 for (i = 0; i < leave; i++)
1922 tot_len += cmd->sg[i].length;
1923
1924 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf016,
1925 "Cutting cmd %p (tag %d) buffer"
1926 " tail to len %d, sg_cnt %d (cmd->bufflen %d,"
1927 " cmd->sg_cnt %d)", cmd, cmd->tag, tot_len, leave,
1928 cmd->bufflen, cmd->sg_cnt);
1929
1930 cmd->bufflen = tot_len;
1931 cmd->sg_cnt = leave;
1932 }
1933
1934 if (qlt_has_data(cmd) && ((qlt_srr_random() % 100) == 70)) {
1935 unsigned int offset = qlt_srr_random() % cmd->bufflen;
1936
1937 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf017,
1938 "Cutting cmd %p (tag %d) buffer head "
1939 "to offset %d (cmd->bufflen %d)", cmd, cmd->tag, offset,
1940 cmd->bufflen);
1941 if (offset == 0)
1942 *xmit_type &= ~QLA_TGT_XMIT_DATA;
1943 else if (qlt_set_data_offset(cmd, offset)) {
1944 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf018,
1945 "qlt_set_data_offset() failed (tag %d)", cmd->tag);
1946 }
1947 }
1948}
1949#else
1950static inline void qlt_check_srr_debug(struct qla_tgt_cmd *cmd, int *xmit_type)
1951{}
1952#endif
1953
1954static void qlt_24xx_init_ctio_to_isp(struct ctio7_to_24xx *ctio,
1955 struct qla_tgt_prm *prm)
1956{
1957 prm->sense_buffer_len = min_t(uint32_t, prm->sense_buffer_len,
1958 (uint32_t)sizeof(ctio->u.status1.sense_data));
1959 ctio->u.status0.flags |=
1960 __constant_cpu_to_le16(CTIO7_FLAGS_SEND_STATUS);
1961 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 0)) {
1962 ctio->u.status0.flags |= __constant_cpu_to_le16(
1963 CTIO7_FLAGS_EXPLICIT_CONFORM |
1964 CTIO7_FLAGS_CONFORM_REQ);
1965 }
1966 ctio->u.status0.residual = cpu_to_le32(prm->residual);
1967 ctio->u.status0.scsi_status = cpu_to_le16(prm->rq_result);
1968 if (QLA_TGT_SENSE_VALID(prm->sense_buffer)) {
1969 int i;
1970
1971 if (qlt_need_explicit_conf(prm->tgt->ha, prm->cmd, 1)) {
1972 if (prm->cmd->se_cmd.scsi_status != 0) {
1973 ql_dbg(ql_dbg_tgt, prm->cmd->vha, 0xe017,
1974 "Skipping EXPLICIT_CONFORM and "
1975 "CTIO7_FLAGS_CONFORM_REQ for FCP READ w/ "
1976 "non GOOD status\n");
1977 goto skip_explict_conf;
1978 }
1979 ctio->u.status1.flags |= __constant_cpu_to_le16(
1980 CTIO7_FLAGS_EXPLICIT_CONFORM |
1981 CTIO7_FLAGS_CONFORM_REQ);
1982 }
1983skip_explict_conf:
1984 ctio->u.status1.flags &=
1985 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
1986 ctio->u.status1.flags |=
1987 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
1988 ctio->u.status1.scsi_status |=
1989 __constant_cpu_to_le16(SS_SENSE_LEN_VALID);
1990 ctio->u.status1.sense_length =
1991 cpu_to_le16(prm->sense_buffer_len);
1992 for (i = 0; i < prm->sense_buffer_len/4; i++)
1993 ((uint32_t *)ctio->u.status1.sense_data)[i] =
1994 cpu_to_be32(((uint32_t *)prm->sense_buffer)[i]);
1995#if 0
1996 if (unlikely((prm->sense_buffer_len % 4) != 0)) {
1997 static int q;
1998 if (q < 10) {
1999 ql_dbg(ql_dbg_tgt, vha, 0xe04f,
2000 "qla_target(%d): %d bytes of sense "
2001 "lost", prm->tgt->ha->vp_idx,
2002 prm->sense_buffer_len % 4);
2003 q++;
2004 }
2005 }
2006#endif
2007 } else {
2008 ctio->u.status1.flags &=
2009 ~__constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_0);
2010 ctio->u.status1.flags |=
2011 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1);
2012 ctio->u.status1.sense_length = 0;
2013 memset(ctio->u.status1.sense_data, 0,
2014 sizeof(ctio->u.status1.sense_data));
2015 }
2016
2017 /* Sense with len > 24, is it possible ??? */
2018}
2019
2020/*
2021 * Callback to setup response of xmit_type of QLA_TGT_XMIT_DATA and *
2022 * QLA_TGT_XMIT_STATUS for >= 24xx silicon
2023 */
2024int qlt_xmit_response(struct qla_tgt_cmd *cmd, int xmit_type,
2025 uint8_t scsi_status)
2026{
2027 struct scsi_qla_host *vha = cmd->vha;
2028 struct qla_hw_data *ha = vha->hw;
2029 struct ctio7_to_24xx *pkt;
2030 struct qla_tgt_prm prm;
2031 uint32_t full_req_cnt = 0;
2032 unsigned long flags = 0;
2033 int res;
2034
2035 memset(&prm, 0, sizeof(prm));
2036 qlt_check_srr_debug(cmd, &xmit_type);
2037
2038 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe018,
2039 "is_send_status=%d, cmd->bufflen=%d, cmd->sg_cnt=%d, "
2040 "cmd->dma_data_direction=%d\n", (xmit_type & QLA_TGT_XMIT_STATUS) ?
2041 1 : 0, cmd->bufflen, cmd->sg_cnt, cmd->dma_data_direction);
2042
2043 res = qlt_pre_xmit_response(cmd, &prm, xmit_type, scsi_status,
2044 &full_req_cnt);
2045 if (unlikely(res != 0)) {
2046 if (res == QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED)
2047 return 0;
2048
2049 return res;
2050 }
2051
2052 spin_lock_irqsave(&ha->hardware_lock, flags);
2053
2054 	/* Does F/W have IOCBs for this request? */
2055 res = qlt_check_reserve_free_req(vha, full_req_cnt);
2056 if (unlikely(res))
2057 goto out_unmap_unlock;
2058
2059 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2060 if (unlikely(res != 0))
2061 goto out_unmap_unlock;
2062
2063
2064 pkt = (struct ctio7_to_24xx *)prm.pkt;
2065
2066 if (qlt_has_data(cmd) && (xmit_type & QLA_TGT_XMIT_DATA)) {
2067 pkt->u.status0.flags |=
2068 __constant_cpu_to_le16(CTIO7_FLAGS_DATA_IN |
2069 CTIO7_FLAGS_STATUS_MODE_0);
2070
2071 qlt_load_data_segments(&prm, vha);
2072
2073 if (prm.add_status_pkt == 0) {
2074 if (xmit_type & QLA_TGT_XMIT_STATUS) {
2075 pkt->u.status0.scsi_status =
2076 cpu_to_le16(prm.rq_result);
2077 pkt->u.status0.residual =
2078 cpu_to_le32(prm.residual);
2079 pkt->u.status0.flags |= __constant_cpu_to_le16(
2080 CTIO7_FLAGS_SEND_STATUS);
2081 if (qlt_need_explicit_conf(ha, cmd, 0)) {
2082 pkt->u.status0.flags |=
2083 __constant_cpu_to_le16(
2084 CTIO7_FLAGS_EXPLICIT_CONFORM |
2085 CTIO7_FLAGS_CONFORM_REQ);
2086 }
2087 }
2088
2089 } else {
2090 /*
2091 			 * We have already made sure that there are enough
2092 			 * request entries, so the HW lock will not be dropped
2093 			 * in req_pkt().
2094 */
2095 struct ctio7_to_24xx *ctio =
2096 (struct ctio7_to_24xx *)qlt_get_req_pkt(vha);
2097
2098 ql_dbg(ql_dbg_tgt, vha, 0xe019,
2099 "Building additional status packet\n");
2100
2101 memcpy(ctio, pkt, sizeof(*ctio));
2102 ctio->entry_count = 1;
2103 ctio->dseg_count = 0;
2104 ctio->u.status1.flags &= ~__constant_cpu_to_le16(
2105 CTIO7_FLAGS_DATA_IN);
2106
2107 /* Real finish is ctio_m1's finish */
2108 pkt->handle |= CTIO_INTERMEDIATE_HANDLE_MARK;
2109 pkt->u.status0.flags |= __constant_cpu_to_le16(
2110 CTIO7_FLAGS_DONT_RET_CTIO);
2111 qlt_24xx_init_ctio_to_isp((struct ctio7_to_24xx *)ctio,
2112 &prm);
2113 pr_debug("Status CTIO7: %p\n", ctio);
2114 }
2115 } else
2116 qlt_24xx_init_ctio_to_isp(pkt, &prm);
2117
2118
2119 cmd->state = QLA_TGT_STATE_PROCESSED; /* Mid-level is done processing */
2120
2121 ql_dbg(ql_dbg_tgt, vha, 0xe01a,
2122 "Xmitting CTIO7 response pkt for 24xx: %p scsi_status: 0x%02x\n",
2123 pkt, scsi_status);
2124
2125 qla2x00_start_iocbs(vha, vha->req);
2126 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2127
2128 return 0;
2129
2130out_unmap_unlock:
2131 if (cmd->sg_mapped)
2132 qlt_unmap_sg(vha, cmd);
2133 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2134
2135 return res;
2136}
2137EXPORT_SYMBOL(qlt_xmit_response);
2138
2139int qlt_rdy_to_xfer(struct qla_tgt_cmd *cmd)
2140{
2141 struct ctio7_to_24xx *pkt;
2142 struct scsi_qla_host *vha = cmd->vha;
2143 struct qla_hw_data *ha = vha->hw;
2144 struct qla_tgt *tgt = cmd->tgt;
2145 struct qla_tgt_prm prm;
2146 unsigned long flags;
2147 int res = 0;
2148
2149 memset(&prm, 0, sizeof(prm));
2150 prm.cmd = cmd;
2151 prm.tgt = tgt;
2152 prm.sg = NULL;
2153 prm.req_cnt = 1;
2154
2155 /* Send marker if required */
2156 if (qlt_issue_marker(vha, 0) != QLA_SUCCESS)
2157 return -EIO;
2158
2159 ql_dbg(ql_dbg_tgt, vha, 0xe01b, "CTIO_start: vha(%d)",
2160 (int)vha->vp_idx);
2161
2162 /* Calculate number of entries and segments required */
2163 if (qlt_pci_map_calc_cnt(&prm) != 0)
2164 return -EAGAIN;
2165
2166 spin_lock_irqsave(&ha->hardware_lock, flags);
2167
2168 	/* Does F/W have IOCBs for this request? */
2169 res = qlt_check_reserve_free_req(vha, prm.req_cnt);
2170 if (res != 0)
2171 goto out_unlock_free_unmap;
2172
2173 res = qlt_24xx_build_ctio_pkt(&prm, vha);
2174 if (unlikely(res != 0))
2175 goto out_unlock_free_unmap;
2176 pkt = (struct ctio7_to_24xx *)prm.pkt;
2177 pkt->u.status0.flags |= __constant_cpu_to_le16(CTIO7_FLAGS_DATA_OUT |
2178 CTIO7_FLAGS_STATUS_MODE_0);
2179 qlt_load_data_segments(&prm, vha);
2180
2181 cmd->state = QLA_TGT_STATE_NEED_DATA;
2182
2183 qla2x00_start_iocbs(vha, vha->req);
2184 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2185
2186 return res;
2187
2188out_unlock_free_unmap:
2189 if (cmd->sg_mapped)
2190 qlt_unmap_sg(vha, cmd);
2191 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2192
2193 return res;
2194}
2195EXPORT_SYMBOL(qlt_rdy_to_xfer);
2196
2197/* If hardware_lock held on entry, might drop it, then reacquire */
2198/* This function sends the appropriate CTIO to ISP 2xxx or 24xx */
2199static int __qlt_send_term_exchange(struct scsi_qla_host *vha,
2200 struct qla_tgt_cmd *cmd,
2201 struct atio_from_isp *atio)
2202{
2203 struct ctio7_to_24xx *ctio24;
2204 struct qla_hw_data *ha = vha->hw;
2205 request_t *pkt;
2206 int ret = 0;
2207
2208 ql_dbg(ql_dbg_tgt, vha, 0xe01c, "Sending TERM EXCH CTIO (ha=%p)\n", ha);
2209
2210 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
2211 if (pkt == NULL) {
2212 ql_dbg(ql_dbg_tgt, vha, 0xe050,
2213 "qla_target(%d): %s failed: unable to allocate "
2214 "request packet\n", vha->vp_idx, __func__);
2215 return -ENOMEM;
2216 }
2217
2218 if (cmd != NULL) {
2219 if (cmd->state < QLA_TGT_STATE_PROCESSED) {
2220 ql_dbg(ql_dbg_tgt, vha, 0xe051,
2221 "qla_target(%d): Terminating cmd %p with "
2222 "incorrect state %d\n", vha->vp_idx, cmd,
2223 cmd->state);
2224 } else
2225 ret = 1;
2226 }
2227
2228 pkt->entry_count = 1;
2229 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
2230
2231 ctio24 = (struct ctio7_to_24xx *)pkt;
2232 ctio24->entry_type = CTIO_TYPE7;
2233 ctio24->nport_handle = cmd ? cmd->loop_id : CTIO7_NHANDLE_UNRECOGNIZED;
2234 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
2235 ctio24->vp_index = vha->vp_idx;
2236 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
2237 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
2238 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
2239 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
2240 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
2241 __constant_cpu_to_le16(CTIO7_FLAGS_STATUS_MODE_1 |
2242 CTIO7_FLAGS_TERMINATE);
2243 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
2244
2245 /* Most likely, it isn't needed */
2246 ctio24->u.status1.residual = get_unaligned((uint32_t *)
2247 &atio->u.isp24.fcp_cmnd.add_cdb[
2248 atio->u.isp24.fcp_cmnd.add_cdb_len]);
2249 if (ctio24->u.status1.residual != 0)
2250 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
2251
2252 qla2x00_start_iocbs(vha, vha->req);
2253 return ret;
2254}
2255
2256static void qlt_send_term_exchange(struct scsi_qla_host *vha,
2257 struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
2258{
2259 unsigned long flags;
2260 int rc;
2261
2262 if (qlt_issue_marker(vha, ha_locked) < 0)
2263 return;
2264
2265 if (ha_locked) {
2266 rc = __qlt_send_term_exchange(vha, cmd, atio);
2267 goto done;
2268 }
2269 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
2270 rc = __qlt_send_term_exchange(vha, cmd, atio);
2271 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
2272done:
2273 if (rc == 1) {
2274 if (!ha_locked && !in_interrupt())
2275 msleep(250); /* just in case */
2276
2277 vha->hw->tgt.tgt_ops->free_cmd(cmd);
2278 }
2279}
2280
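/*
 * Release a command descriptor: free a driver-allocated S/G list if one was
 * attached, then return the command to its slab cache.
 */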
2281void qlt_free_cmd(struct qla_tgt_cmd *cmd)
2282{
2283 BUG_ON(cmd->sg_mapped);
2284
2285 if (unlikely(cmd->free_sg))
2286 kfree(cmd->sg);
2287 kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2288}
2289EXPORT_SYMBOL(qlt_free_cmd);
2290
2291/* ha->hardware_lock supposed to be held on entry */
2292static int qlt_prepare_srr_ctio(struct scsi_qla_host *vha,
2293 struct qla_tgt_cmd *cmd, void *ctio)
2294{
2295 struct qla_tgt_srr_ctio *sc;
2296 struct qla_hw_data *ha = vha->hw;
2297 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2298 struct qla_tgt_srr_imm *imm;
2299
2300 tgt->ctio_srr_id++;
2301
2302 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf019,
2303 "qla_target(%d): CTIO with SRR status received\n", vha->vp_idx);
2304
2305 if (!ctio) {
2306 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf055,
2307 "qla_target(%d): SRR CTIO, but ctio is NULL\n",
2308 vha->vp_idx);
2309 return -EINVAL;
2310 }
2311
2312 sc = kzalloc(sizeof(*sc), GFP_ATOMIC);
2313 if (sc != NULL) {
2314 sc->cmd = cmd;
2315 /* IRQ is already OFF */
2316 spin_lock(&tgt->srr_lock);
2317 sc->srr_id = tgt->ctio_srr_id;
2318 list_add_tail(&sc->srr_list_entry,
2319 &tgt->srr_ctio_list);
2320 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01a,
2321 "CTIO SRR %p added (id %d)\n", sc, sc->srr_id);
2322 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
2323 int found = 0;
2324 list_for_each_entry(imm, &tgt->srr_imm_list,
2325 srr_list_entry) {
2326 if (imm->srr_id == sc->srr_id) {
2327 found = 1;
2328 break;
2329 }
2330 }
2331 if (found) {
2332 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01b,
2333 "Scheduling srr work\n");
2334 schedule_work(&tgt->srr_work);
2335 } else {
2336 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf056,
2337 "qla_target(%d): imm_srr_id "
2338 "== ctio_srr_id (%d), but there is no "
2339 "corresponding SRR IMM, deleting CTIO "
2340 "SRR %p\n", vha->vp_idx,
2341 tgt->ctio_srr_id, sc);
2342 list_del(&sc->srr_list_entry);
2343 spin_unlock(&tgt->srr_lock);
2344
2345 kfree(sc);
2346 return -EINVAL;
2347 }
2348 }
2349 spin_unlock(&tgt->srr_lock);
2350 } else {
2351 struct qla_tgt_srr_imm *ti;
2352
2353 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf057,
2354 "qla_target(%d): Unable to allocate SRR CTIO entry\n",
2355 vha->vp_idx);
2356 spin_lock(&tgt->srr_lock);
2357 list_for_each_entry_safe(imm, ti, &tgt->srr_imm_list,
2358 srr_list_entry) {
2359 if (imm->srr_id == tgt->ctio_srr_id) {
2360 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01c,
2361 "IMM SRR %p deleted (id %d)\n",
2362 imm, imm->srr_id);
2363 list_del(&imm->srr_list_entry);
2364 qlt_reject_free_srr_imm(vha, imm, 1);
2365 }
2366 }
2367 spin_unlock(&tgt->srr_lock);
2368
2369 return -ENOMEM;
2370 }
2371
2372 return 0;
2373}
2374
2375/*
2376 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2377 */
2378static int qlt_term_ctio_exchange(struct scsi_qla_host *vha, void *ctio,
2379 struct qla_tgt_cmd *cmd, uint32_t status)
2380{
2381 int term = 0;
2382
2383 if (ctio != NULL) {
2384 struct ctio7_from_24xx *c = (struct ctio7_from_24xx *)ctio;
2385 term = !(c->flags &
2386 __constant_cpu_to_le16(OF_TERM_EXCH));
2387 } else
2388 term = 1;
2389
2390 if (term)
2391 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
2392
2393 return term;
2394}
2395
2396/* ha->hardware_lock supposed to be held on entry */
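/* Look up the command owning this (1-based) completion handle and clear its slot. */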
2397static inline struct qla_tgt_cmd *qlt_get_cmd(struct scsi_qla_host *vha,
2398 uint32_t handle)
2399{
2400 struct qla_hw_data *ha = vha->hw;
2401
2402 handle--;
2403 if (ha->tgt.cmds[handle] != NULL) {
2404 struct qla_tgt_cmd *cmd = ha->tgt.cmds[handle];
2405 ha->tgt.cmds[handle] = NULL;
2406 return cmd;
2407 } else
2408 return NULL;
2409}
2410
2411/* ha->hardware_lock supposed to be held on entry */
2412static struct qla_tgt_cmd *qlt_ctio_to_cmd(struct scsi_qla_host *vha,
2413 uint32_t handle, void *ctio)
2414{
2415 struct qla_tgt_cmd *cmd = NULL;
2416
2417 /* Clear out internal marks */
2418 handle &= ~(CTIO_COMPLETION_HANDLE_MARK |
2419 CTIO_INTERMEDIATE_HANDLE_MARK);
2420
2421 if (handle != QLA_TGT_NULL_HANDLE) {
2422 if (unlikely(handle == QLA_TGT_SKIP_HANDLE)) {
2423 ql_dbg(ql_dbg_tgt, vha, 0xe01d, "%s",
2424 "SKIP_HANDLE CTIO\n");
2425 return NULL;
2426 }
2427 /* handle-1 is actually used */
2428 if (unlikely(handle > MAX_OUTSTANDING_COMMANDS)) {
2429 ql_dbg(ql_dbg_tgt, vha, 0xe052,
2430 "qla_target(%d): Wrong handle %x received\n",
2431 vha->vp_idx, handle);
2432 return NULL;
2433 }
2434 cmd = qlt_get_cmd(vha, handle);
2435 if (unlikely(cmd == NULL)) {
2436 ql_dbg(ql_dbg_tgt, vha, 0xe053,
2437 "qla_target(%d): Suspicious: unable to "
2438 "find the command with handle %x\n", vha->vp_idx,
2439 handle);
2440 return NULL;
2441 }
2442 } else if (ctio != NULL) {
2443 /* We can't get loop ID from CTIO7 */
2444 ql_dbg(ql_dbg_tgt, vha, 0xe054,
2445 "qla_target(%d): Wrong CTIO received: QLA24xx doesn't "
2446 "support NULL handles\n", vha->vp_idx);
2447 return NULL;
2448 }
2449
2450 return cmd;
2451}
2452
2453/*
2454 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2455 */
2456static void qlt_do_ctio_completion(struct scsi_qla_host *vha, uint32_t handle,
2457 uint32_t status, void *ctio)
2458{
2459 struct qla_hw_data *ha = vha->hw;
2460 struct se_cmd *se_cmd;
2461 struct target_core_fabric_ops *tfo;
2462 struct qla_tgt_cmd *cmd;
2463
2464 ql_dbg(ql_dbg_tgt, vha, 0xe01e,
2465 "qla_target(%d): handle(ctio %p status %#x) <- %08x\n",
2466 vha->vp_idx, ctio, status, handle);
2467
2468 if (handle & CTIO_INTERMEDIATE_HANDLE_MARK) {
2469 /* That could happen only in case of an error/reset/abort */
2470 if (status != CTIO_SUCCESS) {
2471 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01d,
2472 "Intermediate CTIO received"
2473 " (status %x)\n", status);
2474 }
2475 return;
2476 }
2477
2478 cmd = qlt_ctio_to_cmd(vha, handle, ctio);
2479 if (cmd == NULL)
2480 return;
2481
2482 se_cmd = &cmd->se_cmd;
2483 tfo = se_cmd->se_tfo;
2484
2485 if (cmd->sg_mapped)
2486 qlt_unmap_sg(vha, cmd);
2487
2488 if (unlikely(status != CTIO_SUCCESS)) {
2489 switch (status & 0xFFFF) {
2490 case CTIO_LIP_RESET:
2491 case CTIO_TARGET_RESET:
2492 case CTIO_ABORTED:
2493 case CTIO_TIMEOUT:
2494 case CTIO_INVALID_RX_ID:
2495 /* They are OK */
2496 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf058,
2497 "qla_target(%d): CTIO with "
2498 "status %#x received, state %x, se_cmd %p, "
2499 "(LIP_RESET=e, ABORTED=2, TARGET_RESET=17, "
2500 "TIMEOUT=b, INVALID_RX_ID=8)\n", vha->vp_idx,
2501 status, cmd->state, se_cmd);
2502 break;
2503
2504 case CTIO_PORT_LOGGED_OUT:
2505 case CTIO_PORT_UNAVAILABLE:
2506 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
2507 "qla_target(%d): CTIO with PORT LOGGED "
2508 "OUT (29) or PORT UNAVAILABLE (28) status %x "
2509 "received (state %x, se_cmd %p)\n", vha->vp_idx,
2510 status, cmd->state, se_cmd);
2511 break;
2512
2513 case CTIO_SRR_RECEIVED:
2514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05a,
2515 "qla_target(%d): CTIO with SRR_RECEIVED"
2516 " status %x received (state %x, se_cmd %p)\n",
2517 vha->vp_idx, status, cmd->state, se_cmd);
2518 if (qlt_prepare_srr_ctio(vha, cmd, ctio) != 0)
2519 break;
2520 else
2521 return;
2522
2523 default:
2524 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05b,
2525 "qla_target(%d): CTIO with error status "
2526 			    "0x%x received (state %x, se_cmd %p)\n",
2527 vha->vp_idx, status, cmd->state, se_cmd);
2528 break;
2529 }
2530
2531 if (cmd->state != QLA_TGT_STATE_NEED_DATA)
2532 if (qlt_term_ctio_exchange(vha, ctio, cmd, status))
2533 return;
2534 }
2535
2536 if (cmd->state == QLA_TGT_STATE_PROCESSED) {
2537 ql_dbg(ql_dbg_tgt, vha, 0xe01f, "Command %p finished\n", cmd);
2538 } else if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
2539 int rx_status = 0;
2540
2541 cmd->state = QLA_TGT_STATE_DATA_IN;
2542
2543 if (unlikely(status != CTIO_SUCCESS))
2544 rx_status = -EIO;
2545 else
2546 cmd->write_data_transferred = 1;
2547
2548 ql_dbg(ql_dbg_tgt, vha, 0xe020,
2549 "Data received, context %x, rx_status %d\n",
2550 0x0, rx_status);
2551
2552 ha->tgt.tgt_ops->handle_data(cmd);
2553 return;
2554 } else if (cmd->state == QLA_TGT_STATE_ABORTED) {
2555 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01e,
2556 "Aborted command %p (tag %d) finished\n", cmd, cmd->tag);
2557 } else {
2558 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05c,
2559 "qla_target(%d): A command in state (%d) should "
2560 "not return a CTIO complete\n", vha->vp_idx, cmd->state);
2561 }
2562
2563 if (unlikely(status != CTIO_SUCCESS)) {
2564 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf01f, "Finishing failed CTIO\n");
2565 dump_stack();
2566 }
2567
2568 ha->tgt.tgt_ops->free_cmd(cmd);
2569}
2570
2571/* ha->hardware_lock supposed to be held on entry */
2572/* called via callback from qla2xxx */
2573void qlt_ctio_completion(struct scsi_qla_host *vha, uint32_t handle)
2574{
2575 struct qla_hw_data *ha = vha->hw;
2576 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2577
2578 if (likely(tgt == NULL)) {
2579 ql_dbg(ql_dbg_tgt, vha, 0xe021,
2580 "CTIO, but target mode not enabled"
2581 " (ha %d %p handle %#x)", vha->vp_idx, ha, handle);
2582 return;
2583 }
2584
2585 tgt->irq_cmd_count++;
2586 qlt_do_ctio_completion(vha, handle, CTIO_SUCCESS, NULL);
2587 tgt->irq_cmd_count--;
2588}
2589
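/* Translate the FCP task attribute from the ATIO into a SAM task message tag. */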
2590static inline int qlt_get_fcp_task_attr(struct scsi_qla_host *vha,
2591 uint8_t task_codes)
2592{
2593 int fcp_task_attr;
2594
2595 switch (task_codes) {
2596 case ATIO_SIMPLE_QUEUE:
2597 fcp_task_attr = MSG_SIMPLE_TAG;
2598 break;
2599 case ATIO_HEAD_OF_QUEUE:
2600 fcp_task_attr = MSG_HEAD_TAG;
2601 break;
2602 case ATIO_ORDERED_QUEUE:
2603 fcp_task_attr = MSG_ORDERED_TAG;
2604 break;
2605 case ATIO_ACA_QUEUE:
2606 fcp_task_attr = MSG_ACA_TAG;
2607 break;
2608 case ATIO_UNTAGGED:
2609 fcp_task_attr = MSG_SIMPLE_TAG;
2610 break;
2611 default:
2612 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05d,
2613 "qla_target: unknown task code %x, use ORDERED instead\n",
2614 task_codes);
2615 fcp_task_attr = MSG_ORDERED_TAG;
2616 break;
2617 }
2618
2619 return fcp_task_attr;
2620}
2621
2622static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *,
2623 uint8_t *);
2624/*
2625 * Process context for I/O path into tcm_qla2xxx code
2626 */
2627static void qlt_do_work(struct work_struct *work)
2628{
2629 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
2630 scsi_qla_host_t *vha = cmd->vha;
2631 struct qla_hw_data *ha = vha->hw;
2632 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2633 struct qla_tgt_sess *sess = NULL;
2634 struct atio_from_isp *atio = &cmd->atio;
2635 unsigned char *cdb;
2636 unsigned long flags;
2637 uint32_t data_length;
2638 int ret, fcp_task_attr, data_dir, bidi = 0;
2639
2640 if (tgt->tgt_stop)
2641 goto out_term;
2642
2643 spin_lock_irqsave(&ha->hardware_lock, flags);
2644 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2645 atio->u.isp24.fcp_hdr.s_id);
2646 if (sess) {
2647 if (unlikely(sess->tearing_down)) {
2648 sess = NULL;
2649 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2650 goto out_term;
2651 } else {
2652 /*
2653 * Do the extra kref_get() before dropping
2654 * qla_hw_data->hardware_lock.
2655 */
2656 kref_get(&sess->se_sess->sess_kref);
2657 }
2658 }
2659 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2660
2661 if (unlikely(!sess)) {
2662 uint8_t *s_id = atio->u.isp24.fcp_hdr.s_id;
2663
2664 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf022,
2665 "qla_target(%d): Unable to find wwn login"
2666 " (s_id %x:%x:%x), trying to create it manually\n",
2667 vha->vp_idx, s_id[0], s_id[1], s_id[2]);
2668
2669 if (atio->u.raw.entry_count > 1) {
2670 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf023,
2671 "Dropping multy entry cmd %p\n", cmd);
2672 goto out_term;
2673 }
2674
2675 mutex_lock(&ha->tgt.tgt_mutex);
2676 sess = qlt_make_local_sess(vha, s_id);
2677 /* sess has an extra creation ref. */
2678 mutex_unlock(&ha->tgt.tgt_mutex);
2679
2680 if (!sess)
2681 goto out_term;
2682 }
2683
2684 cmd->sess = sess;
2685 cmd->loop_id = sess->loop_id;
2686 cmd->conf_compl_supported = sess->conf_compl_supported;
2687
2688 cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
2689 cmd->tag = atio->u.isp24.exchange_addr;
2690 cmd->unpacked_lun = scsilun_to_int(
2691 (struct scsi_lun *)&atio->u.isp24.fcp_cmnd.lun);
2692
2693 if (atio->u.isp24.fcp_cmnd.rddata &&
2694 atio->u.isp24.fcp_cmnd.wrdata) {
2695 bidi = 1;
2696 data_dir = DMA_TO_DEVICE;
2697 } else if (atio->u.isp24.fcp_cmnd.rddata)
2698 data_dir = DMA_FROM_DEVICE;
2699 else if (atio->u.isp24.fcp_cmnd.wrdata)
2700 data_dir = DMA_TO_DEVICE;
2701 else
2702 data_dir = DMA_NONE;
2703
2704 fcp_task_attr = qlt_get_fcp_task_attr(vha,
2705 atio->u.isp24.fcp_cmnd.task_attr);
2706 data_length = be32_to_cpu(get_unaligned((uint32_t *)
2707 &atio->u.isp24.fcp_cmnd.add_cdb[
2708 atio->u.isp24.fcp_cmnd.add_cdb_len]));
2709
2710 ql_dbg(ql_dbg_tgt, vha, 0xe022,
2711 "qla_target: START qla command: %p lun: 0x%04x (tag %d)\n",
2712 cmd, cmd->unpacked_lun, cmd->tag);
2713
2714 ret = vha->hw->tgt.tgt_ops->handle_cmd(vha, cmd, cdb, data_length,
2715 fcp_task_attr, data_dir, bidi);
2716 if (ret != 0)
2717 goto out_term;
2718 /*
2719 * Drop the extra session reference taken above.
2720 */
2721 ha->tgt.tgt_ops->put_sess(sess);
2722 return;
2723
2724out_term:
2725 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf020, "Terminating work cmd %p", cmd);
2726 /*
2727 * cmd has not been sent to the target yet, so pass NULL as the second
2728 * argument to qlt_send_term_exchange() and free the memory here.
2729 */
2730 spin_lock_irqsave(&ha->hardware_lock, flags);
2731 qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
2732 kmem_cache_free(qla_tgt_cmd_cachep, cmd);
2733 spin_unlock_irqrestore(&ha->hardware_lock, flags);
2734 if (sess)
2735 ha->tgt.tgt_ops->put_sess(sess);
2736}
2737
2738/* ha->hardware_lock supposed to be held on entry */
2739static int qlt_handle_cmd_for_atio(struct scsi_qla_host *vha,
2740 struct atio_from_isp *atio)
2741{
2742 struct qla_hw_data *ha = vha->hw;
2743 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2744 struct qla_tgt_cmd *cmd;
2745
2746 if (unlikely(tgt->tgt_stop)) {
2747 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf021,
2748 "New command while device %p is shutting down\n", tgt);
2749 return -EFAULT;
2750 }
2751
2752 cmd = kmem_cache_zalloc(qla_tgt_cmd_cachep, GFP_ATOMIC);
2753 if (!cmd) {
2754 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05e,
2755 "qla_target(%d): Allocation of cmd failed\n", vha->vp_idx);
2756 return -ENOMEM;
2757 }
2758
2759 INIT_LIST_HEAD(&cmd->cmd_list);
2760
2761 memcpy(&cmd->atio, atio, sizeof(*atio));
2762 cmd->state = QLA_TGT_STATE_NEW;
2763 cmd->tgt = ha->tgt.qla_tgt;
2764 cmd->vha = vha;
2765
2766 INIT_WORK(&cmd->work, qlt_do_work);
2767 queue_work(qla_tgt_wq, &cmd->work);
2768 return 0;
2769
2770}
2771
2772/* ha->hardware_lock supposed to be held on entry */
2773static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
2774 int fn, void *iocb, int flags)
2775{
2776 struct scsi_qla_host *vha = sess->vha;
2777 struct qla_hw_data *ha = vha->hw;
2778 struct qla_tgt_mgmt_cmd *mcmd;
2779 int res;
2780 uint8_t tmr_func;
2781
2782 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2783 if (!mcmd) {
2784 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10009,
2785 "qla_target(%d): Allocation of management "
2786 "command failed, some commands and their data could "
2787 "leak\n", vha->vp_idx);
2788 return -ENOMEM;
2789 }
2790 memset(mcmd, 0, sizeof(*mcmd));
2791 mcmd->sess = sess;
2792
2793 if (iocb) {
2794 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2795 sizeof(mcmd->orig_iocb.imm_ntfy));
2796 }
2797 mcmd->tmr_func = fn;
2798 mcmd->flags = flags;
2799
2800 switch (fn) {
2801 case QLA_TGT_CLEAR_ACA:
2802 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10000,
2803 "qla_target(%d): CLEAR_ACA received\n", sess->vha->vp_idx);
2804 tmr_func = TMR_CLEAR_ACA;
2805 break;
2806
2807 case QLA_TGT_TARGET_RESET:
2808 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10001,
2809 "qla_target(%d): TARGET_RESET received\n",
2810 sess->vha->vp_idx);
2811 tmr_func = TMR_TARGET_WARM_RESET;
2812 break;
2813
2814 case QLA_TGT_LUN_RESET:
2815 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10002,
2816 "qla_target(%d): LUN_RESET received\n", sess->vha->vp_idx);
2817 tmr_func = TMR_LUN_RESET;
2818 break;
2819
2820 case QLA_TGT_CLEAR_TS:
2821 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10003,
2822 "qla_target(%d): CLEAR_TS received\n", sess->vha->vp_idx);
2823 tmr_func = TMR_CLEAR_TASK_SET;
2824 break;
2825
2826 case QLA_TGT_ABORT_TS:
2827 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10004,
2828 "qla_target(%d): ABORT_TS received\n", sess->vha->vp_idx);
2829 tmr_func = TMR_ABORT_TASK_SET;
2830 break;
2831#if 0
2832 case QLA_TGT_ABORT_ALL:
2833 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10005,
2834 "qla_target(%d): Doing ABORT_ALL_TASKS\n",
2835 sess->vha->vp_idx);
2836 tmr_func = 0;
2837 break;
2838
2839 case QLA_TGT_ABORT_ALL_SESS:
2840 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10006,
2841 "qla_target(%d): Doing ABORT_ALL_TASKS_SESS\n",
2842 sess->vha->vp_idx);
2843 tmr_func = 0;
2844 break;
2845
2846 case QLA_TGT_NEXUS_LOSS_SESS:
2847 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10007,
2848 "qla_target(%d): Doing NEXUS_LOSS_SESS\n",
2849 sess->vha->vp_idx);
2850 tmr_func = 0;
2851 break;
2852
2853 case QLA_TGT_NEXUS_LOSS:
2854 ql_dbg(ql_dbg_tgt_tmr, vha, 0x10008,
2855 "qla_target(%d): Doing NEXUS_LOSS\n", sess->vha->vp_idx);
2856 tmr_func = 0;
2857 break;
2858#endif
2859 default:
2860 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000a,
2861 "qla_target(%d): Unknown task mgmt fn 0x%x\n",
2862 sess->vha->vp_idx, fn);
2863 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2864 return -ENOSYS;
2865 }
2866
2867 res = ha->tgt.tgt_ops->handle_tmr(mcmd, lun, tmr_func, 0);
2868 if (res != 0) {
2869 ql_dbg(ql_dbg_tgt_tmr, vha, 0x1000b,
2870 "qla_target(%d): tgt.tgt_ops->handle_tmr() failed: %d\n",
2871 sess->vha->vp_idx, res);
2872 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2873 return -EFAULT;
2874 }
2875
2876 return 0;
2877}
2878
2879/* ha->hardware_lock supposed to be held on entry */
2880static int qlt_handle_task_mgmt(struct scsi_qla_host *vha, void *iocb)
2881{
2882 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2883 struct qla_hw_data *ha = vha->hw;
2884 struct qla_tgt *tgt;
2885 struct qla_tgt_sess *sess;
2886 uint32_t lun, unpacked_lun;
2887 int lun_size, fn;
2888
2889 tgt = ha->tgt.qla_tgt;
2890
2891 lun = a->u.isp24.fcp_cmnd.lun;
2892 lun_size = sizeof(a->u.isp24.fcp_cmnd.lun);
2893 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
2894 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
2895 a->u.isp24.fcp_hdr.s_id);
2896 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2897
2898 if (!sess) {
2899 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf024,
2900 "qla_target(%d): task mgmt fn 0x%x for "
2901 "non-existant session\n", vha->vp_idx, fn);
2902 return qlt_sched_sess_work(tgt, QLA_TGT_SESS_WORK_TM, iocb,
2903 sizeof(struct atio_from_isp));
2904 }
2905
2906 return qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
2907}
2908
2909/* ha->hardware_lock supposed to be held on entry */
2910static int __qlt_abort_task(struct scsi_qla_host *vha,
2911 struct imm_ntfy_from_isp *iocb, struct qla_tgt_sess *sess)
2912{
2913 struct atio_from_isp *a = (struct atio_from_isp *)iocb;
2914 struct qla_hw_data *ha = vha->hw;
2915 struct qla_tgt_mgmt_cmd *mcmd;
2916 uint32_t lun, unpacked_lun;
2917 int rc;
2918
2919 mcmd = mempool_alloc(qla_tgt_mgmt_cmd_mempool, GFP_ATOMIC);
2920 if (mcmd == NULL) {
2921 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf05f,
2922 "qla_target(%d): %s: Allocation of ABORT cmd failed\n",
2923 vha->vp_idx, __func__);
2924 return -ENOMEM;
2925 }
2926 memset(mcmd, 0, sizeof(*mcmd));
2927
2928 mcmd->sess = sess;
2929 memcpy(&mcmd->orig_iocb.imm_ntfy, iocb,
2930 sizeof(mcmd->orig_iocb.imm_ntfy));
2931
2932 lun = a->u.isp24.fcp_cmnd.lun;
2933 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
2934
2935 rc = ha->tgt.tgt_ops->handle_tmr(mcmd, unpacked_lun, TMR_ABORT_TASK,
2936 le16_to_cpu(iocb->u.isp2x.seq_id));
2937 if (rc != 0) {
2938 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf060,
2939 "qla_target(%d): tgt_ops->handle_tmr() failed: %d\n",
2940 vha->vp_idx, rc);
2941 mempool_free(mcmd, qla_tgt_mgmt_cmd_mempool);
2942 return -EFAULT;
2943 }
2944
2945 return 0;
2946}
2947
2948/* ha->hardware_lock supposed to be held on entry */
2949static int qlt_abort_task(struct scsi_qla_host *vha,
2950 struct imm_ntfy_from_isp *iocb)
2951{
2952 struct qla_hw_data *ha = vha->hw;
2953 struct qla_tgt_sess *sess;
2954 int loop_id;
2955
2956 loop_id = GET_TARGET_ID(ha, (struct atio_from_isp *)iocb);
2957
2958 sess = ha->tgt.tgt_ops->find_sess_by_loop_id(vha, loop_id);
2959 if (sess == NULL) {
2960 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf025,
2961 "qla_target(%d): task abort for unexisting "
2962 "session\n", vha->vp_idx);
2963 return qlt_sched_sess_work(ha->tgt.qla_tgt,
2964 QLA_TGT_SESS_WORK_ABORT, iocb, sizeof(*iocb));
2965 }
2966
2967 return __qlt_abort_task(vha, iocb, sess);
2968}
2969
2970/*
2971 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
2972 */
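/*
 * Handle an ELS delivered via immediate notify.  Session-affecting ELS
 * commands (PLOGI/FLOGI/PRLI/LOGO/PRLO) trigger a nexus-loss reset,
 * while PDISC/ADISC only flush a pending LINK REINIT ack.  A non-zero
 * return value tells the caller to send the notify ack itself.
 */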
2973static int qlt_24xx_handle_els(struct scsi_qla_host *vha,
2974 struct imm_ntfy_from_isp *iocb)
2975{
2976 struct qla_hw_data *ha = vha->hw;
2977 int res = 0;
2978
2979 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf026,
2980 "qla_target(%d): Port ID: 0x%02x:%02x:%02x"
2981 " ELS opcode: 0x%02x\n", vha->vp_idx, iocb->u.isp24.port_id[0],
2982 iocb->u.isp24.port_id[1], iocb->u.isp24.port_id[2],
2983 iocb->u.isp24.status_subcode);
2984
2985 switch (iocb->u.isp24.status_subcode) {
2986 case ELS_PLOGI:
2987 case ELS_FLOGI:
2988 case ELS_PRLI:
2989 case ELS_LOGO:
2990 case ELS_PRLO:
2991 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
2992 break;
2993 case ELS_PDISC:
2994 case ELS_ADISC:
2995 {
2996 struct qla_tgt *tgt = ha->tgt.qla_tgt;
2997 if (tgt->link_reinit_iocb_pending) {
2998 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
2999 0, 0, 0, 0, 0, 0);
3000 tgt->link_reinit_iocb_pending = 0;
3001 }
3002 res = 1; /* send notify ack */
3003 break;
3004 }
3005
3006 default:
3007 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf061,
3008 "qla_target(%d): Unsupported ELS command %x "
3009 "received\n", vha->vp_idx, iocb->u.isp24.status_subcode);
3010 res = qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS);
3011 break;
3012 }
3013
3014 return res;
3015}
3016
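/*
 * Rebuild the command's scatterlist so that it starts at the given SRR
 * relative offset, allocating a new sg list for the remaining pages.
 * Note the FIXME below: non-zero offsets are rejected unconditionally
 * until this path has been properly tested.
 */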
3017static int qlt_set_data_offset(struct qla_tgt_cmd *cmd, uint32_t offset)
3018{
3019 struct scatterlist *sg, *sgp, *sg_srr, *sg_srr_start = NULL;
3020 size_t first_offset = 0, rem_offset = offset, tmp = 0;
3021 int i, sg_srr_cnt, bufflen = 0;
3022
3023 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe023,
3024 "Entering qla_tgt_set_data_offset: cmd: %p, cmd->sg: %p, "
3025 "cmd->sg_cnt: %u, direction: %d\n",
3026 cmd, cmd->sg, cmd->sg_cnt, cmd->dma_data_direction);
3027
3028 /*
3029 * FIXME: Reject non-zero SRR relative offset until we can test
3030 * this code properly.
3031 */
3032 pr_debug("Rejecting non zero SRR rel_offs: %u\n", offset);
3033 return -1;
3034
3035 if (!cmd->sg || !cmd->sg_cnt) {
3036 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe055,
3037 "Missing cmd->sg or zero cmd->sg_cnt in"
3038 " qla_tgt_set_data_offset\n");
3039 return -EINVAL;
3040 }
3041 /*
3042 * Walk the current cmd->sg list until we locate the new sg_srr_start
3043 */
3044 for_each_sg(cmd->sg, sg, cmd->sg_cnt, i) {
3045 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe024,
3046 "sg[%d]: %p page: %p, length: %d, offset: %d\n",
3047 i, sg, sg_page(sg), sg->length, sg->offset);
3048
3049 if ((sg->length + tmp) > offset) {
3050 first_offset = rem_offset;
3051 sg_srr_start = sg;
3052 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe025,
3053 "Found matching sg[%d], using %p as sg_srr_start, "
3054 "and using first_offset: %zu\n", i, sg,
3055 first_offset);
3056 break;
3057 }
3058 tmp += sg->length;
3059 rem_offset -= sg->length;
3060 }
3061
3062 if (!sg_srr_start) {
3063 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe056,
3064 "Unable to locate sg_srr_start for offset: %u\n", offset);
3065 return -EINVAL;
3066 }
3067 sg_srr_cnt = (cmd->sg_cnt - i);
3068
3069 sg_srr = kzalloc(sizeof(struct scatterlist) * sg_srr_cnt, GFP_KERNEL);
3070 if (!sg_srr) {
3071 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe057,
3072 "Unable to allocate sgp\n");
3073 return -ENOMEM;
3074 }
3075 sg_init_table(sg_srr, sg_srr_cnt);
3076 sgp = &sg_srr[0];
3077 /*
3078 * Walk the remaining list for sg_srr_start, mapping to the newly
3079 * allocated sg_srr taking first_offset into account.
3080 */
3081 for_each_sg(sg_srr_start, sg, sg_srr_cnt, i) {
3082 if (first_offset) {
3083 sg_set_page(sgp, sg_page(sg),
3084 (sg->length - first_offset), first_offset);
3085 first_offset = 0;
3086 } else {
3087 sg_set_page(sgp, sg_page(sg), sg->length, 0);
3088 }
3089 bufflen += sgp->length;
3090
3091 sgp = sg_next(sgp);
3092 if (!sgp)
3093 break;
3094 }
3095
3096 cmd->sg = sg_srr;
3097 cmd->sg_cnt = sg_srr_cnt;
3098 cmd->bufflen = bufflen;
3099 cmd->offset += offset;
3100 cmd->free_sg = 1;
3101
3102 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe026, "New cmd->sg: %p\n", cmd->sg);
3103 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe027, "New cmd->sg_cnt: %u\n",
3104 cmd->sg_cnt);
3105 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe028, "New cmd->bufflen: %u\n",
3106 cmd->bufflen);
3107 ql_dbg(ql_dbg_tgt, cmd->vha, 0xe029, "New cmd->offset: %u\n",
3108 cmd->offset);
3109
3110 if (cmd->sg_cnt < 0)
3111 BUG();
3112
3113 if (cmd->bufflen < 0)
3114 BUG();
3115
3116 return 0;
3117}
3118
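/*
 * Work out how much data the SRR asks us to replay.  A relative offset
 * equal to the buffer length means only the status has to be resent;
 * a positive offset shifts the data scatterlist via qlt_set_data_offset().
 */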
3119static inline int qlt_srr_adjust_data(struct qla_tgt_cmd *cmd,
3120 uint32_t srr_rel_offs, int *xmit_type)
3121{
3122 int res = 0, rel_offs;
3123
3124 rel_offs = srr_rel_offs - cmd->offset;
3125 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf027, "srr_rel_offs=%d, rel_offs=%d",
3126 srr_rel_offs, rel_offs);
3127
3128 *xmit_type = QLA_TGT_XMIT_ALL;
3129
3130 if (rel_offs < 0) {
3131 ql_dbg(ql_dbg_tgt_mgt, cmd->vha, 0xf062,
3132 "qla_target(%d): SRR rel_offs (%d) < 0",
3133 cmd->vha->vp_idx, rel_offs);
3134 res = -1;
3135 } else if (rel_offs == cmd->bufflen)
3136 *xmit_type = QLA_TGT_XMIT_STATUS;
3137 else if (rel_offs > 0)
3138 res = qlt_set_data_offset(cmd, rel_offs);
3139
3140 return res;
3141}
3142
3143/* No locks, thread context */
3144static void qlt_handle_srr(struct scsi_qla_host *vha,
3145 struct qla_tgt_srr_ctio *sctio, struct qla_tgt_srr_imm *imm)
3146{
3147 struct imm_ntfy_from_isp *ntfy =
3148 (struct imm_ntfy_from_isp *)&imm->imm_ntfy;
3149 struct qla_hw_data *ha = vha->hw;
3150 struct qla_tgt_cmd *cmd = sctio->cmd;
3151 struct se_cmd *se_cmd = &cmd->se_cmd;
3152 unsigned long flags;
3153 int xmit_type = 0, resp = 0;
3154 uint32_t offset;
3155 uint16_t srr_ui;
3156
3157 offset = le32_to_cpu(ntfy->u.isp24.srr_rel_offs);
3158 srr_ui = ntfy->u.isp24.srr_ui;
3159
3160 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf028, "SRR cmd %p, srr_ui %x\n",
3161 cmd, srr_ui);
3162
3163 switch (srr_ui) {
3164 case SRR_IU_STATUS:
3165 spin_lock_irqsave(&ha->hardware_lock, flags);
3166 qlt_send_notify_ack(vha, ntfy,
3167 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3168 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3169 xmit_type = QLA_TGT_XMIT_STATUS;
3170 resp = 1;
3171 break;
3172 case SRR_IU_DATA_IN:
3173 if (!cmd->sg || !cmd->sg_cnt) {
3174 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf063,
3175 "Unable to process SRR_IU_DATA_IN due to"
3176 " missing cmd->sg, state: %d\n", cmd->state);
3177 dump_stack();
3178 goto out_reject;
3179 }
3180 if (se_cmd->scsi_status != 0) {
3181 ql_dbg(ql_dbg_tgt, vha, 0xe02a,
3182 "Rejecting SRR_IU_DATA_IN with non GOOD "
3183 "scsi_status\n");
3184 goto out_reject;
3185 }
3186 cmd->bufflen = se_cmd->data_length;
3187
3188 if (qlt_has_data(cmd)) {
3189 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3190 goto out_reject;
3191 spin_lock_irqsave(&ha->hardware_lock, flags);
3192 qlt_send_notify_ack(vha, ntfy,
3193 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3194 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3195 resp = 1;
3196 } else {
3197 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf064,
3198 "qla_target(%d): SRR for in data for cmd "
3199 "without them (tag %d, SCSI status %d), "
3200 "reject", vha->vp_idx, cmd->tag,
3201 cmd->se_cmd.scsi_status);
3202 goto out_reject;
3203 }
3204 break;
3205 case SRR_IU_DATA_OUT:
3206 if (!cmd->sg || !cmd->sg_cnt) {
3207 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf065,
3208 "Unable to process SRR_IU_DATA_OUT due to"
3209 " missing cmd->sg\n");
3210 dump_stack();
3211 goto out_reject;
3212 }
3213 if (se_cmd->scsi_status != 0) {
3214 ql_dbg(ql_dbg_tgt, vha, 0xe02b,
3215 "Rejecting SRR_IU_DATA_OUT"
3216 " with non GOOD scsi_status\n");
3217 goto out_reject;
3218 }
3219 cmd->bufflen = se_cmd->data_length;
3220
3221 if (qlt_has_data(cmd)) {
3222 if (qlt_srr_adjust_data(cmd, offset, &xmit_type) != 0)
3223 goto out_reject;
3224 spin_lock_irqsave(&ha->hardware_lock, flags);
3225 qlt_send_notify_ack(vha, ntfy,
3226 0, 0, 0, NOTIFY_ACK_SRR_FLAGS_ACCEPT, 0, 0);
3227 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3228 if (xmit_type & QLA_TGT_XMIT_DATA)
3229 qlt_rdy_to_xfer(cmd);
3230 } else {
3231 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf066,
3232 "qla_target(%d): SRR for out data for cmd "
3233 "without them (tag %d, SCSI status %d), "
3234 "reject", vha->vp_idx, cmd->tag,
3235 cmd->se_cmd.scsi_status);
3236 goto out_reject;
3237 }
3238 break;
3239 default:
3240 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf067,
3241 "qla_target(%d): Unknown srr_ui value %x",
3242 vha->vp_idx, srr_ui);
3243 goto out_reject;
3244 }
3245
3246 /* Transmit response in case of status and data-in cases */
3247 if (resp)
3248 qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
3249
3250 return;
3251
3252out_reject:
3253 spin_lock_irqsave(&ha->hardware_lock, flags);
3254 qlt_send_notify_ack(vha, ntfy, 0, 0, 0,
3255 NOTIFY_ACK_SRR_FLAGS_REJECT,
3256 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3257 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3258 if (cmd->state == QLA_TGT_STATE_NEED_DATA) {
3259 cmd->state = QLA_TGT_STATE_DATA_IN;
3260 dump_stack();
3261 } else
3262 qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
3263 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3264}
3265
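/*
 * Reject the immediate notify SRR ("unable to perform") and free it.
 * Takes the hardware lock itself unless ha_locked indicates it is held.
 */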
3266static void qlt_reject_free_srr_imm(struct scsi_qla_host *vha,
3267 struct qla_tgt_srr_imm *imm, int ha_locked)
3268{
3269 struct qla_hw_data *ha = vha->hw;
3270 unsigned long flags = 0;
3271
3272 if (!ha_locked)
3273 spin_lock_irqsave(&ha->hardware_lock, flags);
3274
3275 qlt_send_notify_ack(vha, (void *)&imm->imm_ntfy, 0, 0, 0,
3276 NOTIFY_ACK_SRR_FLAGS_REJECT,
3277 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3278 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3279
3280 if (!ha_locked)
3281 spin_unlock_irqrestore(&ha->hardware_lock, flags);
3282
3283 kfree(imm);
3284}
3285
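/*
 * SRR work handler: pair each queued CTIO SRR with its matching
 * immediate notify SRR (by srr_id), reset the command's offset and SGL
 * state, and hand the pair to qlt_handle_srr().
 */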
3286static void qlt_handle_srr_work(struct work_struct *work)
3287{
3288 struct qla_tgt *tgt = container_of(work, struct qla_tgt, srr_work);
3289 struct scsi_qla_host *vha = tgt->vha;
3290 struct qla_tgt_srr_ctio *sctio;
3291 unsigned long flags;
3292
3293 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf029, "Entering SRR work (tgt %p)\n",
3294 tgt);
3295
3296restart:
3297 spin_lock_irqsave(&tgt->srr_lock, flags);
3298 list_for_each_entry(sctio, &tgt->srr_ctio_list, srr_list_entry) {
3299 struct qla_tgt_srr_imm *imm, *i, *ti;
3300 struct qla_tgt_cmd *cmd;
3301 struct se_cmd *se_cmd;
3302
3303 imm = NULL;
3304 list_for_each_entry_safe(i, ti, &tgt->srr_imm_list,
3305 srr_list_entry) {
3306 if (i->srr_id == sctio->srr_id) {
3307 list_del(&i->srr_list_entry);
3308 if (imm) {
3309 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf068,
3310 "qla_target(%d): There must be "
3311 "only one IMM SRR per CTIO SRR "
3312 "(IMM SRR %p, id %d, CTIO %p\n",
3313 vha->vp_idx, i, i->srr_id, sctio);
3314 qlt_reject_free_srr_imm(tgt->vha, i, 0);
3315 } else
3316 imm = i;
3317 }
3318 }
3319
3320 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02a,
3321 "IMM SRR %p, CTIO SRR %p (id %d)\n", imm, sctio,
3322 sctio->srr_id);
3323
3324 if (imm == NULL) {
3325 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02b,
3326 "Not found matching IMM for SRR CTIO (id %d)\n",
3327 sctio->srr_id);
3328 continue;
3329 } else
3330 list_del(&sctio->srr_list_entry);
3331
3332 spin_unlock_irqrestore(&tgt->srr_lock, flags);
3333
3334 cmd = sctio->cmd;
3335 /*
3336 * Reset qla_tgt_cmd SRR values and SGL pointer+count to follow
3337 * tcm_qla2xxx_write_pending() and tcm_qla2xxx_queue_data_in()
3338 * logic.
3339 */
3340 cmd->offset = 0;
3341 if (cmd->free_sg) {
3342 kfree(cmd->sg);
3343 cmd->sg = NULL;
3344 cmd->free_sg = 0;
3345 }
3346 se_cmd = &cmd->se_cmd;
3347
3348 cmd->sg_cnt = se_cmd->t_data_nents;
3349 cmd->sg = se_cmd->t_data_sg;
3350
3351 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02c,
3352 "SRR cmd %p (se_cmd %p, tag %d, op %x), "
3353 "sg_cnt=%d, offset=%d", cmd, &cmd->se_cmd, cmd->tag,
3354 se_cmd->t_task_cdb[0], cmd->sg_cnt, cmd->offset);
3355
3356 qlt_handle_srr(vha, sctio, imm);
3357
3358 kfree(imm);
3359 kfree(sctio);
3360 goto restart;
3361 }
3362 spin_unlock_irqrestore(&tgt->srr_lock, flags);
3363}
3364
3365/* ha->hardware_lock supposed to be held on entry */
3366static void qlt_prepare_srr_imm(struct scsi_qla_host *vha,
3367 struct imm_ntfy_from_isp *iocb)
3368{
3369 struct qla_tgt_srr_imm *imm;
3370 struct qla_hw_data *ha = vha->hw;
3371 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3372 struct qla_tgt_srr_ctio *sctio;
3373
3374 tgt->imm_srr_id++;
3375
3376 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02d, "qla_target(%d): SRR received\n",
3377 vha->vp_idx);
3378
3379 imm = kzalloc(sizeof(*imm), GFP_ATOMIC);
3380 if (imm != NULL) {
3381 memcpy(&imm->imm_ntfy, iocb, sizeof(imm->imm_ntfy));
3382
3383 /* IRQ is already OFF */
3384 spin_lock(&tgt->srr_lock);
3385 imm->srr_id = tgt->imm_srr_id;
3386 list_add_tail(&imm->srr_list_entry,
3387 &tgt->srr_imm_list);
3388 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02e,
3389 "IMM NTFY SRR %p added (id %d, ui %x)\n",
3390 imm, imm->srr_id, iocb->u.isp24.srr_ui);
3391 if (tgt->imm_srr_id == tgt->ctio_srr_id) {
3392 int found = 0;
3393 list_for_each_entry(sctio, &tgt->srr_ctio_list,
3394 srr_list_entry) {
3395 if (sctio->srr_id == imm->srr_id) {
3396 found = 1;
3397 break;
3398 }
3399 }
3400 if (found) {
3401 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf02f, "%s",
3402 "Scheduling srr work\n");
3403 schedule_work(&tgt->srr_work);
3404 } else {
3405 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf030,
3406 "qla_target(%d): imm_srr_id "
3407 "== ctio_srr_id (%d), but there is no "
3408 "corresponding SRR CTIO, deleting IMM "
3409 "SRR %p\n", vha->vp_idx, tgt->ctio_srr_id,
3410 imm);
3411 list_del(&imm->srr_list_entry);
3412
3413 kfree(imm);
3414
3415 spin_unlock(&tgt->srr_lock);
3416 goto out_reject;
3417 }
3418 }
3419 spin_unlock(&tgt->srr_lock);
3420 } else {
3421 struct qla_tgt_srr_ctio *ts;
3422
3423 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf069,
3424 "qla_target(%d): Unable to allocate SRR IMM "
3425 "entry, SRR request will be rejected\n", vha->vp_idx);
3426
3427 /* IRQ is already OFF */
3428 spin_lock(&tgt->srr_lock);
3429 list_for_each_entry_safe(sctio, ts, &tgt->srr_ctio_list,
3430 srr_list_entry) {
3431 if (sctio->srr_id == tgt->imm_srr_id) {
3432 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf031,
3433 "CTIO SRR %p deleted (id %d)\n",
3434 sctio, sctio->srr_id);
3435 list_del(&sctio->srr_list_entry);
3436 qlt_send_term_exchange(vha, sctio->cmd,
3437 &sctio->cmd->atio, 1);
3438 kfree(sctio);
3439 }
3440 }
3441 spin_unlock(&tgt->srr_lock);
3442 goto out_reject;
3443 }
3444
3445 return;
3446
3447out_reject:
3448 qlt_send_notify_ack(vha, iocb, 0, 0, 0,
3449 NOTIFY_ACK_SRR_FLAGS_REJECT,
3450 NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM,
3451 NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL);
3452}
3453
3454/*
3455 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3456 */
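/*
 * Dispatch an IMMEDIATE NOTIFY IOCB based on its status code.  Unless a
 * specific handler takes over the acknowledgement, a notify ack is sent
 * at the end.
 */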
3457static void qlt_handle_imm_notify(struct scsi_qla_host *vha,
3458 struct imm_ntfy_from_isp *iocb)
3459{
3460 struct qla_hw_data *ha = vha->hw;
3461 uint32_t add_flags = 0;
3462 int send_notify_ack = 1;
3463 uint16_t status;
3464
3465 status = le16_to_cpu(iocb->u.isp2x.status);
3466 switch (status) {
3467 case IMM_NTFY_LIP_RESET:
3468 {
3469 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf032,
3470 "qla_target(%d): LIP reset (loop %#x), subcode %x\n",
3471 vha->vp_idx, le16_to_cpu(iocb->u.isp24.nport_handle),
3472 iocb->u.isp24.status_subcode);
3473
3474 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3475 send_notify_ack = 0;
3476 break;
3477 }
3478
3479 case IMM_NTFY_LIP_LINK_REINIT:
3480 {
3481 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3482 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf033,
3483 "qla_target(%d): LINK REINIT (loop %#x, "
3484 "subcode %x)\n", vha->vp_idx,
3485 le16_to_cpu(iocb->u.isp24.nport_handle),
3486 iocb->u.isp24.status_subcode);
3487 if (tgt->link_reinit_iocb_pending) {
3488 qlt_send_notify_ack(vha, &tgt->link_reinit_iocb,
3489 0, 0, 0, 0, 0, 0);
3490 }
3491 memcpy(&tgt->link_reinit_iocb, iocb, sizeof(*iocb));
3492 tgt->link_reinit_iocb_pending = 1;
3493 /*
3494 * QLogic requires us to wait after LINK REINIT for possible
3495 * PDISC or ADISC ELS commands.
3496 */
3497 send_notify_ack = 0;
3498 break;
3499 }
3500
3501 case IMM_NTFY_PORT_LOGOUT:
3502 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf034,
3503 "qla_target(%d): Port logout (loop "
3504 "%#x, subcode %x)\n", vha->vp_idx,
3505 le16_to_cpu(iocb->u.isp24.nport_handle),
3506 iocb->u.isp24.status_subcode);
3507
3508 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS_SESS) == 0)
3509 send_notify_ack = 0;
3510 /* The sessions will be cleared in the callback, if needed */
3511 break;
3512
3513 case IMM_NTFY_GLBL_TPRLO:
3514 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf035,
3515 "qla_target(%d): Global TPRLO (%x)\n", vha->vp_idx, status);
3516 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3517 send_notify_ack = 0;
3518 /* The sessions will be cleared in the callback, if needed */
3519 break;
3520
3521 case IMM_NTFY_PORT_CONFIG:
3522 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf036,
3523 "qla_target(%d): Port config changed (%x)\n", vha->vp_idx,
3524 status);
3525 if (qlt_reset(vha, iocb, QLA_TGT_ABORT_ALL) == 0)
3526 send_notify_ack = 0;
3527 /* The sessions will be cleared in the callback, if needed */
3528 break;
3529
3530 case IMM_NTFY_GLBL_LOGO:
3531 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06a,
3532 "qla_target(%d): Link failure detected\n",
3533 vha->vp_idx);
3534 /* I_T nexus loss */
3535 if (qlt_reset(vha, iocb, QLA_TGT_NEXUS_LOSS) == 0)
3536 send_notify_ack = 0;
3537 break;
3538
3539 case IMM_NTFY_IOCB_OVERFLOW:
3540 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06b,
3541 "qla_target(%d): Cannot provide requested "
3542 "capability (IOCB overflowed the immediate notify "
3543 "resource count)\n", vha->vp_idx);
3544 break;
3545
3546 case IMM_NTFY_ABORT_TASK:
3547 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf037,
3548 "qla_target(%d): Abort Task (S %08x I %#x -> "
3549 "L %#x)\n", vha->vp_idx,
3550 le16_to_cpu(iocb->u.isp2x.seq_id),
3551 GET_TARGET_ID(ha, (struct atio_from_isp *)iocb),
3552 le16_to_cpu(iocb->u.isp2x.lun));
3553 if (qlt_abort_task(vha, iocb) == 0)
3554 send_notify_ack = 0;
3555 break;
3556
3557 case IMM_NTFY_RESOURCE:
3558 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06c,
3559 "qla_target(%d): Out of resources, host %ld\n",
3560 vha->vp_idx, vha->host_no);
3561 break;
3562
3563 case IMM_NTFY_MSG_RX:
3564 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf038,
3565 "qla_target(%d): Immediate notify task %x\n",
3566 vha->vp_idx, iocb->u.isp2x.task_flags);
3567 if (qlt_handle_task_mgmt(vha, iocb) == 0)
3568 send_notify_ack = 0;
3569 break;
3570
3571 case IMM_NTFY_ELS:
3572 if (qlt_24xx_handle_els(vha, iocb) == 0)
3573 send_notify_ack = 0;
3574 break;
3575
3576 case IMM_NTFY_SRR:
3577 qlt_prepare_srr_imm(vha, iocb);
3578 send_notify_ack = 0;
3579 break;
3580
3581 default:
3582 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06d,
3583 "qla_target(%d): Received unknown immediate "
3584 "notify status %x\n", vha->vp_idx, status);
3585 break;
3586 }
3587
3588 if (send_notify_ack)
3589 qlt_send_notify_ack(vha, iocb, add_flags, 0, 0, 0, 0, 0);
3590}
3591
3592/*
3593 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3594 * This function sends busy to ISP 2xxx or 24xx.
3595 */
3596static void qlt_send_busy(struct scsi_qla_host *vha,
3597 struct atio_from_isp *atio, uint16_t status)
3598{
3599 struct ctio7_to_24xx *ctio24;
3600 struct qla_hw_data *ha = vha->hw;
3601 request_t *pkt;
3602 struct qla_tgt_sess *sess = NULL;
3603
3604 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
3605 atio->u.isp24.fcp_hdr.s_id);
3606 if (!sess) {
3607 qlt_send_term_exchange(vha, NULL, atio, 1);
3608 return;
3609 }
3610 /* Sending marker isn't necessary, since we are called from ISR */
3611
3612 pkt = (request_t *)qla2x00_alloc_iocbs(vha, NULL);
3613 if (!pkt) {
3614 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06e,
3615 "qla_target(%d): %s failed: unable to allocate "
3616 "request packet", vha->vp_idx, __func__);
3617 return;
3618 }
3619
3620 pkt->entry_count = 1;
3621 pkt->handle = QLA_TGT_SKIP_HANDLE | CTIO_COMPLETION_HANDLE_MARK;
3622
3623 ctio24 = (struct ctio7_to_24xx *)pkt;
3624 ctio24->entry_type = CTIO_TYPE7;
3625 ctio24->nport_handle = sess->loop_id;
3626 ctio24->timeout = __constant_cpu_to_le16(QLA_TGT_TIMEOUT);
3627 ctio24->vp_index = vha->vp_idx;
3628 ctio24->initiator_id[0] = atio->u.isp24.fcp_hdr.s_id[2];
3629 ctio24->initiator_id[1] = atio->u.isp24.fcp_hdr.s_id[1];
3630 ctio24->initiator_id[2] = atio->u.isp24.fcp_hdr.s_id[0];
3631 ctio24->exchange_addr = atio->u.isp24.exchange_addr;
3632 ctio24->u.status1.flags = (atio->u.isp24.attr << 9) |
3633 __constant_cpu_to_le16(
3634 CTIO7_FLAGS_STATUS_MODE_1 | CTIO7_FLAGS_SEND_STATUS |
3635 CTIO7_FLAGS_DONT_RET_CTIO);
3636 /*
3637 * CTIO from fw w/o se_cmd doesn't provide enough info to retry it,
3638 * if explicit confirmation is used.
3639 */
3640 ctio24->u.status1.ox_id = swab16(atio->u.isp24.fcp_hdr.ox_id);
3641 ctio24->u.status1.scsi_status = cpu_to_le16(status);
3642 ctio24->u.status1.residual = get_unaligned((uint32_t *)
3643 &atio->u.isp24.fcp_cmnd.add_cdb[
3644 atio->u.isp24.fcp_cmnd.add_cdb_len]);
3645 if (ctio24->u.status1.residual != 0)
3646 ctio24->u.status1.scsi_status |= SS_RESIDUAL_UNDER;
3647
3648 qla2x00_start_iocbs(vha, vha->req);
3649}
3650
3651/* ha->hardware_lock supposed to be held on entry */
3652/* called via callback from qla2xxx */
3653static void qlt_24xx_atio_pkt(struct scsi_qla_host *vha,
3654 struct atio_from_isp *atio)
3655{
3656 struct qla_hw_data *ha = vha->hw;
3657 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3658 int rc;
3659
3660 if (unlikely(tgt == NULL)) {
3661 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf039,
3662 "ATIO pkt, but no tgt (ha %p)", ha);
3663 return;
3664 }
3665 ql_dbg(ql_dbg_tgt, vha, 0xe02c,
3666 "qla_target(%d): ATIO pkt %p: type %02x count %02x",
3667 vha->vp_idx, atio, atio->u.raw.entry_type,
3668 atio->u.raw.entry_count);
3669 /*
3670 * In tgt_stop mode we should also allow all requests to pass.
3671 * Otherwise, some commands can get stuck.
3672 */
3673
3674 tgt->irq_cmd_count++;
3675
3676 switch (atio->u.raw.entry_type) {
3677 case ATIO_TYPE7:
3678 ql_dbg(ql_dbg_tgt, vha, 0xe02d,
3679 "ATIO_TYPE7 instance %d, lun %Lx, read/write %d/%d, "
3680 "add_cdb_len %d, data_length %04x, s_id %x:%x:%x\n",
3681 vha->vp_idx, atio->u.isp24.fcp_cmnd.lun,
3682 atio->u.isp24.fcp_cmnd.rddata,
3683 atio->u.isp24.fcp_cmnd.wrdata,
3684 atio->u.isp24.fcp_cmnd.add_cdb_len,
3685 be32_to_cpu(get_unaligned((uint32_t *)
3686 &atio->u.isp24.fcp_cmnd.add_cdb[
3687 atio->u.isp24.fcp_cmnd.add_cdb_len])),
3688 atio->u.isp24.fcp_hdr.s_id[0],
3689 atio->u.isp24.fcp_hdr.s_id[1],
3690 atio->u.isp24.fcp_hdr.s_id[2]);
3691
3692 if (unlikely(atio->u.isp24.exchange_addr ==
3693 ATIO_EXCHANGE_ADDRESS_UNKNOWN)) {
3694 ql_dbg(ql_dbg_tgt, vha, 0xe058,
3695 "qla_target(%d): ATIO_TYPE7 "
3696 "received with UNKNOWN exchange address, "
3697 "sending QUEUE_FULL\n", vha->vp_idx);
3698 qlt_send_busy(vha, atio, SAM_STAT_TASK_SET_FULL);
3699 break;
3700 }
3701 if (likely(atio->u.isp24.fcp_cmnd.task_mgmt_flags == 0))
3702 rc = qlt_handle_cmd_for_atio(vha, atio);
3703 else
3704 rc = qlt_handle_task_mgmt(vha, atio);
3705 if (unlikely(rc != 0)) {
3706 if (rc == -ESRCH) {
3707#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3708 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3709#else
3710 qlt_send_term_exchange(vha, NULL, atio, 1);
3711#endif
3712 } else {
3713 if (tgt->tgt_stop) {
3714 ql_dbg(ql_dbg_tgt, vha, 0xe059,
3715 "qla_target: Unable to send "
3716 "command to target for req, "
3717 "ignoring.\n");
3718 } else {
3719 ql_dbg(ql_dbg_tgt, vha, 0xe05a,
3720 "qla_target(%d): Unable to send "
3721 "command to target, sending BUSY "
3722 "status.\n", vha->vp_idx);
3723 qlt_send_busy(vha, atio, SAM_STAT_BUSY);
3724 }
3725 }
3726 }
3727 break;
3728
3729 case IMMED_NOTIFY_TYPE:
3730 {
3731 if (unlikely(atio->u.isp2x.entry_status != 0)) {
3732 ql_dbg(ql_dbg_tgt, vha, 0xe05b,
3733 "qla_target(%d): Received ATIO packet %x "
3734 "with error status %x\n", vha->vp_idx,
3735 atio->u.raw.entry_type,
3736 atio->u.isp2x.entry_status);
3737 break;
3738 }
3739 ql_dbg(ql_dbg_tgt, vha, 0xe02e, "%s", "IMMED_NOTIFY ATIO");
3740 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)atio);
3741 break;
3742 }
3743
3744 default:
3745 ql_dbg(ql_dbg_tgt, vha, 0xe05c,
3746 "qla_target(%d): Received unknown ATIO atio "
3747 "type %x\n", vha->vp_idx, atio->u.raw.entry_type);
3748 break;
3749 }
3750
3751 tgt->irq_cmd_count--;
3752}
3753
3754/* ha->hardware_lock supposed to be held on entry */
3755/* called via callback from qla2xxx */
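/*
 * Dispatch target-mode response queue entries: CTIO completions,
 * 2xxx-series ATIO/IMMED_NOTIFY packets, NOTIFY_ACK status and
 * ABTS traffic.
 */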
3756static void qlt_response_pkt(struct scsi_qla_host *vha, response_t *pkt)
3757{
3758 struct qla_hw_data *ha = vha->hw;
3759 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3760
3761 if (unlikely(tgt == NULL)) {
3762 ql_dbg(ql_dbg_tgt, vha, 0xe05d,
3763 "qla_target(%d): Response pkt %x received, but no "
3764 "tgt (ha %p)\n", vha->vp_idx, pkt->entry_type, ha);
3765 return;
3766 }
3767
3768 ql_dbg(ql_dbg_tgt, vha, 0xe02f,
3769 "qla_target(%d): response pkt %p: T %02x C %02x S %02x "
3770 "handle %#x\n", vha->vp_idx, pkt, pkt->entry_type,
3771 pkt->entry_count, pkt->entry_status, pkt->handle);
3772
3773 /*
3774 * In tgt_stop mode we should also allow all requests to pass.
3775 * Otherwise, some commands can get stuck.
3776 */
3777
3778 tgt->irq_cmd_count++;
3779
3780 switch (pkt->entry_type) {
3781 case CTIO_TYPE7:
3782 {
3783 struct ctio7_from_24xx *entry = (struct ctio7_from_24xx *)pkt;
3784 ql_dbg(ql_dbg_tgt, vha, 0xe030, "CTIO_TYPE7: instance %d\n",
3785 vha->vp_idx);
3786 qlt_do_ctio_completion(vha, entry->handle,
3787 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3788 entry);
3789 break;
3790 }
3791
3792 case ACCEPT_TGT_IO_TYPE:
3793 {
3794 struct atio_from_isp *atio = (struct atio_from_isp *)pkt;
3795 int rc;
3796 ql_dbg(ql_dbg_tgt, vha, 0xe031,
3797 "ACCEPT_TGT_IO instance %d status %04x "
3798 "lun %04x read/write %d data_length %04x "
3799 "target_id %02x rx_id %04x\n ", vha->vp_idx,
3800 le16_to_cpu(atio->u.isp2x.status),
3801 le16_to_cpu(atio->u.isp2x.lun),
3802 atio->u.isp2x.execution_codes,
3803 le32_to_cpu(atio->u.isp2x.data_length), GET_TARGET_ID(ha,
3804 atio), atio->u.isp2x.rx_id);
3805 if (atio->u.isp2x.status !=
3806 __constant_cpu_to_le16(ATIO_CDB_VALID)) {
3807 ql_dbg(ql_dbg_tgt, vha, 0xe05e,
3808 "qla_target(%d): ATIO with error "
3809 "status %x received\n", vha->vp_idx,
3810 le16_to_cpu(atio->u.isp2x.status));
3811 break;
3812 }
3813 ql_dbg(ql_dbg_tgt, vha, 0xe032,
3814 "FCP CDB: 0x%02x, sizeof(cdb): %lu",
3815 atio->u.isp2x.cdb[0], (unsigned long
3816 int)sizeof(atio->u.isp2x.cdb));
3817
3818 rc = qlt_handle_cmd_for_atio(vha, atio);
3819 if (unlikely(rc != 0)) {
3820 if (rc == -ESRCH) {
3821#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
3822 qlt_send_busy(vha, atio, 0);
3823#else
3824 qlt_send_term_exchange(vha, NULL, atio, 1);
3825#endif
3826 } else {
3827 if (tgt->tgt_stop) {
3828 ql_dbg(ql_dbg_tgt, vha, 0xe05f,
3829 "qla_target: Unable to send "
3830 "command to target, sending TERM "
3831 "EXCHANGE for rsp\n");
3832 qlt_send_term_exchange(vha, NULL,
3833 atio, 1);
3834 } else {
3835 ql_dbg(ql_dbg_tgt, vha, 0xe060,
3836 "qla_target(%d): Unable to send "
3837 "command to target, sending BUSY "
3838 "status\n", vha->vp_idx);
3839 qlt_send_busy(vha, atio, 0);
3840 }
3841 }
3842 }
3843 }
3844 break;
3845
3846 case CONTINUE_TGT_IO_TYPE:
3847 {
3848 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3849 ql_dbg(ql_dbg_tgt, vha, 0xe033,
3850 "CONTINUE_TGT_IO: instance %d\n", vha->vp_idx);
3851 qlt_do_ctio_completion(vha, entry->handle,
3852 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3853 entry);
3854 break;
3855 }
3856
3857 case CTIO_A64_TYPE:
3858 {
3859 struct ctio_to_2xxx *entry = (struct ctio_to_2xxx *)pkt;
3860 ql_dbg(ql_dbg_tgt, vha, 0xe034, "CTIO_A64: instance %d\n",
3861 vha->vp_idx);
3862 qlt_do_ctio_completion(vha, entry->handle,
3863 le16_to_cpu(entry->status)|(pkt->entry_status << 16),
3864 entry);
3865 break;
3866 }
3867
3868 case IMMED_NOTIFY_TYPE:
3869 ql_dbg(ql_dbg_tgt, vha, 0xe035, "%s", "IMMED_NOTIFY\n");
3870 qlt_handle_imm_notify(vha, (struct imm_ntfy_from_isp *)pkt);
3871 break;
3872
3873 case NOTIFY_ACK_TYPE:
3874 if (tgt->notify_ack_expected > 0) {
3875 struct nack_to_isp *entry = (struct nack_to_isp *)pkt;
3876 ql_dbg(ql_dbg_tgt, vha, 0xe036,
3877 "NOTIFY_ACK seq %08x status %x\n",
3878 le16_to_cpu(entry->u.isp2x.seq_id),
3879 le16_to_cpu(entry->u.isp2x.status));
3880 tgt->notify_ack_expected--;
3881 if (entry->u.isp2x.status !=
3882 __constant_cpu_to_le16(NOTIFY_ACK_SUCCESS)) {
3883 ql_dbg(ql_dbg_tgt, vha, 0xe061,
3884 "qla_target(%d): NOTIFY_ACK "
3885 "failed %x\n", vha->vp_idx,
3886 le16_to_cpu(entry->u.isp2x.status));
3887 }
3888 } else {
3889 ql_dbg(ql_dbg_tgt, vha, 0xe062,
3890 "qla_target(%d): Unexpected NOTIFY_ACK received\n",
3891 vha->vp_idx);
3892 }
3893 break;
3894
3895 case ABTS_RECV_24XX:
3896 ql_dbg(ql_dbg_tgt, vha, 0xe037,
3897 "ABTS_RECV_24XX: instance %d\n", vha->vp_idx);
3898 qlt_24xx_handle_abts(vha, (struct abts_recv_from_24xx *)pkt);
3899 break;
3900
3901 case ABTS_RESP_24XX:
3902 if (tgt->abts_resp_expected > 0) {
3903 struct abts_resp_from_24xx_fw *entry =
3904 (struct abts_resp_from_24xx_fw *)pkt;
3905 ql_dbg(ql_dbg_tgt, vha, 0xe038,
3906 "ABTS_RESP_24XX: compl_status %x\n",
3907 entry->compl_status);
3908 tgt->abts_resp_expected--;
3909 if (le16_to_cpu(entry->compl_status) !=
3910 ABTS_RESP_COMPL_SUCCESS) {
3911 if ((entry->error_subcode1 == 0x1E) &&
3912 (entry->error_subcode2 == 0)) {
3913 /*
3914 * We've got a race here: the aborted
3915 * exchange was not terminated, i.e.
3916 * the response for the aborted command
3917 * was sent between the time the abort
3918 * request was received and processed.
3919 * Unfortunately, the firmware has a
3920 * silly requirement that all aborted
3921 * exchanges must be explicitly
3922 * terminated, otherwise it refuses to
3923 * send responses for the abort
3924 * requests. So, we have to
3925 * (re)terminate the exchange and retry
3926 * the abort response.
3927 */
3928 qlt_24xx_retry_term_exchange(vha,
3929 entry);
3930 } else
3931 ql_dbg(ql_dbg_tgt, vha, 0xe063,
3932 "qla_target(%d): ABTS_RESP_24XX "
3933 "failed %x (subcode %x:%x)",
3934 vha->vp_idx, entry->compl_status,
3935 entry->error_subcode1,
3936 entry->error_subcode2);
3937 }
3938 } else {
3939 ql_dbg(ql_dbg_tgt, vha, 0xe064,
3940 "qla_target(%d): Unexpected ABTS_RESP_24XX "
3941 "received\n", vha->vp_idx);
3942 }
3943 break;
3944
3945 default:
3946 ql_dbg(ql_dbg_tgt, vha, 0xe065,
3947 "qla_target(%d): Received unknown response pkt "
3948 "type %x\n", vha->vp_idx, pkt->entry_type);
3949 break;
3950 }
3951
3952 tgt->irq_cmd_count--;
3953}
3954
3955/*
3956 * ha->hardware_lock supposed to be held on entry. Might drop it, then reacquire
3957 */
3958void qlt_async_event(uint16_t code, struct scsi_qla_host *vha,
3959 uint16_t *mailbox)
3960{
3961 struct qla_hw_data *ha = vha->hw;
3962 struct qla_tgt *tgt = ha->tgt.qla_tgt;
3963 int reason_code;
3964
3965 ql_dbg(ql_dbg_tgt, vha, 0xe039,
3966 "scsi(%ld): ha state %d init_done %d oper_mode %d topo %d\n",
3967 vha->host_no, atomic_read(&vha->loop_state), vha->flags.init_done,
3968 ha->operating_mode, ha->current_topology);
3969
3970 if (!ha->tgt.tgt_ops)
3971 return;
3972
3973 if (unlikely(tgt == NULL)) {
3974 ql_dbg(ql_dbg_tgt, vha, 0xe03a,
3975 "ASYNC EVENT %#x, but no tgt (ha %p)\n", code, ha);
3976 return;
3977 }
3978
3979 if (((code == MBA_POINT_TO_POINT) || (code == MBA_CHG_IN_CONNECTION)) &&
3980 IS_QLA2100(ha))
3981 return;
3982 /*
3983 * In tgt_stop mode we should also allow all requests to pass.
3984 * Otherwise, some commands can get stuck.
3985 */
3986
3987 tgt->irq_cmd_count++;
3988
3989 switch (code) {
3990 case MBA_RESET: /* Reset */
3991 case MBA_SYSTEM_ERR: /* System Error */
3992 case MBA_REQ_TRANSFER_ERR: /* Request Transfer Error */
3993 case MBA_RSP_TRANSFER_ERR: /* Response Transfer Error */
3994 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03a,
3995 "qla_target(%d): System error async event %#x "
3996 "occured", vha->vp_idx, code);
3997 break;
3998 case MBA_WAKEUP_THRES: /* Request Queue Wake-up. */
3999 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4000 break;
4001
4002 case MBA_LOOP_UP:
4003 {
4004 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03b,
4005 "qla_target(%d): Async LOOP_UP occured "
4006 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
4007 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
4008 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
4009 if (tgt->link_reinit_iocb_pending) {
4010 qlt_send_notify_ack(vha, (void *)&tgt->link_reinit_iocb,
4011 0, 0, 0, 0, 0, 0);
4012 tgt->link_reinit_iocb_pending = 0;
4013 }
4014 break;
4015 }
4016
4017 case MBA_LIP_OCCURRED:
4018 case MBA_LOOP_DOWN:
4019 case MBA_LIP_RESET:
4020 case MBA_RSCN_UPDATE:
4021 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03c,
4022 "qla_target(%d): Async event %#x occured "
4023 "(m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx, code,
4024 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
4025 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
4026 break;
4027
4028 case MBA_PORT_UPDATE:
4029 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03d,
4030 "qla_target(%d): Port update async event %#x "
4031 "occured: updating the ports database (m[1]=%x, m[2]=%x, "
4032 "m[3]=%x, m[4]=%x)", vha->vp_idx, code,
4033 le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
4034 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
4035 reason_code = le16_to_cpu(mailbox[2]);
4036 if (reason_code == 0x4)
4037 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03e,
4038 "Async MB 2: Got PLOGI Complete\n");
4039 else if (reason_code == 0x7)
4040 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf03f,
4041 "Async MB 2: Port Logged Out\n");
4042 break;
4043
4044 default:
4045 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf040,
4046 "qla_target(%d): Async event %#x occured: "
4047 "ignore (m[1]=%x, m[2]=%x, m[3]=%x, m[4]=%x)", vha->vp_idx,
4048 code, le16_to_cpu(mailbox[1]), le16_to_cpu(mailbox[2]),
4049 le16_to_cpu(mailbox[3]), le16_to_cpu(mailbox[4]));
4050 break;
4051 }
4052
4053 tgt->irq_cmd_count--;
4054}
4055
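/*
 * Allocate a temporary fc_port and fill it from the firmware port
 * database for the given loop_id.  The caller owns (and must kfree)
 * the returned structure.
 */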
4056static fc_port_t *qlt_get_port_database(struct scsi_qla_host *vha,
4057 uint16_t loop_id)
4058{
4059 fc_port_t *fcport;
4060 int rc;
4061
4062 fcport = kzalloc(sizeof(*fcport), GFP_KERNEL);
4063 if (!fcport) {
4064 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf06f,
4065 "qla_target(%d): Allocation of tmp FC port failed",
4066 vha->vp_idx);
4067 return NULL;
4068 }
4069
4070 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf041, "loop_id %d", loop_id);
4071
4072 fcport->loop_id = loop_id;
4073
4074 rc = qla2x00_get_port_database(vha, fcport, 0);
4075 if (rc != QLA_SUCCESS) {
4076 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf070,
4077 "qla_target(%d): Failed to retrieve fcport "
4078 "information -- get_port_database() returned %x "
4079 "(loop_id=0x%04x)", vha->vp_idx, rc, loop_id);
4080 kfree(fcport);
4081 return NULL;
4082 }
4083
4084 return fcport;
4085}
4086
4087/* Must be called under tgt_mutex */
4088static struct qla_tgt_sess *qlt_make_local_sess(struct scsi_qla_host *vha,
4089 uint8_t *s_id)
4090{
4091 struct qla_hw_data *ha = vha->hw;
4092 struct qla_tgt_sess *sess = NULL;
4093 fc_port_t *fcport = NULL;
4094 int rc, global_resets;
4095 uint16_t loop_id = 0;
4096
4097retry:
4098 global_resets = atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count);
4099
4100 rc = qla24xx_get_loop_id(vha, s_id, &loop_id);
4101 if (rc != 0) {
4102 if ((s_id[0] == 0xFF) &&
4103 (s_id[1] == 0xFC)) {
4104 /*
4105 * This is the Domain Controller, so it should be
4106 * OK to drop SCSI commands from it.
4107 */
4108 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf042,
4109 "Unable to find initiator with S_ID %x:%x:%x",
4110 s_id[0], s_id[1], s_id[2]);
4111 } else
4112 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf071,
4113 "qla_target(%d): Unable to find "
4114 "initiator with S_ID %x:%x:%x",
4115 vha->vp_idx, s_id[0], s_id[1],
4116 s_id[2]);
4117 return NULL;
4118 }
4119
4120 fcport = qlt_get_port_database(vha, loop_id);
4121 if (!fcport)
4122 return NULL;
4123
4124 if (global_resets !=
4125 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count)) {
4126 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf043,
4127 "qla_target(%d): global reset during session discovery "
4128 "(counter was %d, new %d), retrying", vha->vp_idx,
4129 global_resets,
4130 atomic_read(&ha->tgt.qla_tgt->tgt_global_resets_count));
4131 goto retry;
4132 }
4133
4134 sess = qlt_create_sess(vha, fcport, true);
4135
4136 kfree(fcport);
4137 return sess;
4138}
4139
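/*
 * Deferred ABTS handling for a session that was not found at interrupt
 * time: look the session up again (creating it if necessary) and run
 * the normal __qlt_24xx_handle_abts() path.
 */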
4140static void qlt_abort_work(struct qla_tgt *tgt,
4141 struct qla_tgt_sess_work_param *prm)
4142{
4143 struct scsi_qla_host *vha = tgt->vha;
4144 struct qla_hw_data *ha = vha->hw;
4145 struct qla_tgt_sess *sess = NULL;
4146 unsigned long flags;
4148 uint8_t s_id[3];
4149 int rc;
4150
4151 spin_lock_irqsave(&ha->hardware_lock, flags);
4152
4153 if (tgt->tgt_stop)
4154 goto out_term;
4155
4156 s_id[0] = prm->abts.fcp_hdr_le.s_id[2];
4157 s_id[1] = prm->abts.fcp_hdr_le.s_id[1];
4158 s_id[2] = prm->abts.fcp_hdr_le.s_id[0];
4159
4160 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha,
4161 s_id);
4162 if (!sess) {
4163 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4164
4165 mutex_lock(&ha->tgt.tgt_mutex);
4166 sess = qlt_make_local_sess(vha, s_id);
4167 /* sess has got an extra creation ref */
4168 mutex_unlock(&ha->tgt.tgt_mutex);
4169
4170 spin_lock_irqsave(&ha->hardware_lock, flags);
4171 if (!sess)
4172 goto out_term;
4173 } else {
4174 kref_get(&sess->se_sess->sess_kref);
4175 }
4176
4177 if (tgt->tgt_stop)
4178 goto out_term;
4179
4180 rc = __qlt_24xx_handle_abts(vha, &prm->abts, sess);
4181 if (rc != 0)
4182 goto out_term;
4183 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4184
4185 ha->tgt.tgt_ops->put_sess(sess);
4186 return;
4187
4188out_term:
4189 qlt_24xx_send_abts_resp(vha, &prm->abts, FCP_TMF_REJECTED, false);
4190 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4191 if (sess)
4192 ha->tgt.tgt_ops->put_sess(sess);
4193}
4194
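/*
 * Deferred task management handling: resolve (or create) the session
 * for the saved ATIO and issue the TM function via qlt_issue_task_mgmt().
 */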
4195static void qlt_tmr_work(struct qla_tgt *tgt,
4196 struct qla_tgt_sess_work_param *prm)
4197{
4198 struct atio_from_isp *a = &prm->tm_iocb2;
4199 struct scsi_qla_host *vha = tgt->vha;
4200 struct qla_hw_data *ha = vha->hw;
4201 struct qla_tgt_sess *sess = NULL;
4202 unsigned long flags;
4203 uint8_t *s_id = NULL; /* to hide compiler warnings */
4204 int rc;
4205 uint32_t lun, unpacked_lun;
4206 int lun_size, fn;
4207 void *iocb;
4208
4209 spin_lock_irqsave(&ha->hardware_lock, flags);
4210
4211 if (tgt->tgt_stop)
4212 goto out_term;
4213
4214 s_id = prm->tm_iocb2.u.isp24.fcp_hdr.s_id;
4215 sess = ha->tgt.tgt_ops->find_sess_by_s_id(vha, s_id);
4216 if (!sess) {
4217 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4218
4219 mutex_lock(&ha->tgt.tgt_mutex);
4220 sess = qlt_make_local_sess(vha, s_id);
4221 /* sess has got an extra creation ref */
4222 mutex_unlock(&ha->tgt.tgt_mutex);
4223
4224 spin_lock_irqsave(&ha->hardware_lock, flags);
4225 if (!sess)
4226 goto out_term;
4227 } else {
4228 kref_get(&sess->se_sess->sess_kref);
4229 }
4230
4231 iocb = a;
4232 lun = a->u.isp24.fcp_cmnd.lun;
4233 lun_size = sizeof(lun);
4234 fn = a->u.isp24.fcp_cmnd.task_mgmt_flags;
4235 unpacked_lun = scsilun_to_int((struct scsi_lun *)&lun);
4236
4237 rc = qlt_issue_task_mgmt(sess, unpacked_lun, fn, iocb, 0);
4238 if (rc != 0)
4239 goto out_term;
4240 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4241
4242 ha->tgt.tgt_ops->put_sess(sess);
4243 return;
4244
4245out_term:
4246 qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1);
4247 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4248 if (sess)
4249 ha->tgt.tgt_ops->put_sess(sess);
4250}
4251
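/*
 * Session work handler: drain tgt->sess_works_list and dispatch each
 * queued ABTS or task management request to the helpers above.
 */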
4252static void qlt_sess_work_fn(struct work_struct *work)
4253{
4254 struct qla_tgt *tgt = container_of(work, struct qla_tgt, sess_work);
4255 struct scsi_qla_host *vha = tgt->vha;
4256 unsigned long flags;
4257
4258 ql_dbg(ql_dbg_tgt_mgt, vha, 0xf000, "Sess work (tgt %p)", tgt);
4259
4260 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4261 while (!list_empty(&tgt->sess_works_list)) {
4262 struct qla_tgt_sess_work_param *prm = list_entry(
4263 tgt->sess_works_list.next, typeof(*prm),
4264 sess_works_list_entry);
4265
4266 /*
4267 * This work can be scheduled on several CPUs at a time, so we
4268 * must delete the entry to eliminate double processing.
4269 */
4270 list_del(&prm->sess_works_list_entry);
4271
4272 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4273
4274 switch (prm->type) {
4275 case QLA_TGT_SESS_WORK_ABORT:
4276 qlt_abort_work(tgt, prm);
4277 break;
4278 case QLA_TGT_SESS_WORK_TM:
4279 qlt_tmr_work(tgt, prm);
4280 break;
4281 default:
4282 BUG_ON(1);
4283 break;
4284 }
4285
4286 spin_lock_irqsave(&tgt->sess_work_lock, flags);
4287
4288 kfree(prm);
4289 }
4290 spin_unlock_irqrestore(&tgt->sess_work_lock, flags);
4291}
4292
4293/* Must be called under tgt_host_action_mutex */
4294int qlt_add_target(struct qla_hw_data *ha, struct scsi_qla_host *base_vha)
4295{
4296 struct qla_tgt *tgt;
4297
4298 if (!QLA_TGT_MODE_ENABLED())
4299 return 0;
4300
4301 ql_dbg(ql_dbg_tgt, base_vha, 0xe03b,
4302 "Registering target for host %ld(%p)", base_vha->host_no, ha);
4303
4304 BUG_ON((ha->tgt.qla_tgt != NULL) || (ha->tgt.tgt_ops != NULL));
4305
4306 tgt = kzalloc(sizeof(struct qla_tgt), GFP_KERNEL);
4307 if (!tgt) {
4308 ql_dbg(ql_dbg_tgt, base_vha, 0xe066,
4309 "Unable to allocate struct qla_tgt\n");
4310 return -ENOMEM;
4311 }
4312
4313 if (!(base_vha->host->hostt->supported_mode & MODE_TARGET))
4314 base_vha->host->hostt->supported_mode |= MODE_TARGET;
4315
4316 tgt->ha = ha;
4317 tgt->vha = base_vha;
4318 init_waitqueue_head(&tgt->waitQ);
4319 INIT_LIST_HEAD(&tgt->sess_list);
4320 INIT_LIST_HEAD(&tgt->del_sess_list);
4321 INIT_DELAYED_WORK(&tgt->sess_del_work,
4322 (void (*)(struct work_struct *))qlt_del_sess_work_fn);
4323 spin_lock_init(&tgt->sess_work_lock);
4324 INIT_WORK(&tgt->sess_work, qlt_sess_work_fn);
4325 INIT_LIST_HEAD(&tgt->sess_works_list);
4326 spin_lock_init(&tgt->srr_lock);
4327 INIT_LIST_HEAD(&tgt->srr_ctio_list);
4328 INIT_LIST_HEAD(&tgt->srr_imm_list);
4329 INIT_WORK(&tgt->srr_work, qlt_handle_srr_work);
4330 atomic_set(&tgt->tgt_global_resets_count, 0);
4331
4332 ha->tgt.qla_tgt = tgt;
4333
4334 ql_dbg(ql_dbg_tgt, base_vha, 0xe067,
4335 "qla_target(%d): using 64 Bit PCI addressing",
4336 base_vha->vp_idx);
4337 tgt->tgt_enable_64bit_addr = 1;
4338 /* 3 is reserved */
4339 tgt->sg_tablesize = QLA_TGT_MAX_SG_24XX(base_vha->req->length - 3);
4340 tgt->datasegs_per_cmd = QLA_TGT_DATASEGS_PER_CMD_24XX;
4341 tgt->datasegs_per_cont = QLA_TGT_DATASEGS_PER_CONT_24XX;
4342
4343 mutex_lock(&qla_tgt_mutex);
4344 list_add_tail(&tgt->tgt_list_entry, &qla_tgt_glist);
4345 mutex_unlock(&qla_tgt_mutex);
4346
4347 return 0;
4348}
4349
4350/* Must be called under tgt_host_action_mutex */
4351int qlt_remove_target(struct qla_hw_data *ha, struct scsi_qla_host *vha)
4352{
4353 if (!ha->tgt.qla_tgt)
4354 return 0;
4355
4356 mutex_lock(&qla_tgt_mutex);
4357 list_del(&ha->tgt.qla_tgt->tgt_list_entry);
4358 mutex_unlock(&qla_tgt_mutex);
4359
4360 ql_dbg(ql_dbg_tgt, vha, 0xe03c, "Unregistering target for host %ld(%p)",
4361 vha->host_no, ha);
4362 qlt_release(ha->tgt.qla_tgt);
4363
4364 return 0;
4365}
4366
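/* Dump the HW node/port names and the WWPN passed in from configfs */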
4367static void qlt_lport_dump(struct scsi_qla_host *vha, u64 wwpn,
4368 unsigned char *b)
4369{
4370 int i;
4371
4372 pr_debug("qla2xxx HW vha->node_name: ");
4373 for (i = 0; i < WWN_SIZE; i++)
4374 pr_debug("%02x ", vha->node_name[i]);
4375 pr_debug("\n");
4376 pr_debug("qla2xxx HW vha->port_name: ");
4377 for (i = 0; i < WWN_SIZE; i++)
4378 pr_debug("%02x ", vha->port_name[i]);
4379 pr_debug("\n");
4380
4381 pr_debug("qla2xxx passed configfs WWPN: ");
4382 put_unaligned_be64(wwpn, b);
4383 for (i = 0; i < WWN_SIZE; i++)
4384 pr_debug("%02x ", b[i]);
4385 pr_debug("\n");
4386}
4387
4388/**
4389 * qlt_lport_register - register lport with external module
4390 *
4391 * @qla_tgt_ops: Pointer for tcm_qla2xxx qla_tgt_ops
4392 * @wwpn: Passed FC target WWPN
4393 * @callback: lport initialization callback for tcm_qla2xxx code
4394 * @target_lport_ptr: pointer for tcm_qla2xxx specific lport data
4395 */
4396int qlt_lport_register(struct qla_tgt_func_tmpl *qla_tgt_ops, u64 wwpn,
4397 int (*callback)(struct scsi_qla_host *), void *target_lport_ptr)
4398{
4399 struct qla_tgt *tgt;
4400 struct scsi_qla_host *vha;
4401 struct qla_hw_data *ha;
4402 struct Scsi_Host *host;
4403 unsigned long flags;
4404 int rc;
4405 u8 b[WWN_SIZE];
4406
4407 mutex_lock(&qla_tgt_mutex);
4408 list_for_each_entry(tgt, &qla_tgt_glist, tgt_list_entry) {
4409 vha = tgt->vha;
4410 ha = vha->hw;
4411
4412 host = vha->host;
4413 if (!host)
4414 continue;
4415
4416 if (ha->tgt.tgt_ops != NULL)
4417 continue;
4418
4419 if (!(host->hostt->supported_mode & MODE_TARGET))
4420 continue;
4421
4422 spin_lock_irqsave(&ha->hardware_lock, flags);
4423 if (host->active_mode & MODE_TARGET) {
4424 pr_debug("MODE_TARGET already active on qla2xxx(%d)\n",
4425 host->host_no);
4426 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4427 continue;
4428 }
4429 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4430
4431 if (!scsi_host_get(host)) {
4432 ql_dbg(ql_dbg_tgt, vha, 0xe068,
4433 "Unable to scsi_host_get() for"
4434 " qla2xxx scsi_host\n");
4435 continue;
4436 }
4437 qlt_lport_dump(vha, wwpn, b);
4438
4439 if (memcmp(vha->port_name, b, WWN_SIZE)) {
4440 scsi_host_put(host);
4441 continue;
4442 }
4443 /*
4444 * Setup passed parameters ahead of invoking callback
4445 */
4446 ha->tgt.tgt_ops = qla_tgt_ops;
4447 ha->tgt.target_lport_ptr = target_lport_ptr;
4448 rc = (*callback)(vha);
4449 if (rc != 0) {
4450 ha->tgt.tgt_ops = NULL;
4451 ha->tgt.target_lport_ptr = NULL;
4452 }
4453 mutex_unlock(&qla_tgt_mutex);
4454 return rc;
4455 }
4456 mutex_unlock(&qla_tgt_mutex);
4457
4458 return -ENODEV;
4459}
4460EXPORT_SYMBOL(qlt_lport_register);
4461
4462/**
4463 * qlt_lport_deregister - Deregister lport
4464 *
4465 * @vha: Registered scsi_qla_host pointer
4466 */
4467void qlt_lport_deregister(struct scsi_qla_host *vha)
4468{
4469 struct qla_hw_data *ha = vha->hw;
4470 struct Scsi_Host *sh = vha->host;
4471 /*
4472 * Clear the target_lport_ptr qla_target_template pointer in qla_hw_data
4473 */
4474 ha->tgt.target_lport_ptr = NULL;
4475 ha->tgt.tgt_ops = NULL;
4476 /*
4477 * Release the Scsi_Host reference for the underlying qla2xxx host
4478 */
4479 scsi_host_put(sh);
4480}
4481EXPORT_SYMBOL(qlt_lport_deregister);
4482
4483/* Must be called under HW lock */
4484void qlt_set_mode(struct scsi_qla_host *vha)
4485{
4486 struct qla_hw_data *ha = vha->hw;
4487
4488 switch (ql2x_ini_mode) {
4489 case QLA2XXX_INI_MODE_DISABLED:
4490 case QLA2XXX_INI_MODE_EXCLUSIVE:
4491 vha->host->active_mode = MODE_TARGET;
4492 break;
4493 case QLA2XXX_INI_MODE_ENABLED:
4494 vha->host->active_mode |= MODE_TARGET;
4495 break;
4496 default:
4497 break;
4498 }
4499
4500 if (ha->tgt.ini_mode_force_reverse)
4501 qla_reverse_ini_mode(vha);
4502}
4503
4504/* Must be called under HW lock */
4505void qlt_clear_mode(struct scsi_qla_host *vha)
4506{
4507 struct qla_hw_data *ha = vha->hw;
4508
4509 switch (ql2x_ini_mode) {
4510 case QLA2XXX_INI_MODE_DISABLED:
4511 vha->host->active_mode = MODE_UNKNOWN;
4512 break;
4513 case QLA2XXX_INI_MODE_EXCLUSIVE:
4514 vha->host->active_mode = MODE_INITIATOR;
4515 break;
4516 case QLA2XXX_INI_MODE_ENABLED:
4517 vha->host->active_mode &= ~MODE_TARGET;
4518 break;
4519 default:
4520 break;
4521 }
4522
4523 if (ha->tgt.ini_mode_force_reverse)
4524 qla_reverse_ini_mode(vha);
4525}
4526
4527/*
4528 * qlt_enable_vha - NO LOCK HELD
4529 *
4530 * host_reset, bring up w/ Target Mode Enabled
4531 */
4532void
4533qlt_enable_vha(struct scsi_qla_host *vha)
4534{
4535 struct qla_hw_data *ha = vha->hw;
4536 struct qla_tgt *tgt = ha->tgt.qla_tgt;
4537 unsigned long flags;
4538
4539 if (!tgt) {
4540 ql_dbg(ql_dbg_tgt, vha, 0xe069,
4541 "Unable to locate qla_tgt pointer from"
4542 " struct qla_hw_data\n");
4543 dump_stack();
4544 return;
4545 }
4546
4547 spin_lock_irqsave(&ha->hardware_lock, flags);
4548 tgt->tgt_stopped = 0;
4549 qlt_set_mode(vha);
4550 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4551
4552 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4553 qla2xxx_wake_dpc(vha);
4554 qla2x00_wait_for_hba_online(vha);
4555}
4556EXPORT_SYMBOL(qlt_enable_vha);
4557
4558/*
4559 * qlt_disable_vha - NO LOCK HELD
4560 *
4561 * Disable Target Mode and reset the adapter
4562 */
4563void
4564qlt_disable_vha(struct scsi_qla_host *vha)
4565{
4566 struct qla_hw_data *ha = vha->hw;
4567 struct qla_tgt *tgt = ha->tgt.qla_tgt;
4568 unsigned long flags;
4569
4570 if (!tgt) {
4571 ql_dbg(ql_dbg_tgt, vha, 0xe06a,
4572 "Unable to locate qla_tgt pointer from"
4573 " struct qla_hw_data\n");
4574 dump_stack();
4575 return;
4576 }
4577
4578 spin_lock_irqsave(&ha->hardware_lock, flags);
4579 qlt_clear_mode(vha);
4580 spin_unlock_irqrestore(&ha->hardware_lock, flags);
4581
4582 set_bit(ISP_ABORT_NEEDED, &vha->dpc_flags);
4583 qla2xxx_wake_dpc(vha);
4584 qla2x00_wait_for_hba_online(vha);
4585}
4586
4587/*
4588 * Called from qla_init.c:qla24xx_vport_create() context to set up
4589 * the target mode specific struct scsi_qla_host and struct qla_hw_data
4590 * members.
4591 */
4592void
4593qlt_vport_create(struct scsi_qla_host *vha, struct qla_hw_data *ha)
4594{
4595 if (!qla_tgt_mode_enabled(vha))
4596 return;
4597
4598 mutex_init(&ha->tgt.tgt_mutex);
4599 mutex_init(&ha->tgt.tgt_host_action_mutex);
4600
4601 qlt_clear_mode(vha);
4602
4603 /*
4604 * NOTE: Currently the value is kept the same for <24xx and
4605 * >=24xx ISPs. If it ever needs to differ, a check for the
4606 * specific ISP type should be added here and the value
4607 * assigned appropriately.
4608 */
4609 ha->tgt.atio_q_length = ATIO_ENTRY_CNT_24XX;
4610}
4611
4612void
4613qlt_rff_id(struct scsi_qla_host *vha, struct ct_sns_req *ct_req)
4614{
4615 /*
4616 * FC-4 Feature bit 0 indicates target functionality to the name server.
4617 */
4618 if (qla_tgt_mode_enabled(vha)) {
4619 if (qla_ini_mode_enabled(vha))
4620 ct_req->req.rff_id.fc4_feature = BIT_0 | BIT_1;
4621 else
4622 ct_req->req.rff_id.fc4_feature = BIT_0;
4623 } else if (qla_ini_mode_enabled(vha)) {
4624 ct_req->req.rff_id.fc4_feature = BIT_1;
4625 }
4626}
4627
4628/*
4629 * qlt_init_atio_q_entries() - Initializes ATIO queue entries.
4630 * @ha: HA context
4631 *
4632 * Beginning of ATIO ring has initialization control block already built
4633 * by nvram config routine.
4634 *
4635 * Marks each ATIO queue entry with the ATIO_PROCESSED signature.
4636 */
4637void
4638qlt_init_atio_q_entries(struct scsi_qla_host *vha)
4639{
4640 struct qla_hw_data *ha = vha->hw;
4641 uint16_t cnt;
4642 struct atio_from_isp *pkt = (struct atio_from_isp *)ha->tgt.atio_ring;
4643
4644 if (!qla_tgt_mode_enabled(vha))
4645 return;
4646
4647 for (cnt = 0; cnt < ha->tgt.atio_q_length; cnt++) {
4648 pkt->u.raw.signature = ATIO_PROCESSED;
4649 pkt++;
4650 }
4651
4652}
4653
4654/*
4655 * qlt_24xx_process_atio_queue() - Process ATIO queue entries.
4656 * @ha: SCSI driver HA context
4657 */
4658void
4659qlt_24xx_process_atio_queue(struct scsi_qla_host *vha)
4660{
4661 struct qla_hw_data *ha = vha->hw;
4662 struct device_reg_24xx __iomem *reg = &ha->iobase->isp24;
4663 struct atio_from_isp *pkt;
4664 int cnt, i;
4665
4666 if (!vha->flags.online)
4667 return;
4668
4669 while (ha->tgt.atio_ring_ptr->signature != ATIO_PROCESSED) {
4670 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4671 cnt = pkt->u.raw.entry_count;
4672
4673 qlt_24xx_atio_pkt_all_vps(vha, (struct atio_from_isp *)pkt);
4674
4675 for (i = 0; i < cnt; i++) {
4676 ha->tgt.atio_ring_index++;
4677 if (ha->tgt.atio_ring_index == ha->tgt.atio_q_length) {
4678 ha->tgt.atio_ring_index = 0;
4679 ha->tgt.atio_ring_ptr = ha->tgt.atio_ring;
4680 } else
4681 ha->tgt.atio_ring_ptr++;
4682
4683 pkt->u.raw.signature = ATIO_PROCESSED;
4684 pkt = (struct atio_from_isp *)ha->tgt.atio_ring_ptr;
4685 }
4686 wmb();
4687 }
4688
4689 /* Adjust ring index */
4690 WRT_REG_DWORD(&reg->atio_q_out, ha->tgt.atio_ring_index);
4691}
4692
4693void
4694qlt_24xx_config_rings(struct scsi_qla_host *vha, device_reg_t __iomem *reg)
4695{
4696 struct qla_hw_data *ha = vha->hw;
4697
4698/* FIXME: atio_q in/out for ha->mqenable=1..? */
4699 if (ha->mqenable) {
4700#if 0
4701 WRT_REG_DWORD(&reg->isp25mq.atio_q_in, 0);
4702 WRT_REG_DWORD(&reg->isp25mq.atio_q_out, 0);
4703 RD_REG_DWORD(&reg->isp25mq.atio_q_out);
4704#endif
4705 } else {
4706 /* Setup ATIO registers for target mode */
4707 WRT_REG_DWORD(&reg->isp24.atio_q_in, 0);
4708 WRT_REG_DWORD(&reg->isp24.atio_q_out, 0);
4709 RD_REG_DWORD(&reg->isp24.atio_q_out);
4710 }
4711}
4712
4713void
4714qlt_24xx_config_nvram_stage1(struct scsi_qla_host *vha, struct nvram_24xx *nv)
4715{
4716 struct qla_hw_data *ha = vha->hw;
4717
4718 if (qla_tgt_mode_enabled(vha)) {
4719 if (!ha->tgt.saved_set) {
4720 /* We save only once */
4721 ha->tgt.saved_exchange_count = nv->exchange_count;
4722 ha->tgt.saved_firmware_options_1 =
4723 nv->firmware_options_1;
4724 ha->tgt.saved_firmware_options_2 =
4725 nv->firmware_options_2;
4726 ha->tgt.saved_firmware_options_3 =
4727 nv->firmware_options_3;
4728 ha->tgt.saved_set = 1;
4729 }
4730
4731 nv->exchange_count = __constant_cpu_to_le16(0xFFFF);
4732
4733 /* Enable target mode */
4734 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_4);
4735
4736 /* Disable ini mode, if requested */
4737 if (!qla_ini_mode_enabled(vha))
4738 nv->firmware_options_1 |= __constant_cpu_to_le32(BIT_5);
4739
4740 /* Disable Full Login after LIP */
4741 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_13);
4742 /* Enable initial LIP */
4743 nv->firmware_options_1 &= __constant_cpu_to_le32(~BIT_9);
4744 /* Enable FC tapes support */
4745 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_12);
4746 /* Disable Full Login after LIP */
4747 nv->host_p &= __constant_cpu_to_le32(~BIT_10);
4748 /* Enable target PRLI control */
4749 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_14);
4750 } else {
4751 if (ha->tgt.saved_set) {
4752 nv->exchange_count = ha->tgt.saved_exchange_count;
4753 nv->firmware_options_1 =
4754 ha->tgt.saved_firmware_options_1;
4755 nv->firmware_options_2 =
4756 ha->tgt.saved_firmware_options_2;
4757 nv->firmware_options_3 =
4758 ha->tgt.saved_firmware_options_3;
4759 }
4760 return;
4761 }
4762
4763 /* out-of-order frames reassembly */
4764 nv->firmware_options_3 |= BIT_6|BIT_9;
4765
4766 if (ha->tgt.enable_class_2) {
4767 if (vha->flags.init_done)
4768 fc_host_supported_classes(vha->host) =
4769 FC_COS_CLASS2 | FC_COS_CLASS3;
4770
4771 nv->firmware_options_2 |= __constant_cpu_to_le32(BIT_8);
4772 } else {
4773 if (vha->flags.init_done)
4774 fc_host_supported_classes(vha->host) = FC_COS_CLASS3;
4775
4776 nv->firmware_options_2 &= ~__constant_cpu_to_le32(BIT_8);
4777 }
4778}
4779
4780void
4781qlt_24xx_config_nvram_stage2(struct scsi_qla_host *vha,
4782 struct init_cb_24xx *icb)
4783{
4784 struct qla_hw_data *ha = vha->hw;
4785
4786 if (ha->tgt.node_name_set) {
4787 memcpy(icb->node_name, ha->tgt.tgt_node_name, WWN_SIZE);
4788 icb->firmware_options_1 |= __constant_cpu_to_le32(BIT_14);
4789 }
4790}
4791
4792int
4793qlt_24xx_process_response_error(struct scsi_qla_host *vha,
4794 struct sts_entry_24xx *pkt)
4795{
4796 switch (pkt->entry_type) {
4797 case ABTS_RECV_24XX:
4798 case ABTS_RESP_24XX:
4799 case CTIO_TYPE7:
4800 case NOTIFY_ACK_TYPE:
4801 return 1;
4802 default:
4803 return 0;
4804 }
4805}
4806
4807void
4808qlt_modify_vp_config(struct scsi_qla_host *vha,
4809 struct vp_config_entry_24xx *vpmod)
4810{
4811 if (qla_tgt_mode_enabled(vha))
4812 vpmod->options_idx1 &= ~BIT_5;
4813 /* Disable ini mode, if requested */
4814 if (!qla_ini_mode_enabled(vha))
4815 vpmod->options_idx1 &= ~BIT_4;
4816}
4817
4818void
4819qlt_probe_one_stage1(struct scsi_qla_host *base_vha, struct qla_hw_data *ha)
4820{
4821 if (!QLA_TGT_MODE_ENABLED())
4822 return;
4823
4824 mutex_init(&ha->tgt.tgt_mutex);
4825 mutex_init(&ha->tgt.tgt_host_action_mutex);
4826 qlt_clear_mode(base_vha);
4827}
4828
4829int
4830qlt_mem_alloc(struct qla_hw_data *ha)
4831{
4832 if (!QLA_TGT_MODE_ENABLED())
4833 return 0;
4834
4835 ha->tgt.tgt_vp_map = kzalloc(sizeof(struct qla_tgt_vp_map) *
4836 MAX_MULTI_ID_FABRIC, GFP_KERNEL);
4837 if (!ha->tgt.tgt_vp_map)
4838 return -ENOMEM;
4839
4840 ha->tgt.atio_ring = dma_alloc_coherent(&ha->pdev->dev,
4841 (ha->tgt.atio_q_length + 1) * sizeof(struct atio_from_isp),
4842 &ha->tgt.atio_dma, GFP_KERNEL);
4843 if (!ha->tgt.atio_ring) {
4844 kfree(ha->tgt.tgt_vp_map);
4845 return -ENOMEM;
4846 }
4847 return 0;
4848}
4849
4850void
4851qlt_mem_free(struct qla_hw_data *ha)
4852{
4853 if (!QLA_TGT_MODE_ENABLED())
4854 return;
4855
4856 if (ha->tgt.atio_ring) {
4857 dma_free_coherent(&ha->pdev->dev, (ha->tgt.atio_q_length + 1) *
4858 sizeof(struct atio_from_isp), ha->tgt.atio_ring,
4859 ha->tgt.atio_dma);
4860 }
4861 kfree(ha->tgt.tgt_vp_map);
4862}
4863
4864/* vport_slock to be held by the caller */
4865void
4866qlt_update_vp_map(struct scsi_qla_host *vha, int cmd)
4867{
4868 if (!QLA_TGT_MODE_ENABLED())
4869 return;
4870
4871 switch (cmd) {
4872 case SET_VP_IDX:
4873 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = vha;
4874 break;
4875 case SET_AL_PA:
4876 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = vha->vp_idx;
4877 break;
4878 case RESET_VP_IDX:
4879 vha->hw->tgt.tgt_vp_map[vha->vp_idx].vha = NULL;
4880 break;
4881 case RESET_AL_PA:
4882 vha->hw->tgt.tgt_vp_map[vha->d_id.b.al_pa].idx = 0;
4883 break;
4884 }
4885}
4886
4887static int __init qlt_parse_ini_mode(void)
4888{
4889 if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_EXCLUSIVE) == 0)
4890 ql2x_ini_mode = QLA2XXX_INI_MODE_EXCLUSIVE;
4891 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_DISABLED) == 0)
4892 ql2x_ini_mode = QLA2XXX_INI_MODE_DISABLED;
4893 else if (strcasecmp(qlini_mode, QLA2XXX_INI_MODE_STR_ENABLED) == 0)
4894 ql2x_ini_mode = QLA2XXX_INI_MODE_ENABLED;
4895 else
4896 return false;
4897
4898 return true;
4899}
4900
4901int __init qlt_init(void)
4902{
4903 int ret;
4904
4905 if (!qlt_parse_ini_mode()) {
4906 ql_log(ql_log_fatal, NULL, 0xe06b,
4907 "qlt_parse_ini_mode() failed\n");
4908 return -EINVAL;
4909 }
4910
4911 if (!QLA_TGT_MODE_ENABLED())
4912 return 0;
4913
4914 qla_tgt_cmd_cachep = kmem_cache_create("qla_tgt_cmd_cachep",
4915 sizeof(struct qla_tgt_cmd), __alignof__(struct qla_tgt_cmd), 0,
4916 NULL);
4917 if (!qla_tgt_cmd_cachep) {
4918 ql_log(ql_log_fatal, NULL, 0xe06c,
4919 "kmem_cache_create for qla_tgt_cmd_cachep failed\n");
4920 return -ENOMEM;
4921 }
4922
4923 qla_tgt_mgmt_cmd_cachep = kmem_cache_create("qla_tgt_mgmt_cmd_cachep",
4924 sizeof(struct qla_tgt_mgmt_cmd), __alignof__(struct
4925 qla_tgt_mgmt_cmd), 0, NULL);
4926 if (!qla_tgt_mgmt_cmd_cachep) {
4927 ql_log(ql_log_fatal, NULL, 0xe06d,
4928 "kmem_cache_create for qla_tgt_mgmt_cmd_cachep failed\n");
4929 ret = -ENOMEM;
4930 goto out;
4931 }
4932
4933 qla_tgt_mgmt_cmd_mempool = mempool_create(25, mempool_alloc_slab,
4934 mempool_free_slab, qla_tgt_mgmt_cmd_cachep);
4935 if (!qla_tgt_mgmt_cmd_mempool) {
4936 ql_log(ql_log_fatal, NULL, 0xe06e,
4937 "mempool_create for qla_tgt_mgmt_cmd_mempool failed\n");
4938 ret = -ENOMEM;
4939 goto out_mgmt_cmd_cachep;
4940 }
4941
4942 qla_tgt_wq = alloc_workqueue("qla_tgt_wq", 0, 0);
4943 if (!qla_tgt_wq) {
4944 ql_log(ql_log_fatal, NULL, 0xe06f,
4945 "alloc_workqueue for qla_tgt_wq failed\n");
4946 ret = -ENOMEM;
4947 goto out_cmd_mempool;
4948 }
4949 /*
4950 * Return 1 to signal that initiator-mode is being disabled
4951 */
4952 return (ql2x_ini_mode == QLA2XXX_INI_MODE_DISABLED) ? 1 : 0;
4953
4954out_cmd_mempool:
4955 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
4956out_mgmt_cmd_cachep:
4957 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
4958out:
4959 kmem_cache_destroy(qla_tgt_cmd_cachep);
4960 return ret;
4961}
4962
4963void qlt_exit(void)
4964{
4965 if (!QLA_TGT_MODE_ENABLED())
4966 return;
4967
4968 destroy_workqueue(qla_tgt_wq);
4969 mempool_destroy(qla_tgt_mgmt_cmd_mempool);
4970 kmem_cache_destroy(qla_tgt_mgmt_cmd_cachep);
4971 kmem_cache_destroy(qla_tgt_cmd_cachep);
4972}
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
new file mode 100644
index 000000000000..9f9ef1644fd9
--- /dev/null
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -0,0 +1,1004 @@
1/*
2 * Copyright (C) 2004 - 2010 Vladislav Bolkhovitin <vst@vlnb.net>
3 * Copyright (C) 2004 - 2005 Leonid Stoljar
4 * Copyright (C) 2006 Nathaniel Clark <nate@misrule.us>
5 * Copyright (C) 2007 - 2010 ID7 Ltd.
6 *
7 * Forward port and refactoring to modern qla2xxx and target/configfs
8 *
9 * Copyright (C) 2010-2011 Nicholas A. Bellinger <nab@kernel.org>
10 *
11 * Additional file for the target driver support.
12 *
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version 2
16 * of the License, or (at your option) any later version.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 */
23/*
24 * This is the global definitions file intended to be included from the
25 * target portion.
26 */
27
28#ifndef __QLA_TARGET_H
29#define __QLA_TARGET_H
30
31#include "qla_def.h"
32
33/*
34 * Must be changed on any change in any initiator visible interfaces or
35 * data in the target add-on
36 */
37#define QLA2XXX_TARGET_MAGIC 269
38
39/*
40 * Must be changed on any change in any target visible interfaces or
41 * data in the initiator
42 */
43#define QLA2XXX_INITIATOR_MAGIC 57222
44
45#define QLA2XXX_INI_MODE_STR_EXCLUSIVE "exclusive"
46#define QLA2XXX_INI_MODE_STR_DISABLED "disabled"
47#define QLA2XXX_INI_MODE_STR_ENABLED "enabled"
48
49#define QLA2XXX_INI_MODE_EXCLUSIVE 0
50#define QLA2XXX_INI_MODE_DISABLED 1
51#define QLA2XXX_INI_MODE_ENABLED 2
52
53#define QLA2XXX_COMMAND_COUNT_INIT 250
54#define QLA2XXX_IMMED_NOTIFY_COUNT_INIT 250
55
56/*
57 * Used to mark which completion handles (for RIO Statuses) are for CTIOs
58 * vs. regular (non-target) info. This is checked in
59 * qla2x00_process_response_queue() to see if a handle coming back in a
60 * multi-complete should go to the tgt driver or be handled by qla2xxx itself.
61 */
62#define CTIO_COMPLETION_HANDLE_MARK BIT_29
63#if (CTIO_COMPLETION_HANDLE_MARK <= MAX_OUTSTANDING_COMMANDS)
64#error "CTIO_COMPLETION_HANDLE_MARK not larger than MAX_OUTSTANDING_COMMANDS"
65#endif
66#define HANDLE_IS_CTIO_COMP(h) (h & CTIO_COMPLETION_HANDLE_MARK)
67
68/* Used to mark CTIO as intermediate */
69#define CTIO_INTERMEDIATE_HANDLE_MARK BIT_30
70
71#ifndef OF_SS_MODE_0
72/*
73 * ISP target entries - Flags bit definitions.
74 */
75#define OF_SS_MODE_0 0
76#define OF_SS_MODE_1 1
77#define OF_SS_MODE_2 2
78#define OF_SS_MODE_3 3
79
80#define OF_EXPL_CONF BIT_5 /* Explicit Confirmation Requested */
81#define OF_DATA_IN BIT_6 /* Data in to initiator */
82 /* (data from target to initiator) */
83#define OF_DATA_OUT BIT_7 /* Data out from initiator */
84 /* (data from initiator to target) */
85#define OF_NO_DATA (BIT_7 | BIT_6)
86#define OF_INC_RC BIT_8 /* Increment command resource count */
87#define OF_FAST_POST BIT_9 /* Enable mailbox fast posting. */
88#define OF_CONF_REQ BIT_13 /* Confirmation Requested */
89#define OF_TERM_EXCH BIT_14 /* Terminate exchange */
90#define OF_SSTS BIT_15 /* Send SCSI status */
91#endif
92
93#ifndef QLA_TGT_DATASEGS_PER_CMD32
94#define QLA_TGT_DATASEGS_PER_CMD32 3
95#define QLA_TGT_DATASEGS_PER_CONT32 7
96#define QLA_TGT_MAX_SG32(ql) \
97 (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD32 + \
98 QLA_TGT_DATASEGS_PER_CONT32*((ql) - 1)) : 0)
99
100#define QLA_TGT_DATASEGS_PER_CMD64 2
101#define QLA_TGT_DATASEGS_PER_CONT64 5
102#define QLA_TGT_MAX_SG64(ql) \
103 (((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD64 + \
104 QLA_TGT_DATASEGS_PER_CONT64*((ql) - 1)) : 0)
105#endif
106
107#ifndef QLA_TGT_DATASEGS_PER_CMD_24XX
108#define QLA_TGT_DATASEGS_PER_CMD_24XX 1
109#define QLA_TGT_DATASEGS_PER_CONT_24XX 5
110#define QLA_TGT_MAX_SG_24XX(ql) \
111 (min(1270, ((ql) > 0) ? (QLA_TGT_DATASEGS_PER_CMD_24XX + \
112 QLA_TGT_DATASEGS_PER_CONT_24XX*((ql) - 1)) : 0))
113#endif
114#endif
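
/*
 * Worked example (illustrative, not part of the driver): with a request
 * queue length of ql = 128 entries, QLA_TGT_MAX_SG_24XX(128) =
 * min(1270, 1 + 5 * 127) = 636 data segments, QLA_TGT_MAX_SG64(128) =
 * 2 + 5 * 127 = 637, and QLA_TGT_MAX_SG32(128) = 3 + 7 * 127 = 892.
 */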
115
116#define GET_TARGET_ID(ha, iocb) ((HAS_EXTENDED_IDS(ha)) \
117 ? le16_to_cpu((iocb)->u.isp2x.target.extended) \
118 : (uint16_t)(iocb)->u.isp2x.target.id.standard)
119
120#ifndef IMMED_NOTIFY_TYPE
121#define IMMED_NOTIFY_TYPE 0x0D /* Immediate notify entry. */
122/*
123 * ISP queue - immediate notify entry structure definition.
124 * This is sent by the ISP to the Target driver.
125 * This IOCB carries a report of events sent by the
126 * initiator that need to be handled by the target
127 * driver immediately.
128 */
129struct imm_ntfy_from_isp {
130 uint8_t entry_type; /* Entry type. */
131 uint8_t entry_count; /* Entry count. */
132 uint8_t sys_define; /* System defined. */
133 uint8_t entry_status; /* Entry Status. */
134 union {
135 struct {
136 uint32_t sys_define_2; /* System defined. */
137 target_id_t target;
138 uint16_t lun;
139 uint8_t target_id;
140 uint8_t reserved_1;
141 uint16_t status_modifier;
142 uint16_t status;
143 uint16_t task_flags;
144 uint16_t seq_id;
145 uint16_t srr_rx_id;
146 uint32_t srr_rel_offs;
147 uint16_t srr_ui;
148#define SRR_IU_DATA_IN 0x1
149#define SRR_IU_DATA_OUT 0x5
150#define SRR_IU_STATUS 0x7
151 uint16_t srr_ox_id;
152 uint8_t reserved_2[28];
153 } isp2x;
154 struct {
155 uint32_t reserved;
156 uint16_t nport_handle;
157 uint16_t reserved_2;
158 uint16_t flags;
159#define NOTIFY24XX_FLAGS_GLOBAL_TPRLO BIT_1
160#define NOTIFY24XX_FLAGS_PUREX_IOCB BIT_0
161 uint16_t srr_rx_id;
162 uint16_t status;
163 uint8_t status_subcode;
164 uint8_t reserved_3;
165 uint32_t exchange_address;
166 uint32_t srr_rel_offs;
167 uint16_t srr_ui;
168 uint16_t srr_ox_id;
169 uint8_t reserved_4[19];
170 uint8_t vp_index;
171 uint32_t reserved_5;
172 uint8_t port_id[3];
173 uint8_t reserved_6;
174 } isp24;
175 } u;
176 uint16_t reserved_7;
177 uint16_t ox_id;
178} __packed;
179#endif
180
181#ifndef NOTIFY_ACK_TYPE
182#define NOTIFY_ACK_TYPE 0x0E /* Notify acknowledge entry. */
183/*
184 * ISP queue - notify acknowledge entry structure definition.
185 * This is sent to the ISP from the target driver.
186 */
187struct nack_to_isp {
188 uint8_t entry_type; /* Entry type. */
189 uint8_t entry_count; /* Entry count. */
190 uint8_t sys_define; /* System defined. */
191 uint8_t entry_status; /* Entry Status. */
192 union {
193 struct {
194 uint32_t sys_define_2; /* System defined. */
195 target_id_t target;
196 uint8_t target_id;
197 uint8_t reserved_1;
198 uint16_t flags;
199 uint16_t resp_code;
200 uint16_t status;
201 uint16_t task_flags;
202 uint16_t seq_id;
203 uint16_t srr_rx_id;
204 uint32_t srr_rel_offs;
205 uint16_t srr_ui;
206 uint16_t srr_flags;
207 uint16_t srr_reject_code;
208 uint8_t srr_reject_vendor_uniq;
209 uint8_t srr_reject_code_expl;
210 uint8_t reserved_2[24];
211 } isp2x;
212 struct {
213 uint32_t handle;
214 uint16_t nport_handle;
215 uint16_t reserved_1;
216 uint16_t flags;
217 uint16_t srr_rx_id;
218 uint16_t status;
219 uint8_t status_subcode;
220 uint8_t reserved_3;
221 uint32_t exchange_address;
222 uint32_t srr_rel_offs;
223 uint16_t srr_ui;
224 uint16_t srr_flags;
225 uint8_t reserved_4[19];
226 uint8_t vp_index;
227 uint8_t srr_reject_vendor_uniq;
228 uint8_t srr_reject_code_expl;
229 uint8_t srr_reject_code;
230 uint8_t reserved_5[5];
231 } isp24;
232 } u;
233 uint8_t reserved[2];
234 uint16_t ox_id;
235} __packed;
236#define NOTIFY_ACK_SRR_FLAGS_ACCEPT 0
237#define NOTIFY_ACK_SRR_FLAGS_REJECT 1
238
239#define NOTIFY_ACK_SRR_REJECT_REASON_UNABLE_TO_PERFORM 0x9
240
241#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_NO_EXPL 0
242#define NOTIFY_ACK_SRR_FLAGS_REJECT_EXPL_UNABLE_TO_SUPPLY_DATA 0x2a
243
244#define NOTIFY_ACK_SUCCESS 0x01
245#endif
246
247#ifndef ACCEPT_TGT_IO_TYPE
248#define ACCEPT_TGT_IO_TYPE 0x16 /* Accept target I/O entry. */
249#endif
250
251#ifndef CONTINUE_TGT_IO_TYPE
252#define CONTINUE_TGT_IO_TYPE 0x17
253/*
254 * ISP queue - Continue Target I/O (CTIO) entry for status mode 0 structure.
255 * This structure is sent to the ISP 2xxx from target driver.
256 */
257struct ctio_to_2xxx {
258 uint8_t entry_type; /* Entry type. */
259 uint8_t entry_count; /* Entry count. */
260 uint8_t sys_define; /* System defined. */
261 uint8_t entry_status; /* Entry Status. */
262 uint32_t handle; /* System defined handle */
263 target_id_t target;
264 uint16_t rx_id;
265 uint16_t flags;
266 uint16_t status;
267 uint16_t timeout; /* 0 = 30 seconds, 0xFFFF = disable */
268 uint16_t dseg_count; /* Data segment count. */
269 uint32_t relative_offset;
270 uint32_t residual;
271 uint16_t reserved_1[3];
272 uint16_t scsi_status;
273 uint32_t transfer_length;
274 uint32_t dseg_0_address; /* Data segment 0 address. */
275 uint32_t dseg_0_length; /* Data segment 0 length. */
276 uint32_t dseg_1_address; /* Data segment 1 address. */
277 uint32_t dseg_1_length; /* Data segment 1 length. */
278 uint32_t dseg_2_address; /* Data segment 2 address. */
279 uint32_t dseg_2_length; /* Data segment 2 length. */
280} __packed;
281#define ATIO_PATH_INVALID 0x07
282#define ATIO_CANT_PROV_CAP 0x16
283#define ATIO_CDB_VALID 0x3D
284
285#define ATIO_EXEC_READ BIT_1
286#define ATIO_EXEC_WRITE BIT_0
287#endif
288
289#ifndef CTIO_A64_TYPE
290#define CTIO_A64_TYPE 0x1F
291#define CTIO_SUCCESS 0x01
292#define CTIO_ABORTED 0x02
293#define CTIO_INVALID_RX_ID 0x08
294#define CTIO_TIMEOUT 0x0B
295#define CTIO_LIP_RESET 0x0E
296#define CTIO_TARGET_RESET 0x17
297#define CTIO_PORT_UNAVAILABLE 0x28
298#define CTIO_PORT_LOGGED_OUT 0x29
299#define CTIO_PORT_CONF_CHANGED 0x2A
300#define CTIO_SRR_RECEIVED 0x45
301#endif
302
303#ifndef CTIO_RET_TYPE
304#define CTIO_RET_TYPE 0x17 /* CTIO return entry */
305#define ATIO_TYPE7 0x06 /* Accept target I/O entry for 24xx */
306
307struct fcp_hdr {
308 uint8_t r_ctl;
309 uint8_t d_id[3];
310 uint8_t cs_ctl;
311 uint8_t s_id[3];
312 uint8_t type;
313 uint8_t f_ctl[3];
314 uint8_t seq_id;
315 uint8_t df_ctl;
316 uint16_t seq_cnt;
317 uint16_t ox_id;
318 uint16_t rx_id;
319 uint32_t parameter;
320} __packed;
321
322struct fcp_hdr_le {
323 uint8_t d_id[3];
324 uint8_t r_ctl;
325 uint8_t s_id[3];
326 uint8_t cs_ctl;
327 uint8_t f_ctl[3];
328 uint8_t type;
329 uint16_t seq_cnt;
330 uint8_t df_ctl;
331 uint8_t seq_id;
332 uint16_t rx_id;
333 uint16_t ox_id;
334 uint32_t parameter;
335} __packed;
336
337#define F_CTL_EXCH_CONTEXT_RESP BIT_23
338#define F_CTL_SEQ_CONTEXT_RESIP BIT_22
339#define F_CTL_LAST_SEQ BIT_20
340#define F_CTL_END_SEQ BIT_19
341#define F_CTL_SEQ_INITIATIVE BIT_16
342
343#define R_CTL_BASIC_LINK_SERV 0x80
344#define R_CTL_B_ACC 0x4
345#define R_CTL_B_RJT 0x5
346
347struct atio7_fcp_cmnd {
348 uint64_t lun;
349 uint8_t cmnd_ref;
350 uint8_t task_attr:3;
351 uint8_t reserved:5;
352 uint8_t task_mgmt_flags;
353#define FCP_CMND_TASK_MGMT_CLEAR_ACA 6
354#define FCP_CMND_TASK_MGMT_TARGET_RESET 5
355#define FCP_CMND_TASK_MGMT_LU_RESET 4
356#define FCP_CMND_TASK_MGMT_CLEAR_TASK_SET 2
357#define FCP_CMND_TASK_MGMT_ABORT_TASK_SET 1
358 uint8_t wrdata:1;
359 uint8_t rddata:1;
360 uint8_t add_cdb_len:6;
361 uint8_t cdb[16];
362 /*
363 * add_cdb is optional and can be absent from struct atio7_fcp_cmnd. Size 4
364 * only to make sizeof(struct atio7_fcp_cmnd) be as expected by
365 * BUILD_BUG_ON in qlt_init().
366 */
367 uint8_t add_cdb[4];
368 /* uint32_t data_length; */
369} __packed;
370
371/*
372 * ISP queue - Accept Target I/O (ATIO) type entry IOCB structure.
373 * This is sent from the ISP to the target driver.
374 */
375struct atio_from_isp {
376 union {
377 struct {
378 uint16_t entry_hdr;
379 uint8_t sys_define; /* System defined. */
380 uint8_t entry_status; /* Entry Status. */
381 uint32_t sys_define_2; /* System defined. */
382 target_id_t target;
383 uint16_t rx_id;
384 uint16_t flags;
385 uint16_t status;
386 uint8_t command_ref;
387 uint8_t task_codes;
388 uint8_t task_flags;
389 uint8_t execution_codes;
390 uint8_t cdb[MAX_CMDSZ];
391 uint32_t data_length;
392 uint16_t lun;
393 uint8_t initiator_port_name[WWN_SIZE]; /* on qla23xx */
394 uint16_t reserved_32[6];
395 uint16_t ox_id;
396 } isp2x;
397 struct {
398 uint16_t entry_hdr;
399 uint8_t fcp_cmnd_len_low;
400 uint8_t fcp_cmnd_len_high:4;
401 uint8_t attr:4;
402 uint32_t exchange_addr;
403#define ATIO_EXCHANGE_ADDRESS_UNKNOWN 0xFFFFFFFF
404 struct fcp_hdr fcp_hdr;
405 struct atio7_fcp_cmnd fcp_cmnd;
406 } isp24;
407 struct {
408 uint8_t entry_type; /* Entry type. */
409 uint8_t entry_count; /* Entry count. */
410 uint8_t data[58];
411 uint32_t signature;
412#define ATIO_PROCESSED 0xDEADDEAD /* Signature */
413 } raw;
414 } u;
415} __packed;
416
417#define CTIO_TYPE7 0x12 /* Continue target I/O entry (for 24xx) */
418
419/*
420 * ISP queue - Continue Target I/O (CTIO) type 7 entry (for 24xx) structure.
421 * This structure is sent to the ISP 24xx from the target driver.
422 */
423
424struct ctio7_to_24xx {
425 uint8_t entry_type; /* Entry type. */
426 uint8_t entry_count; /* Entry count. */
427 uint8_t sys_define; /* System defined. */
428 uint8_t entry_status; /* Entry Status. */
429 uint32_t handle; /* System defined handle */
430 uint16_t nport_handle;
431#define CTIO7_NHANDLE_UNRECOGNIZED 0xFFFF
432 uint16_t timeout;
433 uint16_t dseg_count; /* Data segment count. */
434 uint8_t vp_index;
435 uint8_t add_flags;
436 uint8_t initiator_id[3];
437 uint8_t reserved;
438 uint32_t exchange_addr;
439 union {
440 struct {
441 uint16_t reserved1;
442 uint16_t flags;
443 uint32_t residual;
444 uint16_t ox_id;
445 uint16_t scsi_status;
446 uint32_t relative_offset;
447 uint32_t reserved2;
448 uint32_t transfer_length;
449 uint32_t reserved3;
450 /* Data segment 0 address. */
451 uint32_t dseg_0_address[2];
452 /* Data segment 0 length. */
453 uint32_t dseg_0_length;
454 } status0;
455 struct {
456 uint16_t sense_length;
457 uint16_t flags;
458 uint32_t residual;
459 uint16_t ox_id;
460 uint16_t scsi_status;
461 uint16_t response_len;
462 uint16_t reserved;
463 uint8_t sense_data[24];
464 } status1;
465 } u;
466} __packed;
467
468/*
469 * ISP queue - CTIO type 7 from ISP 24xx to target driver
470 * returned entry structure.
471 */
472struct ctio7_from_24xx {
473 uint8_t entry_type; /* Entry type. */
474 uint8_t entry_count; /* Entry count. */
475 uint8_t sys_define; /* System defined. */
476 uint8_t entry_status; /* Entry Status. */
477 uint32_t handle; /* System defined handle */
478 uint16_t status;
479 uint16_t timeout;
480 uint16_t dseg_count; /* Data segment count. */
481 uint8_t vp_index;
482 uint8_t reserved1[5];
483 uint32_t exchange_address;
484 uint16_t reserved2;
485 uint16_t flags;
486 uint32_t residual;
487 uint16_t ox_id;
488 uint16_t reserved3;
489 uint32_t relative_offset;
490 uint8_t reserved4[24];
491} __packed;
492
493/* CTIO7 flags values */
494#define CTIO7_FLAGS_SEND_STATUS BIT_15
495#define CTIO7_FLAGS_TERMINATE BIT_14
496#define CTIO7_FLAGS_CONFORM_REQ BIT_13
497#define CTIO7_FLAGS_DONT_RET_CTIO BIT_8
498#define CTIO7_FLAGS_STATUS_MODE_0 0
499#define CTIO7_FLAGS_STATUS_MODE_1 BIT_6
500#define CTIO7_FLAGS_EXPLICIT_CONFORM BIT_5
501#define CTIO7_FLAGS_CONFIRM_SATISF BIT_4
502#define CTIO7_FLAGS_DSD_PTR BIT_2
503#define CTIO7_FLAGS_DATA_IN BIT_1
504#define CTIO7_FLAGS_DATA_OUT BIT_0
505
506#define ELS_PLOGI 0x3
507#define ELS_FLOGI 0x4
508#define ELS_LOGO 0x5
509#define ELS_PRLI 0x20
510#define ELS_PRLO 0x21
511#define ELS_TPRLO 0x24
512#define ELS_PDISC 0x50
513#define ELS_ADISC 0x52
514
515/*
516 * ISP queue - ABTS received/response entries structure definition for 24xx.
517 */
518#define ABTS_RECV_24XX 0x54 /* ABTS received (for 24xx) */
519#define ABTS_RESP_24XX 0x55 /* ABTS response (for 24xx) */
520
521/*
522 * ISP queue - ABTS received IOCB entry structure definition for 24xx.
523 * The ABTS BLS received from the wire is sent to the
524 * target driver by the ISP 24xx.
525 * The IOCB is placed on the response queue.
526 */
527struct abts_recv_from_24xx {
528 uint8_t entry_type; /* Entry type. */
529 uint8_t entry_count; /* Entry count. */
530 uint8_t sys_define; /* System defined. */
531 uint8_t entry_status; /* Entry Status. */
532 uint8_t reserved_1[6];
533 uint16_t nport_handle;
534 uint8_t reserved_2[2];
535 uint8_t vp_index;
536 uint8_t reserved_3:4;
537 uint8_t sof_type:4;
538 uint32_t exchange_address;
539 struct fcp_hdr_le fcp_hdr_le;
540 uint8_t reserved_4[16];
541 uint32_t exchange_addr_to_abort;
542} __packed;
543
544#define ABTS_PARAM_ABORT_SEQ BIT_0
545
546struct ba_acc_le {
547 uint16_t reserved;
548 uint8_t seq_id_last;
549 uint8_t seq_id_valid;
550#define SEQ_ID_VALID 0x80
551#define SEQ_ID_INVALID 0x00
552 uint16_t rx_id;
553 uint16_t ox_id;
554 uint16_t high_seq_cnt;
555 uint16_t low_seq_cnt;
556} __packed;
557
558struct ba_rjt_le {
559 uint8_t vendor_uniq;
560 uint8_t reason_expl;
561 uint8_t reason_code;
562#define BA_RJT_REASON_CODE_INVALID_COMMAND 0x1
563#define BA_RJT_REASON_CODE_UNABLE_TO_PERFORM 0x9
564 uint8_t reserved;
565} __packed;
566
567/*
568 * ISP queue - ABTS Response IOCB entry structure definition for 24xx.
569 * The ABTS response to the ABTS received is sent by the
570 * target driver to the ISP 24xx.
571 * The IOCB is placed on the request queue.
572 */
573struct abts_resp_to_24xx {
574 uint8_t entry_type; /* Entry type. */
575 uint8_t entry_count; /* Entry count. */
576 uint8_t sys_define; /* System defined. */
577 uint8_t entry_status; /* Entry Status. */
578 uint32_t handle;
579 uint16_t reserved_1;
580 uint16_t nport_handle;
581 uint16_t control_flags;
582#define ABTS_CONTR_FLG_TERM_EXCHG BIT_0
583 uint8_t vp_index;
584 uint8_t reserved_3:4;
585 uint8_t sof_type:4;
586 uint32_t exchange_address;
587 struct fcp_hdr_le fcp_hdr_le;
588 union {
589 struct ba_acc_le ba_acct;
590 struct ba_rjt_le ba_rjt;
591 } __packed payload;
592 uint32_t reserved_4;
593 uint32_t exchange_addr_to_abort;
594} __packed;
595
596/*
597 * ISP queue - ABTS Response IOCB from ISP24xx Firmware entry structure.
598 * The ABTS response with completion status to the ABTS response
599 * (sent by the target driver to the ISP 24xx) is sent by the
600 * ISP24xx firmware to the target driver.
601 * The IOCB is placed on the response queue.
602 */
603struct abts_resp_from_24xx_fw {
604 uint8_t entry_type; /* Entry type. */
605 uint8_t entry_count; /* Entry count. */
606 uint8_t sys_define; /* System defined. */
607 uint8_t entry_status; /* Entry Status. */
608 uint32_t handle;
609 uint16_t compl_status;
610#define ABTS_RESP_COMPL_SUCCESS 0
611#define ABTS_RESP_COMPL_SUBCODE_ERROR 0x31
612 uint16_t nport_handle;
613 uint16_t reserved_1;
614 uint8_t reserved_2;
615 uint8_t reserved_3:4;
616 uint8_t sof_type:4;
617 uint32_t exchange_address;
618 struct fcp_hdr_le fcp_hdr_le;
619 uint8_t reserved_4[8];
620 uint32_t error_subcode1;
621#define ABTS_RESP_SUBCODE_ERR_ABORTED_EXCH_NOT_TERM 0x1E
622 uint32_t error_subcode2;
623 uint32_t exchange_addr_to_abort;
624} __packed;
625
626/********************************************************************\
627 * Type Definitions used by initiator & target halves
628\********************************************************************/
629
630struct qla_tgt_mgmt_cmd;
631struct qla_tgt_sess;
632
633/*
634 * This structure provides a template of function calls that the
635 * target driver (from within qla_target.c) can issue to the
636 * target module (tcm_qla2xxx).
637 */
638struct qla_tgt_func_tmpl {
639
640 int (*handle_cmd)(struct scsi_qla_host *, struct qla_tgt_cmd *,
641 unsigned char *, uint32_t, int, int, int);
642 int (*handle_data)(struct qla_tgt_cmd *);
643 int (*handle_tmr)(struct qla_tgt_mgmt_cmd *, uint32_t, uint8_t,
644 uint32_t);
645 void (*free_cmd)(struct qla_tgt_cmd *);
646 void (*free_mcmd)(struct qla_tgt_mgmt_cmd *);
647 void (*free_session)(struct qla_tgt_sess *);
648
649 int (*check_initiator_node_acl)(struct scsi_qla_host *, unsigned char *,
650 void *, uint8_t *, uint16_t);
651 struct qla_tgt_sess *(*find_sess_by_loop_id)(struct scsi_qla_host *,
652 const uint16_t);
653 struct qla_tgt_sess *(*find_sess_by_s_id)(struct scsi_qla_host *,
654 const uint8_t *);
655 void (*clear_nacl_from_fcport_map)(struct qla_tgt_sess *);
656 void (*put_sess)(struct qla_tgt_sess *);
657 void (*shutdown_sess)(struct qla_tgt_sess *);
658};
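
/*
 * Illustrative sketch (not part of the driver): a fabric module would
 * normally provide a static instance of this template with designated
 * initializers and hand it to qlt_lport_register().  All example_* handler
 * names below are hypothetical placeholders.
 */
#if 0
static struct qla_tgt_func_tmpl example_qla_tgt_template = {
	.handle_cmd		= example_handle_cmd,
	.handle_data		= example_handle_data,
	.handle_tmr		= example_handle_tmr,
	.free_cmd		= example_free_cmd,
	.free_mcmd		= example_free_mcmd,
	.free_session		= example_free_session,
	.check_initiator_node_acl = example_check_initiator_node_acl,
	.find_sess_by_s_id	= example_find_sess_by_s_id,
	.find_sess_by_loop_id	= example_find_sess_by_loop_id,
	.clear_nacl_from_fcport_map = example_clear_nacl_from_fcport_map,
	.put_sess		= example_put_sess,
	.shutdown_sess		= example_shutdown_sess,
};
#endif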
659
660int qla2x00_wait_for_hba_online(struct scsi_qla_host *);
661
662#include <target/target_core_base.h>
663
664#define QLA_TGT_TIMEOUT 10 /* in seconds */
665
666#define QLA_TGT_MAX_HW_PENDING_TIME 60 /* in seconds */
667
668/* Immediate notify status constants */
669#define IMM_NTFY_LIP_RESET 0x000E
670#define IMM_NTFY_LIP_LINK_REINIT 0x000F
671#define IMM_NTFY_IOCB_OVERFLOW 0x0016
672#define IMM_NTFY_ABORT_TASK 0x0020
673#define IMM_NTFY_PORT_LOGOUT 0x0029
674#define IMM_NTFY_PORT_CONFIG 0x002A
675#define IMM_NTFY_GLBL_TPRLO 0x002D
676#define IMM_NTFY_GLBL_LOGO 0x002E
677#define IMM_NTFY_RESOURCE 0x0034
678#define IMM_NTFY_MSG_RX 0x0036
679#define IMM_NTFY_SRR 0x0045
680#define IMM_NTFY_ELS 0x0046
681
682/* Immediate notify task flags */
683#define IMM_NTFY_TASK_MGMT_SHIFT 8
684
685#define QLA_TGT_CLEAR_ACA 0x40
686#define QLA_TGT_TARGET_RESET 0x20
687#define QLA_TGT_LUN_RESET 0x10
688#define QLA_TGT_CLEAR_TS 0x04
689#define QLA_TGT_ABORT_TS 0x02
690#define QLA_TGT_ABORT_ALL_SESS 0xFFFF
691#define QLA_TGT_ABORT_ALL 0xFFFE
692#define QLA_TGT_NEXUS_LOSS_SESS 0xFFFD
693#define QLA_TGT_NEXUS_LOSS 0xFFFC
694
695/* Notify Acknowledge flags */
696#define NOTIFY_ACK_RES_COUNT BIT_8
697#define NOTIFY_ACK_CLEAR_LIP_RESET BIT_5
698#define NOTIFY_ACK_TM_RESP_CODE_VALID BIT_4
699
700/* Command's states */
701#define QLA_TGT_STATE_NEW 0 /* New command + target processing */
702#define QLA_TGT_STATE_NEED_DATA 1 /* target needs data to continue */
703#define QLA_TGT_STATE_DATA_IN 2 /* Data arrived + target processing */
704#define QLA_TGT_STATE_PROCESSED 3 /* target done processing */
705#define QLA_TGT_STATE_ABORTED 4 /* Command aborted */
706
707/* Special handles */
708#define QLA_TGT_NULL_HANDLE 0
709#define QLA_TGT_SKIP_HANDLE (0xFFFFFFFF & ~CTIO_COMPLETION_HANDLE_MARK)
710
711/* ATIO task_codes field */
712#define ATIO_SIMPLE_QUEUE 0
713#define ATIO_HEAD_OF_QUEUE 1
714#define ATIO_ORDERED_QUEUE 2
715#define ATIO_ACA_QUEUE 4
716#define ATIO_UNTAGGED 5
717
718/* TM failed response codes, see FCP (9.4.11 FCP_RSP_INFO) */
719#define FC_TM_SUCCESS 0
720#define FC_TM_BAD_FCP_DATA 1
721#define FC_TM_BAD_CMD 2
722#define FC_TM_FCP_DATA_MISMATCH 3
723#define FC_TM_REJECT 4
724#define FC_TM_FAILED 5
725
726/*
727 * Error code of qlt_pre_xmit_response() meaning that cmd's exchange was
728 * terminated, so no further action is needed and success should be returned
729 * to target.
730 */
731#define QLA_TGT_PRE_XMIT_RESP_CMD_ABORTED 0x1717
732
733#if (BITS_PER_LONG > 32) || defined(CONFIG_HIGHMEM64G)
734#define pci_dma_lo32(a) (a & 0xffffffff)
735#define pci_dma_hi32(a) ((((a) >> 16)>>16) & 0xffffffff)
736#else
737#define pci_dma_lo32(a) (a & 0xffffffff)
738#define pci_dma_hi32(a) 0
739#endif
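
/*
 * Note (an inference, not stated by the driver): the double ">> 16" in
 * pci_dma_hi32() presumably avoids shifting a dma_addr_t that may be only
 * 32 bits wide by 32 in a single step, which would be undefined behaviour
 * in C; splitting the shift keeps the macro safe for any dma_addr_t width.
 */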
740
741#define QLA_TGT_SENSE_VALID(sense) ((sense != NULL) && \
742 (((const uint8_t *)(sense))[0] & 0x70) == 0x70)
743
744struct qla_port_24xx_data {
745 uint8_t port_name[WWN_SIZE];
746 uint16_t loop_id;
747 uint16_t reserved;
748};
749
750struct qla_tgt {
751 struct scsi_qla_host *vha;
752 struct qla_hw_data *ha;
753
754 /*
755 * To sync between IRQ handlers and qlt_target_release(). Needed,
756 * because req_pkt() can drop/reacquire HW lock inside. Protected by
757 * HW lock.
758 */
759 int irq_cmd_count;
760
761 int datasegs_per_cmd, datasegs_per_cont, sg_tablesize;
762
763 /* Target's flags, serialized by pha->hardware_lock */
764 unsigned int tgt_enable_64bit_addr:1; /* 64-bits PCI addr enabled */
765 unsigned int link_reinit_iocb_pending:1;
766
767 /*
768 * Protected by tgt_mutex AND hardware_lock for writing and tgt_mutex
769 * OR hardware_lock for reading.
770 */
771 int tgt_stop; /* the target mode driver is being stopped */
772 int tgt_stopped; /* the target mode driver has been stopped */
773
774 /* Count of sessions referring to qla_tgt. Protected by hardware_lock. */
775 int sess_count;
776
777 /* Protected by hardware_lock. Addition also protected by tgt_mutex. */
778 struct list_head sess_list;
779
780 /* Protected by hardware_lock */
781 struct list_head del_sess_list;
782 struct delayed_work sess_del_work;
783
784 spinlock_t sess_work_lock;
785 struct list_head sess_works_list;
786 struct work_struct sess_work;
787
788 struct imm_ntfy_from_isp link_reinit_iocb;
789 wait_queue_head_t waitQ;
790 int notify_ack_expected;
791 int abts_resp_expected;
792 int modify_lun_expected;
793
794 int ctio_srr_id;
795 int imm_srr_id;
796 spinlock_t srr_lock;
797 struct list_head srr_ctio_list;
798 struct list_head srr_imm_list;
799 struct work_struct srr_work;
800
801 atomic_t tgt_global_resets_count;
802
803 struct list_head tgt_list_entry;
804};
805
806/*
807 * Equivalent to IT Nexus (Initiator-Target)
808 */
809struct qla_tgt_sess {
810 uint16_t loop_id;
811 port_id_t s_id;
812
813 unsigned int conf_compl_supported:1;
814 unsigned int deleted:1;
815 unsigned int local:1;
816 unsigned int tearing_down:1;
817
818 struct se_session *se_sess;
819 struct scsi_qla_host *vha;
820 struct qla_tgt *tgt;
821
822 struct list_head sess_list_entry;
823 unsigned long expires;
824 struct list_head del_list_entry;
825
826 uint8_t port_name[WWN_SIZE];
827 struct work_struct free_work;
828};
829
830struct qla_tgt_cmd {
831 struct qla_tgt_sess *sess;
832 int state;
833 struct se_cmd se_cmd;
834 struct work_struct free_work;
835 struct work_struct work;
836 /* Sense buffer that will be mapped into outgoing status */
837 unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
838
839 /* to save extra sess dereferences */
840 unsigned int conf_compl_supported:1;
841 unsigned int sg_mapped:1;
842 unsigned int free_sg:1;
843 unsigned int aborted:1; /* Needed in case of SRR */
844 unsigned int write_data_transferred:1;
845
846 struct scatterlist *sg; /* cmd data buffer SG vector */
847 int sg_cnt; /* SG segments count */
848 int bufflen; /* cmd buffer length */
849 int offset;
850 uint32_t tag;
851 uint32_t unpacked_lun;
852 enum dma_data_direction dma_data_direction;
853
854 uint16_t loop_id; /* to save extra sess dereferences */
855 struct qla_tgt *tgt; /* to save extra sess dereferences */
856 struct scsi_qla_host *vha;
857 struct list_head cmd_list;
858
859 struct atio_from_isp atio;
860};
861
862struct qla_tgt_sess_work_param {
863 struct list_head sess_works_list_entry;
864
865#define QLA_TGT_SESS_WORK_ABORT 1
866#define QLA_TGT_SESS_WORK_TM 2
867 int type;
868
869 union {
870 struct abts_recv_from_24xx abts;
871 struct imm_ntfy_from_isp tm_iocb;
872 struct atio_from_isp tm_iocb2;
873 };
874};
875
876struct qla_tgt_mgmt_cmd {
877 uint8_t tmr_func;
878 uint8_t fc_tm_rsp;
879 struct qla_tgt_sess *sess;
880 struct se_cmd se_cmd;
881 struct work_struct free_work;
882 unsigned int flags;
883#define QLA24XX_MGMT_SEND_NACK 1
884 union {
885 struct atio_from_isp atio;
886 struct imm_ntfy_from_isp imm_ntfy;
887 struct abts_recv_from_24xx abts;
888 } __packed orig_iocb;
889};
890
891struct qla_tgt_prm {
892 struct qla_tgt_cmd *cmd;
893 struct qla_tgt *tgt;
894 void *pkt;
895 struct scatterlist *sg; /* cmd data buffer SG vector */
896 int seg_cnt;
897 int req_cnt;
898 uint16_t rq_result;
899 uint16_t scsi_status;
900 unsigned char *sense_buffer;
901 int sense_buffer_len;
902 int residual;
903 int add_status_pkt;
904};
905
906struct qla_tgt_srr_imm {
907 struct list_head srr_list_entry;
908 int srr_id;
909 struct imm_ntfy_from_isp imm_ntfy;
910};
911
912struct qla_tgt_srr_ctio {
913 struct list_head srr_list_entry;
914 int srr_id;
915 struct qla_tgt_cmd *cmd;
916};
917
918#define QLA_TGT_XMIT_DATA 1
919#define QLA_TGT_XMIT_STATUS 2
920#define QLA_TGT_XMIT_ALL (QLA_TGT_XMIT_STATUS|QLA_TGT_XMIT_DATA)
921
922
923extern struct qla_tgt_data qla_target;
924/*
925 * Internal function prototypes
926 */
927void qlt_disable_vha(struct scsi_qla_host *);
928
929/*
930 * Function prototypes for qla_target.c logic used by qla2xxx LLD code.
931 */
932extern int qlt_add_target(struct qla_hw_data *, struct scsi_qla_host *);
933extern int qlt_remove_target(struct qla_hw_data *, struct scsi_qla_host *);
934extern int qlt_lport_register(struct qla_tgt_func_tmpl *, u64,
935 int (*callback)(struct scsi_qla_host *), void *);
936extern void qlt_lport_deregister(struct scsi_qla_host *);
937extern void qlt_unreg_sess(struct qla_tgt_sess *);
938extern void qlt_fc_port_added(struct scsi_qla_host *, fc_port_t *);
939extern void qlt_fc_port_deleted(struct scsi_qla_host *, fc_port_t *);
940extern void qlt_set_mode(struct scsi_qla_host *ha);
941extern void qlt_clear_mode(struct scsi_qla_host *ha);
942extern int __init qlt_init(void);
943extern void qlt_exit(void);
944extern void qlt_update_vp_map(struct scsi_qla_host *, int);
945
946/*
947 * This macro is used during early initialization when host->active_mode
948 * is not yet set. Right now, the ha value is ignored.
949 */
950#define QLA_TGT_MODE_ENABLED() (ql2x_ini_mode != QLA2XXX_INI_MODE_ENABLED)
951
952static inline bool qla_tgt_mode_enabled(struct scsi_qla_host *ha)
953{
954 return ha->host->active_mode & MODE_TARGET;
955}
956
957static inline bool qla_ini_mode_enabled(struct scsi_qla_host *ha)
958{
959 return ha->host->active_mode & MODE_INITIATOR;
960}
961
962static inline void qla_reverse_ini_mode(struct scsi_qla_host *ha)
963{
964 if (ha->host->active_mode & MODE_INITIATOR)
965 ha->host->active_mode &= ~MODE_INITIATOR;
966 else
967 ha->host->active_mode |= MODE_INITIATOR;
968}
969
970/*
971 * Exported symbols from qla_target.c LLD logic used by qla2xxx code.
972 */
973extern void qlt_24xx_atio_pkt_all_vps(struct scsi_qla_host *,
974 struct atio_from_isp *);
975extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
976extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
977extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
978extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
979extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
980extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
981extern void qlt_ctio_completion(struct scsi_qla_host *, uint32_t);
982extern void qlt_async_event(uint16_t, struct scsi_qla_host *, uint16_t *);
983extern void qlt_enable_vha(struct scsi_qla_host *);
984extern void qlt_vport_create(struct scsi_qla_host *, struct qla_hw_data *);
985extern void qlt_rff_id(struct scsi_qla_host *, struct ct_sns_req *);
986extern void qlt_init_atio_q_entries(struct scsi_qla_host *);
987extern void qlt_24xx_process_atio_queue(struct scsi_qla_host *);
988extern void qlt_24xx_config_rings(struct scsi_qla_host *,
989 device_reg_t __iomem *);
990extern void qlt_24xx_config_nvram_stage1(struct scsi_qla_host *,
991 struct nvram_24xx *);
992extern void qlt_24xx_config_nvram_stage2(struct scsi_qla_host *,
993 struct init_cb_24xx *);
994extern int qlt_24xx_process_response_error(struct scsi_qla_host *,
995 struct sts_entry_24xx *);
996extern void qlt_modify_vp_config(struct scsi_qla_host *,
997 struct vp_config_entry_24xx *);
998extern void qlt_probe_one_stage1(struct scsi_qla_host *, struct qla_hw_data *);
999extern int qlt_mem_alloc(struct qla_hw_data *);
1000extern void qlt_mem_free(struct qla_hw_data *);
1001extern void qlt_stop_phase1(struct qla_tgt *);
1002extern void qlt_stop_phase2(struct qla_tgt *);
1003
1004#endif /* __QLA_TARGET_H */
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
new file mode 100644
index 000000000000..6e64314dbbb3
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -0,0 +1,1919 @@
1/*******************************************************************************
2 * This file contains the tcm implementation using v4 configfs fabric infrastructure
3 * for QLogic target mode HBAs
4 *
5 * (c) Copyright 2010-2011 RisingTide Systems LLC.
6 *
7 * Licensed to the Linux Foundation under the General Public License (GPL)
8 * version 2.
9 *
10 * Author: Nicholas A. Bellinger <nab@risingtidesystems.com>
11 *
12 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() contains code from
13 * the TCM_FC / Open-FCoE.org fabric module.
14 *
15 * Copyright (c) 2010 Cisco Systems, Inc
16 *
17 * This program is free software; you can redistribute it and/or modify
18 * it under the terms of the GNU General Public License as published by
19 * the Free Software Foundation; either version 2 of the License, or
20 * (at your option) any later version.
21 *
22 * This program is distributed in the hope that it will be useful,
23 * but WITHOUT ANY WARRANTY; without even the implied warranty of
24 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
25 * GNU General Public License for more details.
26 ****************************************************************************/
27
28
29#include <linux/module.h>
30#include <linux/moduleparam.h>
31#include <generated/utsrelease.h>
32#include <linux/utsname.h>
33#include <linux/init.h>
34#include <linux/list.h>
35#include <linux/slab.h>
36#include <linux/kthread.h>
37#include <linux/types.h>
38#include <linux/string.h>
39#include <linux/configfs.h>
40#include <linux/ctype.h>
41#include <linux/string.h>
42#include <linux/ctype.h>
43#include <asm/unaligned.h>
44#include <scsi/scsi.h>
45#include <scsi/scsi_host.h>
46#include <scsi/scsi_device.h>
47#include <scsi/scsi_cmnd.h>
48#include <target/target_core_base.h>
49#include <target/target_core_fabric.h>
50#include <target/target_core_fabric_configfs.h>
51#include <target/target_core_configfs.h>
52#include <target/configfs_macros.h>
53
54#include "qla_def.h"
55#include "qla_target.h"
56#include "tcm_qla2xxx.h"
57
58struct workqueue_struct *tcm_qla2xxx_free_wq;
59struct workqueue_struct *tcm_qla2xxx_cmd_wq;
60
61static int tcm_qla2xxx_check_true(struct se_portal_group *se_tpg)
62{
63 return 1;
64}
65
66static int tcm_qla2xxx_check_false(struct se_portal_group *se_tpg)
67{
68 return 0;
69}
70
71/*
72 * Parse WWN.
73 * If strict, we require lower-case hex and colon separators to be sure
74 * the name is the same as what would be generated by ft_format_wwn()
75 * so the name and wwn are mapped one-to-one.
76 */
77static ssize_t tcm_qla2xxx_parse_wwn(const char *name, u64 *wwn, int strict)
78{
79 const char *cp;
80 char c;
81 u32 nibble;
82 u32 byte = 0;
83 u32 pos = 0;
84 u32 err;
85
86 *wwn = 0;
87 for (cp = name; cp < &name[TCM_QLA2XXX_NAMELEN - 1]; cp++) {
88 c = *cp;
89 if (c == '\n' && cp[1] == '\0')
90 continue;
91 if (strict && pos++ == 2 && byte++ < 7) {
92 pos = 0;
93 if (c == ':')
94 continue;
95 err = 1;
96 goto fail;
97 }
98 if (c == '\0') {
99 err = 2;
100 if (strict && byte != 8)
101 goto fail;
102 return cp - name;
103 }
104 err = 3;
105 if (isdigit(c))
106 nibble = c - '0';
107 else if (isxdigit(c) && (islower(c) || !strict))
108 nibble = tolower(c) - 'a' + 10;
109 else
110 goto fail;
111 *wwn = (*wwn << 4) | nibble;
112 }
113 err = 4;
114fail:
115 pr_debug("err %u len %zu pos %u byte %u\n",
116 err, cp - name, pos, byte);
117 return -1;
118}
119
120static ssize_t tcm_qla2xxx_format_wwn(char *buf, size_t len, u64 wwn)
121{
122 u8 b[8];
123
124 put_unaligned_be64(wwn, b);
125 return snprintf(buf, len,
126 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
127 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7]);
128}
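
/*
 * Example (values are hypothetical): for strictly formatted names,
 * tcm_qla2xxx_parse_wwn() and tcm_qla2xxx_format_wwn() round-trip, e.g.
 *
 *	u64 wwn;
 *	char buf[32];
 *
 *	tcm_qla2xxx_parse_wwn("21:00:00:24:ff:31:2c:a0\n", &wwn, 1);
 *	tcm_qla2xxx_format_wwn(buf, sizeof(buf), wwn);
 *
 * after which buf again contains "21:00:00:24:ff:31:2c:a0".
 */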
129
130static char *tcm_qla2xxx_get_fabric_name(void)
131{
132 return "qla2xxx";
133}
134
135/*
136 * From drivers/scsi/scsi_transport_fc.c:fc_parse_wwn
137 */
138static int tcm_qla2xxx_npiv_extract_wwn(const char *ns, u64 *nm)
139{
140 unsigned int i, j;
141 u8 wwn[8];
142
143 memset(wwn, 0, sizeof(wwn));
144
145 /* Validate and store the new name */
146 for (i = 0, j = 0; i < 16; i++) {
147 int value;
148
149 value = hex_to_bin(*ns++);
150 if (value >= 0)
151 j = (j << 4) | value;
152 else
153 return -EINVAL;
154
155 if (i % 2) {
156 wwn[i/2] = j & 0xff;
157 j = 0;
158 }
159 }
160
161 *nm = wwn_to_u64(wwn);
162 return 0;
163}
164
165/*
166 * This parsing logic follows drivers/scsi/scsi_transport_fc.c:
167 * store_fc_host_vport_create()
168 */
169static int tcm_qla2xxx_npiv_parse_wwn(
170 const char *name,
171 size_t count,
172 u64 *wwpn,
173 u64 *wwnn)
174{
175 unsigned int cnt = count;
176 int rc;
177
178 *wwpn = 0;
179 *wwnn = 0;
180
181 /* count may include a LF at end of string */
182 if (name[cnt-1] == '\n')
183 cnt--;
184
185 /* validate we have enough characters for WWPN */
186 if ((cnt != (16+1+16)) || (name[16] != ':'))
187 return -EINVAL;
188
189 rc = tcm_qla2xxx_npiv_extract_wwn(&name[0], wwpn);
190 if (rc != 0)
191 return rc;
192
193 rc = tcm_qla2xxx_npiv_extract_wwn(&name[17], wwnn);
194 if (rc != 0)
195 return rc;
196
197 return 0;
198}
199
200static ssize_t tcm_qla2xxx_npiv_format_wwn(char *buf, size_t len,
201 u64 wwpn, u64 wwnn)
202{
203 u8 b[8], b2[8];
204
205 put_unaligned_be64(wwpn, b);
206 put_unaligned_be64(wwnn, b2);
207 return snprintf(buf, len,
208 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x,"
209 "%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x",
210 b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
211 b2[0], b2[1], b2[2], b2[3], b2[4], b2[5], b2[6], b2[7]);
212}
213
214static char *tcm_qla2xxx_npiv_get_fabric_name(void)
215{
216 return "qla2xxx_npiv";
217}
218
219static u8 tcm_qla2xxx_get_fabric_proto_ident(struct se_portal_group *se_tpg)
220{
221 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
222 struct tcm_qla2xxx_tpg, se_tpg);
223 struct tcm_qla2xxx_lport *lport = tpg->lport;
224 u8 proto_id;
225
226 switch (lport->lport_proto_id) {
227 case SCSI_PROTOCOL_FCP:
228 default:
229 proto_id = fc_get_fabric_proto_ident(se_tpg);
230 break;
231 }
232
233 return proto_id;
234}
235
236static char *tcm_qla2xxx_get_fabric_wwn(struct se_portal_group *se_tpg)
237{
238 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
239 struct tcm_qla2xxx_tpg, se_tpg);
240 struct tcm_qla2xxx_lport *lport = tpg->lport;
241
242 return &lport->lport_name[0];
243}
244
245static char *tcm_qla2xxx_npiv_get_fabric_wwn(struct se_portal_group *se_tpg)
246{
247 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
248 struct tcm_qla2xxx_tpg, se_tpg);
249 struct tcm_qla2xxx_lport *lport = tpg->lport;
250
251 return &lport->lport_npiv_name[0];
252}
253
254static u16 tcm_qla2xxx_get_tag(struct se_portal_group *se_tpg)
255{
256 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
257 struct tcm_qla2xxx_tpg, se_tpg);
258 return tpg->lport_tpgt;
259}
260
261static u32 tcm_qla2xxx_get_default_depth(struct se_portal_group *se_tpg)
262{
263 return 1;
264}
265
266static u32 tcm_qla2xxx_get_pr_transport_id(
267 struct se_portal_group *se_tpg,
268 struct se_node_acl *se_nacl,
269 struct t10_pr_registration *pr_reg,
270 int *format_code,
271 unsigned char *buf)
272{
273 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
274 struct tcm_qla2xxx_tpg, se_tpg);
275 struct tcm_qla2xxx_lport *lport = tpg->lport;
276 int ret = 0;
277
278 switch (lport->lport_proto_id) {
279 case SCSI_PROTOCOL_FCP:
280 default:
281 ret = fc_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
282 format_code, buf);
283 break;
284 }
285
286 return ret;
287}
288
289static u32 tcm_qla2xxx_get_pr_transport_id_len(
290 struct se_portal_group *se_tpg,
291 struct se_node_acl *se_nacl,
292 struct t10_pr_registration *pr_reg,
293 int *format_code)
294{
295 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
296 struct tcm_qla2xxx_tpg, se_tpg);
297 struct tcm_qla2xxx_lport *lport = tpg->lport;
298 int ret = 0;
299
300 switch (lport->lport_proto_id) {
301 case SCSI_PROTOCOL_FCP:
302 default:
303 ret = fc_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
304 format_code);
305 break;
306 }
307
308 return ret;
309}
310
311static char *tcm_qla2xxx_parse_pr_out_transport_id(
312 struct se_portal_group *se_tpg,
313 const char *buf,
314 u32 *out_tid_len,
315 char **port_nexus_ptr)
316{
317 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
318 struct tcm_qla2xxx_tpg, se_tpg);
319 struct tcm_qla2xxx_lport *lport = tpg->lport;
320 char *tid = NULL;
321
322 switch (lport->lport_proto_id) {
323 case SCSI_PROTOCOL_FCP:
324 default:
325 tid = fc_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
326 port_nexus_ptr);
327 break;
328 }
329
330 return tid;
331}
332
333static int tcm_qla2xxx_check_demo_mode(struct se_portal_group *se_tpg)
334{
335 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
336 struct tcm_qla2xxx_tpg, se_tpg);
337
338 return QLA_TPG_ATTRIB(tpg)->generate_node_acls;
339}
340
341static int tcm_qla2xxx_check_demo_mode_cache(struct se_portal_group *se_tpg)
342{
343 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
344 struct tcm_qla2xxx_tpg, se_tpg);
345
346 return QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls;
347}
348
349static int tcm_qla2xxx_check_demo_write_protect(struct se_portal_group *se_tpg)
350{
351 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
352 struct tcm_qla2xxx_tpg, se_tpg);
353
354 return QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect;
355}
356
357static int tcm_qla2xxx_check_prod_write_protect(struct se_portal_group *se_tpg)
358{
359 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
360 struct tcm_qla2xxx_tpg, se_tpg);
361
362 return QLA_TPG_ATTRIB(tpg)->prod_mode_write_protect;
363}
364
365static struct se_node_acl *tcm_qla2xxx_alloc_fabric_acl(
366 struct se_portal_group *se_tpg)
367{
368 struct tcm_qla2xxx_nacl *nacl;
369
370 nacl = kzalloc(sizeof(struct tcm_qla2xxx_nacl), GFP_KERNEL);
371 if (!nacl) {
372 pr_err("Unable to alocate struct tcm_qla2xxx_nacl\n");
373 return NULL;
374 }
375
376 return &nacl->se_node_acl;
377}
378
379static void tcm_qla2xxx_release_fabric_acl(
380 struct se_portal_group *se_tpg,
381 struct se_node_acl *se_nacl)
382{
383 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
384 struct tcm_qla2xxx_nacl, se_node_acl);
385 kfree(nacl);
386}
387
388static u32 tcm_qla2xxx_tpg_get_inst_index(struct se_portal_group *se_tpg)
389{
390 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
391 struct tcm_qla2xxx_tpg, se_tpg);
392
393 return tpg->lport_tpgt;
394}
395
396static void tcm_qla2xxx_complete_mcmd(struct work_struct *work)
397{
398 struct qla_tgt_mgmt_cmd *mcmd = container_of(work,
399 struct qla_tgt_mgmt_cmd, free_work);
400
401 transport_generic_free_cmd(&mcmd->se_cmd, 0);
402}
403
404/*
405 * Called from qla_target_template->free_mcmd(), and will call
406 * tcm_qla2xxx_release_cmd() via normal struct target_core_fabric_ops
407 * release callback. qla_hw_data->hardware_lock is expected to be held
408 */
409static void tcm_qla2xxx_free_mcmd(struct qla_tgt_mgmt_cmd *mcmd)
410{
411 INIT_WORK(&mcmd->free_work, tcm_qla2xxx_complete_mcmd);
412 queue_work(tcm_qla2xxx_free_wq, &mcmd->free_work);
413}
414
415static void tcm_qla2xxx_complete_free(struct work_struct *work)
416{
417 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
418
419 transport_generic_free_cmd(&cmd->se_cmd, 0);
420}
421
422/*
423 * Called from qla_target_template->free_cmd(), and will call
424 * tcm_qla2xxx_release_cmd via normal struct target_core_fabric_ops
425 * release callback. qla_hw_data->hardware_lock is expected to be held
426 */
427static void tcm_qla2xxx_free_cmd(struct qla_tgt_cmd *cmd)
428{
429 INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
430 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
431}
432
433/*
434 * Called from struct target_core_fabric_ops->check_stop_free() context
435 */
436static int tcm_qla2xxx_check_stop_free(struct se_cmd *se_cmd)
437{
438 return target_put_sess_cmd(se_cmd->se_sess, se_cmd);
439}
440
441/* tcm_qla2xxx_release_cmd - Callback from TCM Core to release the underlying
442 * fabric descriptor. @se_cmd: command to release.
443 */
444static void tcm_qla2xxx_release_cmd(struct se_cmd *se_cmd)
445{
446 struct qla_tgt_cmd *cmd;
447
448 if (se_cmd->se_cmd_flags & SCF_SCSI_TMR_CDB) {
449 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
450 struct qla_tgt_mgmt_cmd, se_cmd);
451 qlt_free_mcmd(mcmd);
452 return;
453 }
454
455 cmd = container_of(se_cmd, struct qla_tgt_cmd, se_cmd);
456 qlt_free_cmd(cmd);
457}
458
459static int tcm_qla2xxx_shutdown_session(struct se_session *se_sess)
460{
461 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
462 struct scsi_qla_host *vha;
463 unsigned long flags;
464
465 BUG_ON(!sess);
466 vha = sess->vha;
467
468 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
469 sess->tearing_down = 1;
470 target_splice_sess_cmd_list(se_sess);
471 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
472
473 return 1;
474}
475
476static void tcm_qla2xxx_close_session(struct se_session *se_sess)
477{
478 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
479 struct scsi_qla_host *vha;
480 unsigned long flags;
481
482 BUG_ON(!sess);
483 vha = sess->vha;
484
485 spin_lock_irqsave(&vha->hw->hardware_lock, flags);
486 qlt_unreg_sess(sess);
487 spin_unlock_irqrestore(&vha->hw->hardware_lock, flags);
488}
489
490static u32 tcm_qla2xxx_sess_get_index(struct se_session *se_sess)
491{
492 return 0;
493}
494
495/*
496 * The LIO target core uses DMA_TO_DEVICE to mean that data is going
497 * to the target (eg handling a WRITE) and DMA_FROM_DEVICE to mean
498 * that data is coming from the target (eg handling a READ). However,
499 * this is just the opposite of what we have to tell the DMA mapping
500 * layer -- eg when handling a READ, the HBA will have to DMA the data
501 * out of memory so it can send it to the initiator, which means we
502 * need to use DMA_TO_DEVICE when we map the data.
503 */
504static enum dma_data_direction tcm_qla2xxx_mapping_dir(struct se_cmd *se_cmd)
505{
506 if (se_cmd->se_cmd_flags & SCF_BIDI)
507 return DMA_BIDIRECTIONAL;
508
509 switch (se_cmd->data_direction) {
510 case DMA_TO_DEVICE:
511 return DMA_FROM_DEVICE;
512 case DMA_FROM_DEVICE:
513 return DMA_TO_DEVICE;
514 case DMA_NONE:
515 default:
516 return DMA_NONE;
517 }
518}
519
520static int tcm_qla2xxx_write_pending(struct se_cmd *se_cmd)
521{
522 struct qla_tgt_cmd *cmd = container_of(se_cmd,
523 struct qla_tgt_cmd, se_cmd);
524
525 cmd->bufflen = se_cmd->data_length;
526 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
527
528 cmd->sg_cnt = se_cmd->t_data_nents;
529 cmd->sg = se_cmd->t_data_sg;
530
531 /*
532 * qla_target.c:qlt_rdy_to_xfer() will call pci_map_sg() to setup
533 * the SGL mappings into PCIe memory for incoming FCP WRITE data.
534 */
535 return qlt_rdy_to_xfer(cmd);
536}
537
538static int tcm_qla2xxx_write_pending_status(struct se_cmd *se_cmd)
539{
540 unsigned long flags;
541 /*
542 * Check for WRITE_PENDING status to determine if we need to wait for
543 * CTIO aborts to be posted via hardware in tcm_qla2xxx_handle_data().
544 */
545 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
546 if (se_cmd->t_state == TRANSPORT_WRITE_PENDING ||
547 se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
548 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
549 wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
550 3000);
551 return 0;
552 }
553 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
554
555 return 0;
556}
557
558static void tcm_qla2xxx_set_default_node_attrs(struct se_node_acl *nacl)
559{
560 return;
561}
562
563static u32 tcm_qla2xxx_get_task_tag(struct se_cmd *se_cmd)
564{
565 struct qla_tgt_cmd *cmd = container_of(se_cmd,
566 struct qla_tgt_cmd, se_cmd);
567
568 return cmd->tag;
569}
570
571static int tcm_qla2xxx_get_cmd_state(struct se_cmd *se_cmd)
572{
573 return 0;
574}
575
576/*
577 * Called from process context in qla_target.c:qlt_do_work() code
578 */
579static int tcm_qla2xxx_handle_cmd(scsi_qla_host_t *vha, struct qla_tgt_cmd *cmd,
580 unsigned char *cdb, uint32_t data_length, int fcp_task_attr,
581 int data_dir, int bidi)
582{
583 struct se_cmd *se_cmd = &cmd->se_cmd;
584 struct se_session *se_sess;
585 struct qla_tgt_sess *sess;
586 int flags = TARGET_SCF_ACK_KREF;
587
588 if (bidi)
589 flags |= TARGET_SCF_BIDI_OP;
590
591 sess = cmd->sess;
592 if (!sess) {
593 pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
594 return -EINVAL;
595 }
596
597 se_sess = sess->se_sess;
598 if (!se_sess) {
599 pr_err("Unable to locate active struct se_session\n");
600 return -EINVAL;
601 }
602
603 target_submit_cmd(se_cmd, se_sess, cdb, &cmd->sense_buffer[0],
604 cmd->unpacked_lun, data_length, fcp_task_attr,
605 data_dir, flags);
606 return 0;
607}
608
609static void tcm_qla2xxx_do_rsp(struct work_struct *work)
610{
611 struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
612 /*
613 * Dispatch ->queue_status from workqueue process context
614 */
615 transport_generic_request_failure(&cmd->se_cmd);
616}
617
618/*
619 * Called from qla_target.c:qlt_do_ctio_completion()
620 */
621static int tcm_qla2xxx_handle_data(struct qla_tgt_cmd *cmd)
622{
623 struct se_cmd *se_cmd = &cmd->se_cmd;
624 unsigned long flags;
625 /*
626 * Ensure that the complete FCP WRITE payload has been received.
627 * Otherwise return an exception via CHECK_CONDITION status.
628 */
629 if (!cmd->write_data_transferred) {
630 /*
631 * Check if se_cmd has already been aborted via LUN_RESET, and
632 * waiting upon completion in tcm_qla2xxx_write_pending_status()
633 */
634 spin_lock_irqsave(&se_cmd->t_state_lock, flags);
635 if (se_cmd->transport_state & CMD_T_ABORTED) {
636 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
637 complete(&se_cmd->t_transport_stop_comp);
638 return 0;
639 }
640 spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
641
642 se_cmd->scsi_sense_reason = TCM_CHECK_CONDITION_ABORT_CMD;
643 INIT_WORK(&cmd->work, tcm_qla2xxx_do_rsp);
644 queue_work(tcm_qla2xxx_free_wq, &cmd->work);
645 return 0;
646 }
647 /*
648 * We now tell TCM to queue this WRITE CDB with TRANSPORT_PROCESS_WRITE
649 * status to the backstore processing thread.
650 */
651 return transport_generic_handle_data(&cmd->se_cmd);
652}
653
654/*
655 * Called from qla_target.c:qlt_issue_task_mgmt()
656 */
657static int tcm_qla2xxx_handle_tmr(struct qla_tgt_mgmt_cmd *mcmd, uint32_t lun,
658 uint8_t tmr_func, uint32_t tag)
659{
660 struct qla_tgt_sess *sess = mcmd->sess;
661 struct se_cmd *se_cmd = &mcmd->se_cmd;
662
663 return target_submit_tmr(se_cmd, sess->se_sess, NULL, lun, mcmd,
664 tmr_func, GFP_ATOMIC, tag, TARGET_SCF_ACK_KREF);
665}
666
667static int tcm_qla2xxx_queue_data_in(struct se_cmd *se_cmd)
668{
669 struct qla_tgt_cmd *cmd = container_of(se_cmd,
670 struct qla_tgt_cmd, se_cmd);
671
672 cmd->bufflen = se_cmd->data_length;
673 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
674 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
675
676 cmd->sg_cnt = se_cmd->t_data_nents;
677 cmd->sg = se_cmd->t_data_sg;
678 cmd->offset = 0;
679
680 /*
681 * Now queue completed DATA_IN to the qla2xxx LLD and response ring
682 */
683 return qlt_xmit_response(cmd, QLA_TGT_XMIT_DATA|QLA_TGT_XMIT_STATUS,
684 se_cmd->scsi_status);
685}
686
687static int tcm_qla2xxx_queue_status(struct se_cmd *se_cmd)
688{
689 struct qla_tgt_cmd *cmd = container_of(se_cmd,
690 struct qla_tgt_cmd, se_cmd);
691 int xmit_type = QLA_TGT_XMIT_STATUS;
692
693 cmd->bufflen = se_cmd->data_length;
694 cmd->sg = NULL;
695 cmd->sg_cnt = 0;
696 cmd->offset = 0;
697 cmd->dma_data_direction = tcm_qla2xxx_mapping_dir(se_cmd);
698 cmd->aborted = (se_cmd->transport_state & CMD_T_ABORTED);
699
700 if (se_cmd->data_direction == DMA_FROM_DEVICE) {
701 /*
702 * For FCP_READ with CHECK_CONDITION status, clear cmd->bufflen
703 * for qla_tgt_xmit_response LLD code
704 */
705 se_cmd->se_cmd_flags |= SCF_UNDERFLOW_BIT;
706 se_cmd->residual_count = se_cmd->data_length;
707
708 cmd->bufflen = 0;
709 }
710 /*
711 * Now queue status response to qla2xxx LLD code and response ring
712 */
713 return qlt_xmit_response(cmd, xmit_type, se_cmd->scsi_status);
714}
715
716static int tcm_qla2xxx_queue_tm_rsp(struct se_cmd *se_cmd)
717{
718 struct se_tmr_req *se_tmr = se_cmd->se_tmr_req;
719 struct qla_tgt_mgmt_cmd *mcmd = container_of(se_cmd,
720 struct qla_tgt_mgmt_cmd, se_cmd);
721
722 pr_debug("queue_tm_rsp: mcmd: %p func: 0x%02x response: 0x%02x\n",
723 mcmd, se_tmr->function, se_tmr->response);
724 /*
725 * Do translation between TCM TM response codes and
726 * QLA2xxx FC TM response codes.
727 */
728 switch (se_tmr->response) {
729 case TMR_FUNCTION_COMPLETE:
730 mcmd->fc_tm_rsp = FC_TM_SUCCESS;
731 break;
732 case TMR_TASK_DOES_NOT_EXIST:
733 mcmd->fc_tm_rsp = FC_TM_BAD_CMD;
734 break;
735 case TMR_FUNCTION_REJECTED:
736 mcmd->fc_tm_rsp = FC_TM_REJECT;
737 break;
738 case TMR_LUN_DOES_NOT_EXIST:
739 default:
740 mcmd->fc_tm_rsp = FC_TM_FAILED;
741 break;
742 }
743 /*
744 * Queue the TM response to QLA2xxx LLD to build a
745 * CTIO response packet.
746 */
747 qlt_xmit_tm_rsp(mcmd);
748
749 return 0;
750}
751
752static u16 tcm_qla2xxx_get_fabric_sense_len(void)
753{
754 return 0;
755}
756
757static u16 tcm_qla2xxx_set_fabric_sense_len(struct se_cmd *se_cmd,
758 u32 sense_length)
759{
760 return 0;
761}
762
763/* Local pointer to allocated TCM configfs fabric module */
764struct target_fabric_configfs *tcm_qla2xxx_fabric_configfs;
765struct target_fabric_configfs *tcm_qla2xxx_npiv_fabric_configfs;
766
767static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
768 struct tcm_qla2xxx_nacl *, struct qla_tgt_sess *);
769/*
770 * Expected to be called with struct qla_hw_data->hardware_lock held
771 */
772static void tcm_qla2xxx_clear_nacl_from_fcport_map(struct qla_tgt_sess *sess)
773{
774 struct se_node_acl *se_nacl = sess->se_sess->se_node_acl;
775 struct se_portal_group *se_tpg = se_nacl->se_tpg;
776 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
777 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
778 struct tcm_qla2xxx_lport, lport_wwn);
779 struct tcm_qla2xxx_nacl *nacl = container_of(se_nacl,
780 struct tcm_qla2xxx_nacl, se_node_acl);
781 void *node;
782
783 pr_debug("fc_rport domain: port_id 0x%06x\n", nacl->nport_id);
784
785 node = btree_remove32(&lport->lport_fcport_map, nacl->nport_id);
786 WARN_ON(node && (node != se_nacl));
787
788 pr_debug("Removed from fcport_map: %p for WWNN: 0x%016LX, port_id: 0x%06x\n",
789 se_nacl, nacl->nport_wwnn, nacl->nport_id);
790 /*
791 * Now clear the se_nacl and session pointers from our HW lport lookup
792 * table mapping for this initiator's fabric S_ID and LOOP_ID entries.
793 *
794 * This is done ahead of callbacks into tcm_qla2xxx_free_session() ->
795 * target_wait_for_sess_cmds() before the session waits for outstanding
796 * I/O to complete, to avoid a race between session shutdown execution
797 * and incoming ATIOs or TMRs picking up a stale se_node_acl reference.
798 */
799 tcm_qla2xxx_clear_sess_lookup(lport, nacl, sess);
800}
801
802static void tcm_qla2xxx_release_session(struct kref *kref)
803{
804 struct se_session *se_sess = container_of(kref,
805 struct se_session, sess_kref);
806
807 qlt_unreg_sess(se_sess->fabric_sess_ptr);
808}
809
810static void tcm_qla2xxx_put_session(struct se_session *se_sess)
811{
812 struct qla_tgt_sess *sess = se_sess->fabric_sess_ptr;
813 struct qla_hw_data *ha = sess->vha->hw;
814 unsigned long flags;
815
816 spin_lock_irqsave(&ha->hardware_lock, flags);
817 kref_put(&se_sess->sess_kref, tcm_qla2xxx_release_session);
818 spin_unlock_irqrestore(&ha->hardware_lock, flags);
819}
820
821static void tcm_qla2xxx_put_sess(struct qla_tgt_sess *sess)
822{
823 tcm_qla2xxx_put_session(sess->se_sess);
824}
825
826static void tcm_qla2xxx_shutdown_sess(struct qla_tgt_sess *sess)
827{
828 tcm_qla2xxx_shutdown_session(sess->se_sess);
829}
830
831static struct se_node_acl *tcm_qla2xxx_make_nodeacl(
832 struct se_portal_group *se_tpg,
833 struct config_group *group,
834 const char *name)
835{
836 struct se_node_acl *se_nacl, *se_nacl_new;
837 struct tcm_qla2xxx_nacl *nacl;
838 u64 wwnn;
839 u32 qla2xxx_nexus_depth;
840
841 if (tcm_qla2xxx_parse_wwn(name, &wwnn, 1) < 0)
842 return ERR_PTR(-EINVAL);
843
844 se_nacl_new = tcm_qla2xxx_alloc_fabric_acl(se_tpg);
845 if (!se_nacl_new)
846 return ERR_PTR(-ENOMEM);
847/* #warning FIXME: Hardcoded qla2xxx_nexus depth in tcm_qla2xxx_make_nodeacl */
848 qla2xxx_nexus_depth = 1;
849
850 /*
851 * se_nacl_new may be released by core_tpg_add_initiator_node_acl()
852 * when converting a NodeACL from demo mode -> explicit
853 */
854 se_nacl = core_tpg_add_initiator_node_acl(se_tpg, se_nacl_new,
855 name, qla2xxx_nexus_depth);
856 if (IS_ERR(se_nacl)) {
857 tcm_qla2xxx_release_fabric_acl(se_tpg, se_nacl_new);
858 return se_nacl;
859 }
860 /*
861 * Locate our struct tcm_qla2xxx_nacl and set the FC Nport WWPN
862 */
863 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
864 nacl->nport_wwnn = wwnn;
865 tcm_qla2xxx_format_wwn(&nacl->nport_name[0], TCM_QLA2XXX_NAMELEN, wwnn);
866
867 return se_nacl;
868}
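/*
 * Illustrative usage sketch, assuming the standard TCM configfs layout
 * (the initiator WWPN below is a made-up placeholder): an explicit
 * NodeACL is created from userspace with a configfs mkdir, e.g.
 *
 *   mkdir -p /sys/kernel/config/target/qla2xxx/<lport wwpn>/tpgt_1/acls/21:00:00:24:ff:00:00:01
 *
 * The colon separated name is parsed by tcm_qla2xxx_parse_wwn() and
 * lands in tcm_qla2xxx_make_nodeacl() above via fabric_make_nodeacl.
 */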
869
870static void tcm_qla2xxx_drop_nodeacl(struct se_node_acl *se_acl)
871{
872 struct se_portal_group *se_tpg = se_acl->se_tpg;
873 struct tcm_qla2xxx_nacl *nacl = container_of(se_acl,
874 struct tcm_qla2xxx_nacl, se_node_acl);
875
876 core_tpg_del_initiator_node_acl(se_tpg, se_acl, 1);
877 kfree(nacl);
878}
879
880/* Start items for tcm_qla2xxx_tpg_attrib_cit */
881
882#define DEF_QLA_TPG_ATTRIB(name) \
883 \
884static ssize_t tcm_qla2xxx_tpg_attrib_show_##name( \
885 struct se_portal_group *se_tpg, \
886 char *page) \
887{ \
888 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
889 struct tcm_qla2xxx_tpg, se_tpg); \
890 \
891 return sprintf(page, "%u\n", QLA_TPG_ATTRIB(tpg)->name); \
892} \
893 \
894static ssize_t tcm_qla2xxx_tpg_attrib_store_##name( \
895 struct se_portal_group *se_tpg, \
896 const char *page, \
897 size_t count) \
898{ \
899 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg, \
900 struct tcm_qla2xxx_tpg, se_tpg); \
901 unsigned long val; \
902 int ret; \
903 \
904 ret = kstrtoul(page, 0, &val); \
905 if (ret < 0) { \
906 pr_err("kstrtoul() failed with" \
907 " ret: %d\n", ret); \
908 return -EINVAL; \
909 } \
910 ret = tcm_qla2xxx_set_attrib_##name(tpg, val); \
911 \
912 return (!ret) ? count : -EINVAL; \
913}
914
915#define DEF_QLA_TPG_ATTR_BOOL(_name) \
916 \
917static int tcm_qla2xxx_set_attrib_##_name( \
918 struct tcm_qla2xxx_tpg *tpg, \
919 unsigned long val) \
920{ \
921 struct tcm_qla2xxx_tpg_attrib *a = &tpg->tpg_attrib; \
922 \
923 if ((val != 0) && (val != 1)) { \
924 pr_err("Illegal boolean value %lu\n", val); \
925 return -EINVAL; \
926 } \
927 \
928 a->_name = val; \
929 return 0; \
930}
931
932#define QLA_TPG_ATTR(_name, _mode) \
933 TF_TPG_ATTRIB_ATTR(tcm_qla2xxx, _name, _mode);
934
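/*
 * Expansion sketch (illustrative, not a definitive listing): for each
 * attribute name below, the three macros generate roughly
 *
 *   tcm_qla2xxx_set_attrib_<name>()        - 0/1 range check + store
 *   tcm_qla2xxx_tpg_attrib_show_<name>()   - configfs show handler
 *   tcm_qla2xxx_tpg_attrib_store_<name>()  - configfs store handler
 *   tcm_qla2xxx_tpg_attrib_<name>          - struct configfs_attribute
 *
 * The last symbol is what tcm_qla2xxx_tpg_attrib_attrs[] further down
 * references through its .attr member.
 */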
935/*
936 * Define tcm_qla2xxx_tpg_attrib_s_generate_node_acls
937 */
938DEF_QLA_TPG_ATTR_BOOL(generate_node_acls);
939DEF_QLA_TPG_ATTRIB(generate_node_acls);
940QLA_TPG_ATTR(generate_node_acls, S_IRUGO | S_IWUSR);
941
942/*
943 * Define tcm_qla2xxx_tpg_attrib_s_cache_dynamic_acls
944 */
945DEF_QLA_TPG_ATTR_BOOL(cache_dynamic_acls);
946DEF_QLA_TPG_ATTRIB(cache_dynamic_acls);
947QLA_TPG_ATTR(cache_dynamic_acls, S_IRUGO | S_IWUSR);
948
949/*
950 * Define tcm_qla2xxx_tpg_attrib_s_demo_mode_write_protect
951 */
952DEF_QLA_TPG_ATTR_BOOL(demo_mode_write_protect);
953DEF_QLA_TPG_ATTRIB(demo_mode_write_protect);
954QLA_TPG_ATTR(demo_mode_write_protect, S_IRUGO | S_IWUSR);
955
956/*
957 * Define tcm_qla2xxx_tpg_attrib_s_prod_mode_write_protect
958 */
959DEF_QLA_TPG_ATTR_BOOL(prod_mode_write_protect);
960DEF_QLA_TPG_ATTRIB(prod_mode_write_protect);
961QLA_TPG_ATTR(prod_mode_write_protect, S_IRUGO | S_IWUSR);
962
963static struct configfs_attribute *tcm_qla2xxx_tpg_attrib_attrs[] = {
964 &tcm_qla2xxx_tpg_attrib_generate_node_acls.attr,
965 &tcm_qla2xxx_tpg_attrib_cache_dynamic_acls.attr,
966 &tcm_qla2xxx_tpg_attrib_demo_mode_write_protect.attr,
967 &tcm_qla2xxx_tpg_attrib_prod_mode_write_protect.attr,
968 NULL,
969};
970
971/* End items for tcm_qla2xxx_tpg_attrib_cit */
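/*
 * Illustrative usage sketch, assuming the standard TCM configfs layout:
 * once a TPG exists, the attributes defined above appear under its
 * attrib/ group and can be toggled from userspace, e.g.
 *
 *   echo 0 > /sys/kernel/config/target/qla2xxx/<lport wwpn>/tpgt_1/attrib/demo_mode_write_protect
 *
 * which reaches tcm_qla2xxx_tpg_attrib_store_demo_mode_write_protect()
 * as generated by the macros above.
 */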
972
973static ssize_t tcm_qla2xxx_tpg_show_enable(
974 struct se_portal_group *se_tpg,
975 char *page)
976{
977 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
978 struct tcm_qla2xxx_tpg, se_tpg);
979
980 return snprintf(page, PAGE_SIZE, "%d\n",
981 atomic_read(&tpg->lport_tpg_enabled));
982}
983
984static ssize_t tcm_qla2xxx_tpg_store_enable(
985 struct se_portal_group *se_tpg,
986 const char *page,
987 size_t count)
988{
989 struct se_wwn *se_wwn = se_tpg->se_tpg_wwn;
990 struct tcm_qla2xxx_lport *lport = container_of(se_wwn,
991 struct tcm_qla2xxx_lport, lport_wwn);
992 struct scsi_qla_host *vha = lport->qla_vha;
993 struct qla_hw_data *ha = vha->hw;
994 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
995 struct tcm_qla2xxx_tpg, se_tpg);
996 unsigned long op;
997 int rc;
998
999 rc = kstrtoul(page, 0, &op);
1000 if (rc < 0) {
1001 pr_err("kstrtoul() returned %d\n", rc);
1002 return -EINVAL;
1003 }
1004 if ((op != 1) && (op != 0)) {
1005 pr_err("Illegal value for tpg_enable: %lu\n", op);
1006 return -EINVAL;
1007 }
1008
1009 if (op) {
1010 atomic_set(&tpg->lport_tpg_enabled, 1);
1011 qlt_enable_vha(vha);
1012 } else {
1013 if (!ha->tgt.qla_tgt) {
1014 pr_err("truct qla_hw_data *ha->tgt.qla_tgt is NULL\n");
1015 return -ENODEV;
1016 }
1017 atomic_set(&tpg->lport_tpg_enabled, 0);
1018 qlt_stop_phase1(ha->tgt.qla_tgt);
1019 }
1020
1021 return count;
1022}
1023
1024TF_TPG_BASE_ATTR(tcm_qla2xxx, enable, S_IRUGO | S_IWUSR);
1025
1026static struct configfs_attribute *tcm_qla2xxx_tpg_attrs[] = {
1027 &tcm_qla2xxx_tpg_enable.attr,
1028 NULL,
1029};
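/*
 * Illustrative usage sketch, assuming the standard TCM configfs layout:
 * target mode on the HW port is switched on by writing to the TPG
 * enable attribute, e.g.
 *
 *   echo 1 > /sys/kernel/config/target/qla2xxx/<lport wwpn>/tpgt_1/enable
 *
 * which reaches tcm_qla2xxx_tpg_store_enable() above and calls
 * qlt_enable_vha(); writing 0 disables it via qlt_stop_phase1().
 */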
1030
1031static struct se_portal_group *tcm_qla2xxx_make_tpg(
1032 struct se_wwn *wwn,
1033 struct config_group *group,
1034 const char *name)
1035{
1036 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1037 struct tcm_qla2xxx_lport, lport_wwn);
1038 struct tcm_qla2xxx_tpg *tpg;
1039 unsigned long tpgt;
1040 int ret;
1041
1042 if (strstr(name, "tpgt_") != name)
1043 return ERR_PTR(-EINVAL);
1044 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1045 return ERR_PTR(-EINVAL);
1046
1047 if (!lport->qla_npiv_vp && (tpgt != 1)) {
1048 pr_err("In non NPIV mode, a single TPG=1 is used for HW port mappings\n");
1049 return ERR_PTR(-ENOSYS);
1050 }
1051
1052 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1053 if (!tpg) {
1054 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1055 return ERR_PTR(-ENOMEM);
1056 }
1057 tpg->lport = lport;
1058 tpg->lport_tpgt = tpgt;
1059 /*
1060 * By default allow READ-ONLY TPG demo-mode access w/ cached dynamic
1061 * NodeACLs
1062 */
1063 QLA_TPG_ATTRIB(tpg)->generate_node_acls = 1;
1064 QLA_TPG_ATTRIB(tpg)->demo_mode_write_protect = 1;
1065 QLA_TPG_ATTRIB(tpg)->cache_dynamic_acls = 1;
1066
1067 ret = core_tpg_register(&tcm_qla2xxx_fabric_configfs->tf_ops, wwn,
1068 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1069 if (ret < 0) {
1070 kfree(tpg);
1071 return NULL;
1072 }
1073 /*
1074 * Setup local TPG=1 pointer for non NPIV mode.
1075 */
1076 if (lport->qla_npiv_vp == NULL)
1077 lport->tpg_1 = tpg;
1078
1079 return &tpg->se_tpg;
1080}
1081
1082static void tcm_qla2xxx_drop_tpg(struct se_portal_group *se_tpg)
1083{
1084 struct tcm_qla2xxx_tpg *tpg = container_of(se_tpg,
1085 struct tcm_qla2xxx_tpg, se_tpg);
1086 struct tcm_qla2xxx_lport *lport = tpg->lport;
1087 struct scsi_qla_host *vha = lport->qla_vha;
1088 struct qla_hw_data *ha = vha->hw;
1089 /*
1090 * Call into qla2x_target.c LLD logic to shutdown the active
1091 * FC Nexuses and disable target mode operation for this qla_hw_data
1092 */
1093 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stop)
1094 qlt_stop_phase1(ha->tgt.qla_tgt);
1095
1096 core_tpg_deregister(se_tpg);
1097 /*
1098 * Clear local TPG=1 pointer for non NPIV mode.
1099 */
1100 if (lport->qla_npiv_vp == NULL)
1101 lport->tpg_1 = NULL;
1102
1103 kfree(tpg);
1104}
1105
1106static struct se_portal_group *tcm_qla2xxx_npiv_make_tpg(
1107 struct se_wwn *wwn,
1108 struct config_group *group,
1109 const char *name)
1110{
1111 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1112 struct tcm_qla2xxx_lport, lport_wwn);
1113 struct tcm_qla2xxx_tpg *tpg;
1114 unsigned long tpgt;
1115 int ret;
1116
1117 if (strstr(name, "tpgt_") != name)
1118 return ERR_PTR(-EINVAL);
1119 if (kstrtoul(name + 5, 10, &tpgt) || tpgt > USHRT_MAX)
1120 return ERR_PTR(-EINVAL);
1121
1122 tpg = kzalloc(sizeof(struct tcm_qla2xxx_tpg), GFP_KERNEL);
1123 if (!tpg) {
1124 pr_err("Unable to allocate struct tcm_qla2xxx_tpg\n");
1125 return ERR_PTR(-ENOMEM);
1126 }
1127 tpg->lport = lport;
1128 tpg->lport_tpgt = tpgt;
1129
1130 ret = core_tpg_register(&tcm_qla2xxx_npiv_fabric_configfs->tf_ops, wwn,
1131 &tpg->se_tpg, tpg, TRANSPORT_TPG_TYPE_NORMAL);
1132 if (ret < 0) {
1133 kfree(tpg);
1134 return NULL;
1135 }
1136 return &tpg->se_tpg;
1137}
1138
1139/*
1140 * Expected to be called with struct qla_hw_data->hardware_lock held
1141 */
1142static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_s_id(
1143 scsi_qla_host_t *vha,
1144 const uint8_t *s_id)
1145{
1146 struct qla_hw_data *ha = vha->hw;
1147 struct tcm_qla2xxx_lport *lport;
1148 struct se_node_acl *se_nacl;
1149 struct tcm_qla2xxx_nacl *nacl;
1150 u32 key;
1151
1152 lport = ha->tgt.target_lport_ptr;
1153 if (!lport) {
1154 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1155 dump_stack();
1156 return NULL;
1157 }
1158
1159 key = (((unsigned long)s_id[0] << 16) |
1160 ((unsigned long)s_id[1] << 8) |
1161 (unsigned long)s_id[2]);
1162 pr_debug("find_sess_by_s_id: 0x%06x\n", key);
1163
1164 se_nacl = btree_lookup32(&lport->lport_fcport_map, key);
1165 if (!se_nacl) {
1166 pr_debug("Unable to locate s_id: 0x%06x\n", key);
1167 return NULL;
1168 }
1169 pr_debug("find_sess_by_s_id: located se_nacl: %p, initiatorname: %s\n",
1170 se_nacl, se_nacl->initiatorname);
1171
1172 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1173 if (!nacl->qla_tgt_sess) {
1174 pr_err("Unable to locate struct qla_tgt_sess\n");
1175 return NULL;
1176 }
1177
1178 return nacl->qla_tgt_sess;
1179}
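/*
 * Worked example (illustrative): a 24-bit FC port_id of 0x01aa02
 * arrives here as s_id[] = { 0x01, 0xaa, 0x02 }, so
 *
 *   key = (0x01 << 16) | (0xaa << 8) | 0x02 = 0x01aa02
 *
 * which is the same value stored in nacl->nport_id and used as the
 * btree_insert32()/btree_lookup32() key in lport->lport_fcport_map.
 */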
1180
1181/*
1182 * Expected to be called with struct qla_hw_data->hardware_lock held
1183 */
1184static void tcm_qla2xxx_set_sess_by_s_id(
1185 struct tcm_qla2xxx_lport *lport,
1186 struct se_node_acl *new_se_nacl,
1187 struct tcm_qla2xxx_nacl *nacl,
1188 struct se_session *se_sess,
1189 struct qla_tgt_sess *qla_tgt_sess,
1190 uint8_t *s_id)
1191{
1192 u32 key;
1193 void *slot;
1194 int rc;
1195
1196 key = (((unsigned long)s_id[0] << 16) |
1197 ((unsigned long)s_id[1] << 8) |
1198 (unsigned long)s_id[2]);
1199 pr_debug("set_sess_by_s_id: %06x\n", key);
1200
1201 slot = btree_lookup32(&lport->lport_fcport_map, key);
1202 if (!slot) {
1203 if (new_se_nacl) {
1204 pr_debug("Setting up new fc_port entry to new_se_nacl\n");
1205 nacl->nport_id = key;
1206 rc = btree_insert32(&lport->lport_fcport_map, key,
1207 new_se_nacl, GFP_ATOMIC);
1208 if (rc)
1209 printk(KERN_ERR "Unable to insert s_id into fcport_map: %06x\n",
1210 (int)key);
1211 } else {
1212 pr_debug("Wiping nonexisting fc_port entry\n");
1213 }
1214
1215 qla_tgt_sess->se_sess = se_sess;
1216 nacl->qla_tgt_sess = qla_tgt_sess;
1217 return;
1218 }
1219
1220 if (nacl->qla_tgt_sess) {
1221 if (new_se_nacl == NULL) {
1222 pr_debug("Clearing existing nacl->qla_tgt_sess and fc_port entry\n");
1223 btree_remove32(&lport->lport_fcport_map, key);
1224 nacl->qla_tgt_sess = NULL;
1225 return;
1226 }
1227 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_port entry\n");
1228 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1229 qla_tgt_sess->se_sess = se_sess;
1230 nacl->qla_tgt_sess = qla_tgt_sess;
1231 return;
1232 }
1233
1234 if (new_se_nacl == NULL) {
1235 pr_debug("Clearing existing fc_port entry\n");
1236 btree_remove32(&lport->lport_fcport_map, key);
1237 return;
1238 }
1239
1240 pr_debug("Replacing existing fc_port entry w/o active nacl->qla_tgt_sess\n");
1241 btree_update32(&lport->lport_fcport_map, key, new_se_nacl);
1242 qla_tgt_sess->se_sess = se_sess;
1243 nacl->qla_tgt_sess = qla_tgt_sess;
1244
1245 pr_debug("Setup nacl->qla_tgt_sess %p by s_id for se_nacl: %p, initiatorname: %s\n",
1246 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1247}
1248
1249/*
1250 * Expected to be called with struct qla_hw_data->hardware_lock held
1251 */
1252static struct qla_tgt_sess *tcm_qla2xxx_find_sess_by_loop_id(
1253 scsi_qla_host_t *vha,
1254 const uint16_t loop_id)
1255{
1256 struct qla_hw_data *ha = vha->hw;
1257 struct tcm_qla2xxx_lport *lport;
1258 struct se_node_acl *se_nacl;
1259 struct tcm_qla2xxx_nacl *nacl;
1260 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1261
1262 lport = ha->tgt.target_lport_ptr;
1263 if (!lport) {
1264 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1265 dump_stack();
1266 return NULL;
1267 }
1268
1269 pr_debug("find_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1270
1271 fc_loopid = lport->lport_loopid_map + loop_id;
1272 se_nacl = fc_loopid->se_nacl;
1273 if (!se_nacl) {
1274 pr_debug("Unable to locate se_nacl by loop_id: 0x%04x\n",
1275 loop_id);
1276 return NULL;
1277 }
1278
1279 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1280
1281 if (!nacl->qla_tgt_sess) {
1282 pr_err("Unable to locate struct qla_tgt_sess\n");
1283 return NULL;
1284 }
1285
1286 return nacl->qla_tgt_sess;
1287}
1288
1289/*
1290 * Expected to be called with struct qla_hw_data->hardware_lock held
1291 */
1292static void tcm_qla2xxx_set_sess_by_loop_id(
1293 struct tcm_qla2xxx_lport *lport,
1294 struct se_node_acl *new_se_nacl,
1295 struct tcm_qla2xxx_nacl *nacl,
1296 struct se_session *se_sess,
1297 struct qla_tgt_sess *qla_tgt_sess,
1298 uint16_t loop_id)
1299{
1300 struct se_node_acl *saved_nacl;
1301 struct tcm_qla2xxx_fc_loopid *fc_loopid;
1302
1303 pr_debug("set_sess_by_loop_id: Using loop_id: 0x%04x\n", loop_id);
1304
1305 fc_loopid = &((struct tcm_qla2xxx_fc_loopid *)
1306 lport->lport_loopid_map)[loop_id];
1307
1308 saved_nacl = fc_loopid->se_nacl;
1309 if (!saved_nacl) {
1310 pr_debug("Setting up new fc_loopid->se_nacl to new_se_nacl\n");
1311 fc_loopid->se_nacl = new_se_nacl;
1312 if (qla_tgt_sess->se_sess != se_sess)
1313 qla_tgt_sess->se_sess = se_sess;
1314 if (nacl->qla_tgt_sess != qla_tgt_sess)
1315 nacl->qla_tgt_sess = qla_tgt_sess;
1316 return;
1317 }
1318
1319 if (nacl->qla_tgt_sess) {
1320 if (new_se_nacl == NULL) {
1321 pr_debug("Clearing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1322 fc_loopid->se_nacl = NULL;
1323 nacl->qla_tgt_sess = NULL;
1324 return;
1325 }
1326
1327 pr_debug("Replacing existing nacl->qla_tgt_sess and fc_loopid->se_nacl\n");
1328 fc_loopid->se_nacl = new_se_nacl;
1329 if (qla_tgt_sess->se_sess != se_sess)
1330 qla_tgt_sess->se_sess = se_sess;
1331 if (nacl->qla_tgt_sess != qla_tgt_sess)
1332 nacl->qla_tgt_sess = qla_tgt_sess;
1333 return;
1334 }
1335
1336 if (new_se_nacl == NULL) {
1337 pr_debug("Clearing fc_loopid->se_nacl\n");
1338 fc_loopid->se_nacl = NULL;
1339 return;
1340 }
1341
1342 pr_debug("Replacing existing fc_loopid->se_nacl w/o active nacl->qla_tgt_sess\n");
1343 fc_loopid->se_nacl = new_se_nacl;
1344 if (qla_tgt_sess->se_sess != se_sess)
1345 qla_tgt_sess->se_sess = se_sess;
1346 if (nacl->qla_tgt_sess != qla_tgt_sess)
1347 nacl->qla_tgt_sess = qla_tgt_sess;
1348
1349 pr_debug("Setup nacl->qla_tgt_sess %p by loop_id for se_nacl: %p, initiatorname: %s\n",
1350 nacl->qla_tgt_sess, new_se_nacl, new_se_nacl->initiatorname);
1351}
1352
1353/*
1354 * Should always be called with qla_hw_data->hardware_lock held.
1355 */
1356static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *lport,
1357 struct tcm_qla2xxx_nacl *nacl, struct qla_tgt_sess *sess)
1358{
1359 struct se_session *se_sess = sess->se_sess;
1360 unsigned char be_sid[3];
1361
1362 be_sid[0] = sess->s_id.b.domain;
1363 be_sid[1] = sess->s_id.b.area;
1364 be_sid[2] = sess->s_id.b.al_pa;
1365
1366 tcm_qla2xxx_set_sess_by_s_id(lport, NULL, nacl, se_sess,
1367 sess, be_sid);
1368 tcm_qla2xxx_set_sess_by_loop_id(lport, NULL, nacl, se_sess,
1369 sess, sess->loop_id);
1370}
1371
1372static void tcm_qla2xxx_free_session(struct qla_tgt_sess *sess)
1373{
1374 struct qla_tgt *tgt = sess->tgt;
1375 struct qla_hw_data *ha = tgt->ha;
1376 struct se_session *se_sess;
1377 struct se_node_acl *se_nacl;
1378 struct tcm_qla2xxx_lport *lport;
1379 struct tcm_qla2xxx_nacl *nacl;
1380
1381 BUG_ON(in_interrupt());
1382
1383 se_sess = sess->se_sess;
1384 if (!se_sess) {
1385 pr_err("struct qla_tgt_sess->se_sess is NULL\n");
1386 dump_stack();
1387 return;
1388 }
1389 se_nacl = se_sess->se_node_acl;
1390 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1391
1392 lport = ha->tgt.target_lport_ptr;
1393 if (!lport) {
1394 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1395 dump_stack();
1396 return;
1397 }
1398 target_wait_for_sess_cmds(se_sess, 0);
1399
1400 transport_deregister_session_configfs(sess->se_sess);
1401 transport_deregister_session(sess->se_sess);
1402}
1403
1404/*
1405 * Called via qlt_create_sess():ha->qla2x_tmpl->check_initiator_node_acl()
1406 * to locate struct se_node_acl
1407 */
1408static int tcm_qla2xxx_check_initiator_node_acl(
1409 scsi_qla_host_t *vha,
1410 unsigned char *fc_wwpn,
1411 void *qla_tgt_sess,
1412 uint8_t *s_id,
1413 uint16_t loop_id)
1414{
1415 struct qla_hw_data *ha = vha->hw;
1416 struct tcm_qla2xxx_lport *lport;
1417 struct tcm_qla2xxx_tpg *tpg;
1418 struct tcm_qla2xxx_nacl *nacl;
1419 struct se_portal_group *se_tpg;
1420 struct se_node_acl *se_nacl;
1421 struct se_session *se_sess;
1422 struct qla_tgt_sess *sess = qla_tgt_sess;
1423 unsigned char port_name[36];
1424 unsigned long flags;
1425
1426 lport = ha->tgt.target_lport_ptr;
1427 if (!lport) {
1428 pr_err("Unable to locate struct tcm_qla2xxx_lport\n");
1429 dump_stack();
1430 return -EINVAL;
1431 }
1432 /*
1433 * Locate the TPG=1 reference..
1434 */
1435 tpg = lport->tpg_1;
1436 if (!tpg) {
1437 pr_err("Unable to lcoate struct tcm_qla2xxx_lport->tpg_1\n");
1438 return -EINVAL;
1439 }
1440 se_tpg = &tpg->se_tpg;
1441
1442 se_sess = transport_init_session();
1443 if (IS_ERR(se_sess)) {
1444 pr_err("Unable to initialize struct se_session\n");
1445 return PTR_ERR(se_sess);
1446 }
1447 /*
1448 * Format the FCP Initiator port_name into colon separated values to
1449 * match the format used by tcm_qla2xxx explicit ConfigFS NodeACLs.
1450 */
1451 memset(&port_name, 0, 36);
1452 snprintf(port_name, 36, "%02x:%02x:%02x:%02x:%02x:%02x:%02x:%02x",
1453 fc_wwpn[0], fc_wwpn[1], fc_wwpn[2], fc_wwpn[3], fc_wwpn[4],
1454 fc_wwpn[5], fc_wwpn[6], fc_wwpn[7]);
1455 /*
1456 * Locate our struct se_node_acl either from an explicit NodeACL created
1457 * via ConfigFS, or via running in TPG demo mode.
1458 */
1459 se_sess->se_node_acl = core_tpg_check_initiator_node_acl(se_tpg,
1460 port_name);
1461 if (!se_sess->se_node_acl) {
1462 transport_free_session(se_sess);
1463 return -EINVAL;
1464 }
1465 se_nacl = se_sess->se_node_acl;
1466 nacl = container_of(se_nacl, struct tcm_qla2xxx_nacl, se_node_acl);
1467 /*
1468 * And now setup the new se_nacl and session pointers into our HW lport
1469 * mappings for fabric S_ID and LOOP_ID.
1470 */
1471 spin_lock_irqsave(&ha->hardware_lock, flags);
1472 tcm_qla2xxx_set_sess_by_s_id(lport, se_nacl, nacl, se_sess,
1473 qla_tgt_sess, s_id);
1474 tcm_qla2xxx_set_sess_by_loop_id(lport, se_nacl, nacl, se_sess,
1475 qla_tgt_sess, loop_id);
1476 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1477 /*
1478 * Finally register the new FC Nexus with TCM
1479 */
1480 __transport_register_session(se_nacl->se_tpg, se_nacl, se_sess, sess);
1481
1482 return 0;
1483}
1484
1485/*
1486 * Calls into tcm_qla2xxx used by qla2xxx LLD I/O path.
1487 */
1488static struct qla_tgt_func_tmpl tcm_qla2xxx_template = {
1489 .handle_cmd = tcm_qla2xxx_handle_cmd,
1490 .handle_data = tcm_qla2xxx_handle_data,
1491 .handle_tmr = tcm_qla2xxx_handle_tmr,
1492 .free_cmd = tcm_qla2xxx_free_cmd,
1493 .free_mcmd = tcm_qla2xxx_free_mcmd,
1494 .free_session = tcm_qla2xxx_free_session,
1495 .check_initiator_node_acl = tcm_qla2xxx_check_initiator_node_acl,
1496 .find_sess_by_s_id = tcm_qla2xxx_find_sess_by_s_id,
1497 .find_sess_by_loop_id = tcm_qla2xxx_find_sess_by_loop_id,
1498 .clear_nacl_from_fcport_map = tcm_qla2xxx_clear_nacl_from_fcport_map,
1499 .put_sess = tcm_qla2xxx_put_sess,
1500 .shutdown_sess = tcm_qla2xxx_shutdown_sess,
1501};
1502
1503static int tcm_qla2xxx_init_lport(struct tcm_qla2xxx_lport *lport)
1504{
1505 int rc;
1506
1507 rc = btree_init32(&lport->lport_fcport_map);
1508 if (rc) {
1509 pr_err("Unable to initialize lport->lport_fcport_map btree\n");
1510 return rc;
1511 }
1512
1513 lport->lport_loopid_map = vmalloc(sizeof(struct tcm_qla2xxx_fc_loopid) *
1514 65536);
1515 if (!lport->lport_loopid_map) {
1516 pr_err("Unable to allocate lport->lport_loopid_map of %zu bytes\n",
1517 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1518 btree_destroy32(&lport->lport_fcport_map);
1519 return -ENOMEM;
1520 }
1521 memset(lport->lport_loopid_map, 0, sizeof(struct tcm_qla2xxx_fc_loopid)
1522 * 65536);
1523 pr_debug("qla2xxx: Allocated lport_loopid_map of %zu bytes\n",
1524 sizeof(struct tcm_qla2xxx_fc_loopid) * 65536);
1525 return 0;
1526}
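/*
 * Size note (illustrative): the loop ID map covers the full 16-bit
 * loop ID space, and struct tcm_qla2xxx_fc_loopid holds a single
 * pointer, so on a 64-bit kernel the vmalloc above is
 * 65536 * 8 bytes = 512 KiB per lport.
 */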
1527
1528static int tcm_qla2xxx_lport_register_cb(struct scsi_qla_host *vha)
1529{
1530 struct qla_hw_data *ha = vha->hw;
1531 struct tcm_qla2xxx_lport *lport;
1532 /*
1533 * Setup local pointer to vha, NPIV VP pointer (if present) and
1534 * vha->tcm_lport pointer
1535 */
1536 lport = (struct tcm_qla2xxx_lport *)ha->tgt.target_lport_ptr;
1537 lport->qla_vha = vha;
1538
1539 return 0;
1540}
1541
1542static struct se_wwn *tcm_qla2xxx_make_lport(
1543 struct target_fabric_configfs *tf,
1544 struct config_group *group,
1545 const char *name)
1546{
1547 struct tcm_qla2xxx_lport *lport;
1548 u64 wwpn;
1549 int ret = -ENODEV;
1550
1551 if (tcm_qla2xxx_parse_wwn(name, &wwpn, 1) < 0)
1552 return ERR_PTR(-EINVAL);
1553
1554 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1555 if (!lport) {
1556 pr_err("Unable to allocate struct tcm_qla2xxx_lport\n");
1557 return ERR_PTR(-ENOMEM);
1558 }
1559 lport->lport_wwpn = wwpn;
1560 tcm_qla2xxx_format_wwn(&lport->lport_name[0], TCM_QLA2XXX_NAMELEN,
1561 wwpn);
1562
1563 ret = tcm_qla2xxx_init_lport(lport);
1564 if (ret != 0)
1565 goto out;
1566
1567 ret = qlt_lport_register(&tcm_qla2xxx_template, wwpn,
1568 tcm_qla2xxx_lport_register_cb, lport);
1569 if (ret != 0)
1570 goto out_lport;
1571
1572 return &lport->lport_wwn;
1573out_lport:
1574 vfree(lport->lport_loopid_map);
1575 btree_destroy32(&lport->lport_fcport_map);
1576out:
1577 kfree(lport);
1578 return ERR_PTR(ret);
1579}
1580
1581static void tcm_qla2xxx_drop_lport(struct se_wwn *wwn)
1582{
1583 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1584 struct tcm_qla2xxx_lport, lport_wwn);
1585 struct scsi_qla_host *vha = lport->qla_vha;
1586 struct qla_hw_data *ha = vha->hw;
1587 struct se_node_acl *node;
1588 u32 key = 0;
1589
1590 /*
1591 * Call into qla2x_target.c LLD logic to complete the
1592 * shutdown of struct qla_tgt after the call to
1593 * qlt_stop_phase1() from tcm_qla2xxx_drop_tpg() above.
1594 */
1595 if (ha->tgt.qla_tgt && !ha->tgt.qla_tgt->tgt_stopped)
1596 qlt_stop_phase2(ha->tgt.qla_tgt);
1597
1598 qlt_lport_deregister(vha);
1599
1600 vfree(lport->lport_loopid_map);
1601 btree_for_each_safe32(&lport->lport_fcport_map, key, node)
1602 btree_remove32(&lport->lport_fcport_map, key);
1603 btree_destroy32(&lport->lport_fcport_map);
1604 kfree(lport);
1605}
1606
1607static struct se_wwn *tcm_qla2xxx_npiv_make_lport(
1608 struct target_fabric_configfs *tf,
1609 struct config_group *group,
1610 const char *name)
1611{
1612 struct tcm_qla2xxx_lport *lport;
1613 u64 npiv_wwpn, npiv_wwnn;
1614 int ret;
1615
1616 if (tcm_qla2xxx_npiv_parse_wwn(name, strlen(name)+1,
1617 &npiv_wwpn, &npiv_wwnn) < 0)
1618 return ERR_PTR(-EINVAL);
1619
1620 lport = kzalloc(sizeof(struct tcm_qla2xxx_lport), GFP_KERNEL);
1621 if (!lport) {
1622 pr_err("Unable to allocate struct tcm_qla2xxx_lport for NPIV\n");
1623 return ERR_PTR(-ENOMEM);
1624 }
1625 lport->lport_npiv_wwpn = npiv_wwpn;
1626 lport->lport_npiv_wwnn = npiv_wwnn;
1627 tcm_qla2xxx_npiv_format_wwn(&lport->lport_npiv_name[0],
1628 TCM_QLA2XXX_NAMELEN, npiv_wwpn, npiv_wwnn);
1629
1630/* FIXME: tcm_qla2xxx_npiv_make_lport */
1631 ret = -ENOSYS;
1632 if (ret != 0)
1633 goto out;
1634
1635 return &lport->lport_wwn;
1636out:
1637 kfree(lport);
1638 return ERR_PTR(ret);
1639}
1640
1641static void tcm_qla2xxx_npiv_drop_lport(struct se_wwn *wwn)
1642{
1643 struct tcm_qla2xxx_lport *lport = container_of(wwn,
1644 struct tcm_qla2xxx_lport, lport_wwn);
1645 struct scsi_qla_host *vha = lport->qla_vha;
1646 struct Scsi_Host *sh = vha->host;
1647 /*
1648 * Notify libfc that we want to release the lport->npiv_vport
1649 */
1650 fc_vport_terminate(lport->npiv_vport);
1651
1652 scsi_host_put(sh);
1653 kfree(lport);
1654}
1655
1656
1657static ssize_t tcm_qla2xxx_wwn_show_attr_version(
1658 struct target_fabric_configfs *tf,
1659 char *page)
1660{
1661 return sprintf(page,
1662 "TCM QLOGIC QLA2XXX NPIV capable fabric module %s on %s/%s on "
1663 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1664 utsname()->machine);
1665}
1666
1667TF_WWN_ATTR_RO(tcm_qla2xxx, version);
1668
1669static struct configfs_attribute *tcm_qla2xxx_wwn_attrs[] = {
1670 &tcm_qla2xxx_wwn_version.attr,
1671 NULL,
1672};
1673
1674static struct target_core_fabric_ops tcm_qla2xxx_ops = {
1675 .get_fabric_name = tcm_qla2xxx_get_fabric_name,
1676 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1677 .tpg_get_wwn = tcm_qla2xxx_get_fabric_wwn,
1678 .tpg_get_tag = tcm_qla2xxx_get_tag,
1679 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1680 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1681 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1682 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1683 .tpg_check_demo_mode = tcm_qla2xxx_check_demo_mode,
1684 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_demo_mode_cache,
1685 .tpg_check_demo_mode_write_protect =
1686 tcm_qla2xxx_check_demo_write_protect,
1687 .tpg_check_prod_mode_write_protect =
1688 tcm_qla2xxx_check_prod_write_protect,
1689 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1690 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1691 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1692 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1693 .new_cmd_map = NULL,
1694 .check_stop_free = tcm_qla2xxx_check_stop_free,
1695 .release_cmd = tcm_qla2xxx_release_cmd,
1696 .put_session = tcm_qla2xxx_put_session,
1697 .shutdown_session = tcm_qla2xxx_shutdown_session,
1698 .close_session = tcm_qla2xxx_close_session,
1699 .sess_get_index = tcm_qla2xxx_sess_get_index,
1700 .sess_get_initiator_sid = NULL,
1701 .write_pending = tcm_qla2xxx_write_pending,
1702 .write_pending_status = tcm_qla2xxx_write_pending_status,
1703 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1704 .get_task_tag = tcm_qla2xxx_get_task_tag,
1705 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1706 .queue_data_in = tcm_qla2xxx_queue_data_in,
1707 .queue_status = tcm_qla2xxx_queue_status,
1708 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1709 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1710 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1711 /*
1712 * Setup function pointers for generic logic in
1713 * target_core_fabric_configfs.c
1714 */
1715 .fabric_make_wwn = tcm_qla2xxx_make_lport,
1716 .fabric_drop_wwn = tcm_qla2xxx_drop_lport,
1717 .fabric_make_tpg = tcm_qla2xxx_make_tpg,
1718 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1719 .fabric_post_link = NULL,
1720 .fabric_pre_unlink = NULL,
1721 .fabric_make_np = NULL,
1722 .fabric_drop_np = NULL,
1723 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1724 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1725};
1726
1727static struct target_core_fabric_ops tcm_qla2xxx_npiv_ops = {
1728 .get_fabric_name = tcm_qla2xxx_npiv_get_fabric_name,
1729 .get_fabric_proto_ident = tcm_qla2xxx_get_fabric_proto_ident,
1730 .tpg_get_wwn = tcm_qla2xxx_npiv_get_fabric_wwn,
1731 .tpg_get_tag = tcm_qla2xxx_get_tag,
1732 .tpg_get_default_depth = tcm_qla2xxx_get_default_depth,
1733 .tpg_get_pr_transport_id = tcm_qla2xxx_get_pr_transport_id,
1734 .tpg_get_pr_transport_id_len = tcm_qla2xxx_get_pr_transport_id_len,
1735 .tpg_parse_pr_out_transport_id = tcm_qla2xxx_parse_pr_out_transport_id,
1736 .tpg_check_demo_mode = tcm_qla2xxx_check_false,
1737 .tpg_check_demo_mode_cache = tcm_qla2xxx_check_true,
1738 .tpg_check_demo_mode_write_protect = tcm_qla2xxx_check_true,
1739 .tpg_check_prod_mode_write_protect = tcm_qla2xxx_check_false,
1740 .tpg_check_demo_mode_login_only = tcm_qla2xxx_check_true,
1741 .tpg_alloc_fabric_acl = tcm_qla2xxx_alloc_fabric_acl,
1742 .tpg_release_fabric_acl = tcm_qla2xxx_release_fabric_acl,
1743 .tpg_get_inst_index = tcm_qla2xxx_tpg_get_inst_index,
1744 .release_cmd = tcm_qla2xxx_release_cmd,
1745 .put_session = tcm_qla2xxx_put_session,
1746 .shutdown_session = tcm_qla2xxx_shutdown_session,
1747 .close_session = tcm_qla2xxx_close_session,
1748 .sess_get_index = tcm_qla2xxx_sess_get_index,
1749 .sess_get_initiator_sid = NULL,
1750 .write_pending = tcm_qla2xxx_write_pending,
1751 .write_pending_status = tcm_qla2xxx_write_pending_status,
1752 .set_default_node_attributes = tcm_qla2xxx_set_default_node_attrs,
1753 .get_task_tag = tcm_qla2xxx_get_task_tag,
1754 .get_cmd_state = tcm_qla2xxx_get_cmd_state,
1755 .queue_data_in = tcm_qla2xxx_queue_data_in,
1756 .queue_status = tcm_qla2xxx_queue_status,
1757 .queue_tm_rsp = tcm_qla2xxx_queue_tm_rsp,
1758 .get_fabric_sense_len = tcm_qla2xxx_get_fabric_sense_len,
1759 .set_fabric_sense_len = tcm_qla2xxx_set_fabric_sense_len,
1760 /*
1761 * Setup function pointers for generic logic in
1762 * target_core_fabric_configfs.c
1763 */
1764 .fabric_make_wwn = tcm_qla2xxx_npiv_make_lport,
1765 .fabric_drop_wwn = tcm_qla2xxx_npiv_drop_lport,
1766 .fabric_make_tpg = tcm_qla2xxx_npiv_make_tpg,
1767 .fabric_drop_tpg = tcm_qla2xxx_drop_tpg,
1768 .fabric_post_link = NULL,
1769 .fabric_pre_unlink = NULL,
1770 .fabric_make_np = NULL,
1771 .fabric_drop_np = NULL,
1772 .fabric_make_nodeacl = tcm_qla2xxx_make_nodeacl,
1773 .fabric_drop_nodeacl = tcm_qla2xxx_drop_nodeacl,
1774};
1775
1776static int tcm_qla2xxx_register_configfs(void)
1777{
1778 struct target_fabric_configfs *fabric, *npiv_fabric;
1779 int ret;
1780
1781 pr_debug("TCM QLOGIC QLA2XXX fabric module %s on %s/%s on "
1782 UTS_RELEASE"\n", TCM_QLA2XXX_VERSION, utsname()->sysname,
1783 utsname()->machine);
1784 /*
1785 * Register the top level struct config_item_type with TCM core
1786 */
1787 fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx");
1788 if (IS_ERR(fabric)) {
1789 pr_err("target_fabric_configfs_init() failed\n");
1790 return PTR_ERR(fabric);
1791 }
1792 /*
1793 * Setup fabric->tf_ops from our local tcm_qla2xxx_ops
1794 */
1795 fabric->tf_ops = tcm_qla2xxx_ops;
1796 /*
1797 * Setup default attribute lists for various fabric->tf_cit_tmpl
1798 */
1799 TF_CIT_TMPL(fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1800 TF_CIT_TMPL(fabric)->tfc_tpg_base_cit.ct_attrs = tcm_qla2xxx_tpg_attrs;
1801 TF_CIT_TMPL(fabric)->tfc_tpg_attrib_cit.ct_attrs =
1802 tcm_qla2xxx_tpg_attrib_attrs;
1803 TF_CIT_TMPL(fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1804 TF_CIT_TMPL(fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1805 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1806 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1807 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1808 TF_CIT_TMPL(fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1809 /*
1810 * Register the fabric for use within TCM
1811 */
1812 ret = target_fabric_configfs_register(fabric);
1813 if (ret < 0) {
1814 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1815 return ret;
1816 }
1817 /*
1818 * Setup our local pointer to *fabric
1819 */
1820 tcm_qla2xxx_fabric_configfs = fabric;
1821 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_fabric_configfs\n");
1822
1823 /*
1824 * Register the top level struct config_item_type for NPIV with TCM core
1825 */
1826 npiv_fabric = target_fabric_configfs_init(THIS_MODULE, "qla2xxx_npiv");
1827 if (IS_ERR(npiv_fabric)) {
1828 pr_err("target_fabric_configfs_init() failed\n");
1829 ret = PTR_ERR(npiv_fabric);
1830 goto out_fabric;
1831 }
1832 /*
1833 * Setup fabric->tf_ops from our local tcm_qla2xxx_npiv_ops
1834 */
1835 npiv_fabric->tf_ops = tcm_qla2xxx_npiv_ops;
1836 /*
1837 * Setup default attribute lists for various npiv_fabric->tf_cit_tmpl
1838 */
1839 TF_CIT_TMPL(npiv_fabric)->tfc_wwn_cit.ct_attrs = tcm_qla2xxx_wwn_attrs;
1840 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_base_cit.ct_attrs = NULL;
1841 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_attrib_cit.ct_attrs = NULL;
1842 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_param_cit.ct_attrs = NULL;
1843 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_np_base_cit.ct_attrs = NULL;
1844 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_base_cit.ct_attrs = NULL;
1845 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_attrib_cit.ct_attrs = NULL;
1846 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_auth_cit.ct_attrs = NULL;
1847 TF_CIT_TMPL(npiv_fabric)->tfc_tpg_nacl_param_cit.ct_attrs = NULL;
1848 /*
1849 * Register the npiv_fabric for use within TCM
1850 */
1851 ret = target_fabric_configfs_register(npiv_fabric);
1852 if (ret < 0) {
1853 pr_err("target_fabric_configfs_register() failed for TCM_QLA2XXX\n");
1854 goto out_fabric;
1855 }
1856 /*
1857 * Setup our local pointer to *npiv_fabric
1858 */
1859 tcm_qla2xxx_npiv_fabric_configfs = npiv_fabric;
1860 pr_debug("TCM_QLA2XXX[0] - Set fabric -> tcm_qla2xxx_npiv_fabric_configfs\n");
1861
1862 tcm_qla2xxx_free_wq = alloc_workqueue("tcm_qla2xxx_free",
1863 WQ_MEM_RECLAIM, 0);
1864 if (!tcm_qla2xxx_free_wq) {
1865 ret = -ENOMEM;
1866 goto out_fabric_npiv;
1867 }
1868
1869 tcm_qla2xxx_cmd_wq = alloc_workqueue("tcm_qla2xxx_cmd", 0, 0);
1870 if (!tcm_qla2xxx_cmd_wq) {
1871 ret = -ENOMEM;
1872 goto out_free_wq;
1873 }
1874
1875 return 0;
1876
1877out_free_wq:
1878 destroy_workqueue(tcm_qla2xxx_free_wq);
1879out_fabric_npiv:
1880 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1881out_fabric:
1882 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1883 return ret;
1884}
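/*
 * Illustrative result, assuming the standard TCM configfs mount point:
 * after a successful tcm_qla2xxx_register_configfs(), both fabrics are
 * visible to userspace (e.g. targetcli) as
 *
 *   /sys/kernel/config/target/qla2xxx/
 *   /sys/kernel/config/target/qla2xxx_npiv/
 *
 * under which WWPN and tpgt_N directories are created via
 * tcm_qla2xxx_make_lport() / tcm_qla2xxx_make_tpg() and their NPIV
 * counterparts.
 */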
1885
1886static void tcm_qla2xxx_deregister_configfs(void)
1887{
1888 destroy_workqueue(tcm_qla2xxx_cmd_wq);
1889 destroy_workqueue(tcm_qla2xxx_free_wq);
1890
1891 target_fabric_configfs_deregister(tcm_qla2xxx_fabric_configfs);
1892 tcm_qla2xxx_fabric_configfs = NULL;
1893 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_fabric_configfs\n");
1894
1895 target_fabric_configfs_deregister(tcm_qla2xxx_npiv_fabric_configfs);
1896 tcm_qla2xxx_npiv_fabric_configfs = NULL;
1897 pr_debug("TCM_QLA2XXX[0] - Cleared tcm_qla2xxx_npiv_fabric_configfs\n");
1898}
1899
1900static int __init tcm_qla2xxx_init(void)
1901{
1902 int ret;
1903
1904 ret = tcm_qla2xxx_register_configfs();
1905 if (ret < 0)
1906 return ret;
1907
1908 return 0;
1909}
1910
1911static void __exit tcm_qla2xxx_exit(void)
1912{
1913 tcm_qla2xxx_deregister_configfs();
1914}
1915
1916MODULE_DESCRIPTION("TCM QLA2XXX series NPIV enabled fabric driver");
1917MODULE_LICENSE("GPL");
1918module_init(tcm_qla2xxx_init);
1919module_exit(tcm_qla2xxx_exit);
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.h b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
new file mode 100644
index 000000000000..825498103352
--- /dev/null
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.h
@@ -0,0 +1,82 @@
1#include <target/target_core_base.h>
2#include <linux/btree.h>
3
4#define TCM_QLA2XXX_VERSION "v0.1"
5/* length of ASCII WWPNs including pad */
6#define TCM_QLA2XXX_NAMELEN 32
7/* length of ASCII NPIV 'WWPN+WWNN' including pad */
8#define TCM_QLA2XXX_NPIV_NAMELEN 66
9
10#include "qla_target.h"
11
12struct tcm_qla2xxx_nacl {
13 /* From libfc struct fc_rport->port_id */
14 u32 nport_id;
15 /* Binary World Wide unique Node Name for remote FC Initiator Nport */
16 u64 nport_wwnn;
17 /* ASCII formatted WWPN for FC Initiator Nport */
18 char nport_name[TCM_QLA2XXX_NAMELEN];
19 /* Pointer to qla_tgt_sess */
20 struct qla_tgt_sess *qla_tgt_sess;
21 /* Pointer to TCM FC nexus */
22 struct se_session *nport_nexus;
23 /* Returned by tcm_qla2xxx_make_nodeacl() */
24 struct se_node_acl se_node_acl;
25};
26
27struct tcm_qla2xxx_tpg_attrib {
28 int generate_node_acls;
29 int cache_dynamic_acls;
30 int demo_mode_write_protect;
31 int prod_mode_write_protect;
32};
33
34struct tcm_qla2xxx_tpg {
35 /* FC lport target portal group tag for TCM */
36 u16 lport_tpgt;
37 /* Atomic bit to determine TPG active status */
38 atomic_t lport_tpg_enabled;
39 /* Pointer back to tcm_qla2xxx_lport */
40 struct tcm_qla2xxx_lport *lport;
41 /* Used by tcm_qla2xxx_tpg_attrib_cit */
42 struct tcm_qla2xxx_tpg_attrib tpg_attrib;
43 /* Returned by tcm_qla2xxx_make_tpg() */
44 struct se_portal_group se_tpg;
45};
46
47#define QLA_TPG_ATTRIB(tpg) (&(tpg)->tpg_attrib)
48
49struct tcm_qla2xxx_fc_loopid {
50 struct se_node_acl *se_nacl;
51};
52
53struct tcm_qla2xxx_lport {
54 /* SCSI protocol the lport is providing */
55 u8 lport_proto_id;
56 /* Binary World Wide unique Port Name for FC Target Lport */
57 u64 lport_wwpn;
58 /* Binary World Wide unique Port Name for FC NPIV Target Lport */
59 u64 lport_npiv_wwpn;
60 /* Binary World Wide unique Node Name for FC NPIV Target Lport */
61 u64 lport_npiv_wwnn;
62 /* ASCII formatted WWPN for FC Target Lport */
63 char lport_name[TCM_QLA2XXX_NAMELEN];
64 /* ASCII formatted WWPN+WWNN for NPIV FC Target Lport */
65 char lport_npiv_name[TCM_QLA2XXX_NPIV_NAMELEN];
66 /* map for fc_port pointers in 24-bit FC Port ID space */
67 struct btree_head32 lport_fcport_map;
68 /* vmalloc-ed memory for fc_port pointers for 16-bit FC loop ID */
69 struct tcm_qla2xxx_fc_loopid *lport_loopid_map;
70 /* Pointer to struct scsi_qla_host from qla2xxx LLD */
71 struct scsi_qla_host *qla_vha;
72 /* Pointer to struct scsi_qla_host for NPIV VP from qla2xxx LLD */
73 struct scsi_qla_host *qla_npiv_vp;
74 /* Embedded struct qla_tgt for this lport */
75 struct qla_tgt lport_qla_tgt;
76 /* Pointer to struct fc_vport for NPIV vport from libfc */
77 struct fc_vport *npiv_vport;
78 /* Pointer to TPG=1 for non NPIV mode */
79 struct tcm_qla2xxx_tpg *tpg_1;
80 /* Returned by tcm_qla2xxx_make_lport() */
81 struct se_wwn lport_wwn;
82};
diff --git a/drivers/scsi/qla4xxx/ql4_attr.c b/drivers/scsi/qla4xxx/ql4_attr.c
index 0b0a7d42137d..c681b2a355e1 100644
--- a/drivers/scsi/qla4xxx/ql4_attr.c
+++ b/drivers/scsi/qla4xxx/ql4_attr.c
@@ -9,6 +9,140 @@
9#include "ql4_glbl.h" 9#include "ql4_glbl.h"
10#include "ql4_dbg.h" 10#include "ql4_dbg.h"
11 11
12static ssize_t
13qla4_8xxx_sysfs_read_fw_dump(struct file *filep, struct kobject *kobj,
14 struct bin_attribute *ba, char *buf, loff_t off,
15 size_t count)
16{
17 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
18 struct device, kobj)));
19
20 if (!is_qla8022(ha))
21 return -EINVAL;
22
23 if (!test_bit(AF_82XX_DUMP_READING, &ha->flags))
24 return 0;
25
26 return memory_read_from_buffer(buf, count, &off, ha->fw_dump,
27 ha->fw_dump_size);
28}
29
30static ssize_t
31qla4_8xxx_sysfs_write_fw_dump(struct file *filep, struct kobject *kobj,
32 struct bin_attribute *ba, char *buf, loff_t off,
33 size_t count)
34{
35 struct scsi_qla_host *ha = to_qla_host(dev_to_shost(container_of(kobj,
36 struct device, kobj)));
37 uint32_t dev_state;
38 long reading;
39 int ret = 0;
40
41 if (!is_qla8022(ha))
42 return -EINVAL;
43
44 if (off != 0)
45 return ret;
46
47 buf[1] = 0;
48 ret = kstrtol(buf, 10, &reading);
49 if (ret) {
50 ql4_printk(KERN_ERR, ha, "%s: Invalid input. Return err %d\n",
51 __func__, ret);
52 return ret;
53 }
54
55 switch (reading) {
56 case 0:
57 /* clear dump collection flags */
58 if (test_and_clear_bit(AF_82XX_DUMP_READING, &ha->flags)) {
59 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
60 /* Reload minidump template */
61 qla4xxx_alloc_fw_dump(ha);
62 DEBUG2(ql4_printk(KERN_INFO, ha,
63 "Firmware template reloaded\n"));
64 }
65 break;
66 case 1:
67 /* Set flag to read dump */
68 if (test_bit(AF_82XX_FW_DUMPED, &ha->flags) &&
69 !test_bit(AF_82XX_DUMP_READING, &ha->flags)) {
70 set_bit(AF_82XX_DUMP_READING, &ha->flags);
71 DEBUG2(ql4_printk(KERN_INFO, ha,
72 "Raw firmware dump ready for read on (%ld).\n",
73 ha->host_no));
74 }
75 break;
76 case 2:
77 /* Reset HBA */
78 qla4_8xxx_idc_lock(ha);
79 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
80 if (dev_state == QLA82XX_DEV_READY) {
81 ql4_printk(KERN_INFO, ha,
82 "%s: Setting Need reset, reset_owner is 0x%x.\n",
83 __func__, ha->func_num);
84 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
85 QLA82XX_DEV_NEED_RESET);
86 set_bit(AF_82XX_RST_OWNER, &ha->flags);
87 } else
88 ql4_printk(KERN_INFO, ha,
89 "%s: Reset not performed as device state is 0x%x\n",
90 __func__, dev_state);
91
92 qla4_8xxx_idc_unlock(ha);
93 break;
94 default:
95 /* do nothing */
96 break;
97 }
98
99 return count;
100}
101
102static struct bin_attribute sysfs_fw_dump_attr = {
103 .attr = {
104 .name = "fw_dump",
105 .mode = S_IRUSR | S_IWUSR,
106 },
107 .size = 0,
108 .read = qla4_8xxx_sysfs_read_fw_dump,
109 .write = qla4_8xxx_sysfs_write_fw_dump,
110};
111
112static struct sysfs_entry {
113 char *name;
114 struct bin_attribute *attr;
115} bin_file_entries[] = {
116 { "fw_dump", &sysfs_fw_dump_attr },
117 { NULL },
118};
119
120void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha)
121{
122 struct Scsi_Host *host = ha->host;
123 struct sysfs_entry *iter;
124 int ret;
125
126 for (iter = bin_file_entries; iter->name; iter++) {
127 ret = sysfs_create_bin_file(&host->shost_gendev.kobj,
128 iter->attr);
129 if (ret)
130 ql4_printk(KERN_ERR, ha,
131 "Unable to create sysfs %s binary attribute (%d).\n",
132 iter->name, ret);
133 }
134}
135
136void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha)
137{
138 struct Scsi_Host *host = ha->host;
139 struct sysfs_entry *iter;
140
141 for (iter = bin_file_entries; iter->name; iter++)
142 sysfs_remove_bin_file(&host->shost_gendev.kobj,
143 iter->attr);
144}
145
146/* Scsi_Host attributes. */
147static ssize_t
148qla4xxx_fw_version_show(struct device *dev,
diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
index 7f2492e88be7..96a5616a8fda 100644
--- a/drivers/scsi/qla4xxx/ql4_def.h
+++ b/drivers/scsi/qla4xxx/ql4_def.h
@@ -398,6 +398,16 @@ struct isp_operations {
398 int (*get_sys_info) (struct scsi_qla_host *);
399};
400
401struct ql4_mdump_size_table {
402 uint32_t size;
403 uint32_t size_cmask_02;
404 uint32_t size_cmask_04;
405 uint32_t size_cmask_08;
406 uint32_t size_cmask_10;
407 uint32_t size_cmask_FF;
408 uint32_t version;
409};
410
411/*qla4xxx ipaddress configuration details */
412struct ipaddress_config {
413 uint16_t ipv4_options;
@@ -485,6 +495,10 @@ struct scsi_qla_host {
495#define AF_EEH_BUSY 20 /* 0x00100000 */
496#define AF_PCI_CHANNEL_IO_PERM_FAILURE 21 /* 0x00200000 */
497#define AF_BUILD_DDB_LIST 22 /* 0x00400000 */
498#define AF_82XX_FW_DUMPED 24 /* 0x01000000 */
499#define AF_82XX_RST_OWNER 25 /* 0x02000000 */
500#define AF_82XX_DUMP_READING 26 /* 0x04000000 */
501
502 unsigned long dpc_flags;
503
504#define DPC_RESET_HA 1 /* 0x00000002 */
@@ -662,6 +676,11 @@ struct scsi_qla_host {
676
677 uint32_t nx_dev_init_timeout;
678 uint32_t nx_reset_timeout;
679 void *fw_dump;
680 uint32_t fw_dump_size;
681 uint32_t fw_dump_capture_mask;
682 void *fw_dump_tmplt_hdr;
683 uint32_t fw_dump_tmplt_size;
684
685 struct completion mbx_intr_comp;
686
@@ -936,4 +955,7 @@ static inline int ql4xxx_reset_active(struct scsi_qla_host *ha)
955#define PROCESS_ALL_AENS 0
956#define FLUSH_DDB_CHANGED_AENS 1
957
958/* Defines for udev events */
959#define QL4_UEVENT_CODE_FW_DUMP 0
960
961#endif /*_QLA4XXX_H */
diff --git a/drivers/scsi/qla4xxx/ql4_fw.h b/drivers/scsi/qla4xxx/ql4_fw.h
index 210cd1d64475..7240948fb929 100644
--- a/drivers/scsi/qla4xxx/ql4_fw.h
+++ b/drivers/scsi/qla4xxx/ql4_fw.h
@@ -385,6 +385,11 @@ struct qla_flt_region {
385#define MBOX_CMD_GET_IP_ADDR_STATE 0x0091
386#define MBOX_CMD_SEND_IPV6_ROUTER_SOL 0x0092
387#define MBOX_CMD_GET_DB_ENTRY_CURRENT_IP_ADDR 0x0093
388#define MBOX_CMD_MINIDUMP 0x0129
389
390/* Minidump subcommand */
391#define MINIDUMP_GET_SIZE_SUBCOMMAND 0x00
392#define MINIDUMP_GET_TMPLT_SUBCOMMAND 0x01
393
394/* Mailbox 1 */
395#define FW_STATE_READY 0x0000
@@ -1190,4 +1195,27 @@ struct ql_iscsi_stats {
1195 uint8_t reserved2[264]; /* 0x0308 - 0x040F */
1196};
1197
1198#define QLA82XX_DBG_STATE_ARRAY_LEN 16
1199#define QLA82XX_DBG_CAP_SIZE_ARRAY_LEN 8
1200#define QLA82XX_DBG_RSVD_ARRAY_LEN 8
1201
1202struct qla4_8xxx_minidump_template_hdr {
1203 uint32_t entry_type;
1204 uint32_t first_entry_offset;
1205 uint32_t size_of_template;
1206 uint32_t capture_debug_level;
1207 uint32_t num_of_entries;
1208 uint32_t version;
1209 uint32_t driver_timestamp;
1210 uint32_t checksum;
1211
1212 uint32_t driver_capture_mask;
1213 uint32_t driver_info_word2;
1214 uint32_t driver_info_word3;
1215 uint32_t driver_info_word4;
1216
1217 uint32_t saved_state_array[QLA82XX_DBG_STATE_ARRAY_LEN];
1218 uint32_t capture_size_array[QLA82XX_DBG_CAP_SIZE_ARRAY_LEN];
1219};
1220
1193#endif /* _QLA4X_FW_H */ 1221#endif /* _QLA4X_FW_H */
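The qla4_8xxx_minidump_template_hdr layout added above is the driver's road map through a dump: the header arrives via the new MBOX_CMD_MINIDUMP mailbox command (MINIDUMP_GET_SIZE_SUBCOMMAND reports how big it is, MINIDUMP_GET_TMPLT_SUBCOMMAND fetches it), first_entry_offset locates the first capture entry, and each entry then chains to the next by its own size. A minimal sketch of that walk follows; it assumes a template that has already been read into memory and relies on the entry header type defined later in ql4_nx.h, so it is an illustration of the layout rather than code from this patch.

	/* Sketch only: assumes a complete, already-fetched template at *hdr and
	 * the structure definitions from ql4_fw.h / ql4_nx.h in this series. */
	static void walk_minidump_template(struct qla4_8xxx_minidump_template_hdr *hdr)
	{
		struct qla82xx_minidump_entry_hdr *entry;
		uint32_t i;

		entry = (struct qla82xx_minidump_entry_hdr *)
				((uint8_t *)hdr + hdr->first_entry_offset);

		for (i = 0; i < hdr->num_of_entries; i++) {
			/* entry->entry_type selects RDCRB/RDMEM/... handling;
			 * entry->entry_size is the stride to the next entry. */
			entry = (struct qla82xx_minidump_entry_hdr *)
					((uint8_t *)entry + entry->entry_size);
		}
	}

qla4_8xxx_collect_md_data() in ql4_nx.c below performs exactly this walk, dispatching on entry_type and honouring the capture mask before copying data out.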
diff --git a/drivers/scsi/qla4xxx/ql4_glbl.h b/drivers/scsi/qla4xxx/ql4_glbl.h
index 910536667cf5..20b49d019043 100644
--- a/drivers/scsi/qla4xxx/ql4_glbl.h
+++ b/drivers/scsi/qla4xxx/ql4_glbl.h
@@ -196,10 +196,18 @@ int qla4xxx_bsg_request(struct bsg_job *bsg_job);
196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job); 196int qla4xxx_process_vendor_specific(struct bsg_job *bsg_job);
197 197
198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry); 198void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry);
199int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
200 dma_addr_t phys_addr);
201int qla4xxx_req_template_size(struct scsi_qla_host *ha);
202void qla4_8xxx_alloc_sysfs_attr(struct scsi_qla_host *ha);
203void qla4_8xxx_free_sysfs_attr(struct scsi_qla_host *ha);
204void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha);
199 205
200extern int ql4xextended_error_logging; 206extern int ql4xextended_error_logging;
201extern int ql4xdontresethba; 207extern int ql4xdontresethba;
202extern int ql4xenablemsix; 208extern int ql4xenablemsix;
209extern int ql4xmdcapmask;
210extern int ql4xenablemd;
203 211
204extern struct device_attribute *qla4xxx_host_attrs[]; 212extern struct device_attribute *qla4xxx_host_attrs[];
205#endif /* _QLA4x_GBL_H */ 213#endif /* _QLA4x_GBL_H */
diff --git a/drivers/scsi/qla4xxx/ql4_init.c b/drivers/scsi/qla4xxx/ql4_init.c
index 90ee5d8fa731..bf36723b84e1 100644
--- a/drivers/scsi/qla4xxx/ql4_init.c
+++ b/drivers/scsi/qla4xxx/ql4_init.c
@@ -277,6 +277,94 @@ qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha)
277 return ipv4_wait|ipv6_wait; 277 return ipv4_wait|ipv6_wait;
278} 278}
279 279
280/**
281 * qla4xxx_alloc_fw_dump - Allocate memory for minidump data.
282 * @ha: pointer to host adapter structure.
283 **/
284void qla4xxx_alloc_fw_dump(struct scsi_qla_host *ha)
285{
286 int status;
287 uint32_t capture_debug_level;
288 int hdr_entry_bit, k;
289 void *md_tmp;
290 dma_addr_t md_tmp_dma;
291 struct qla4_8xxx_minidump_template_hdr *md_hdr;
292
293 if (ha->fw_dump) {
294 ql4_printk(KERN_WARNING, ha,
295 "Firmware dump previously allocated.\n");
296 return;
297 }
298
299 status = qla4xxx_req_template_size(ha);
300 if (status != QLA_SUCCESS) {
301 ql4_printk(KERN_INFO, ha,
302 "scsi%ld: Failed to get template size\n",
303 ha->host_no);
304 return;
305 }
306
307 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
308
309 /* Allocate memory for saving the template */
310 md_tmp = dma_alloc_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
311 &md_tmp_dma, GFP_KERNEL);
312
313 /* Request template */
314 status = qla4xxx_get_minidump_template(ha, md_tmp_dma);
315 if (status != QLA_SUCCESS) {
316 ql4_printk(KERN_INFO, ha,
317 "scsi%ld: Failed to get minidump template\n",
318 ha->host_no);
319 goto alloc_cleanup;
320 }
321
322 md_hdr = (struct qla4_8xxx_minidump_template_hdr *)md_tmp;
323
324 capture_debug_level = md_hdr->capture_debug_level;
325
326 /* Get capture mask based on module loadtime setting. */
327 if (ql4xmdcapmask >= 0x3 && ql4xmdcapmask <= 0x7F)
328 ha->fw_dump_capture_mask = ql4xmdcapmask;
329 else
330 ha->fw_dump_capture_mask = capture_debug_level;
331
332 md_hdr->driver_capture_mask = ha->fw_dump_capture_mask;
333
334 DEBUG2(ql4_printk(KERN_INFO, ha, "Minimum num of entries = %d\n",
335 md_hdr->num_of_entries));
336 DEBUG2(ql4_printk(KERN_INFO, ha, "Dump template size = %d\n",
337 ha->fw_dump_tmplt_size));
338 DEBUG2(ql4_printk(KERN_INFO, ha, "Selected Capture mask =0x%x\n",
339 ha->fw_dump_capture_mask));
340
341 /* Calculate fw_dump_size */
342 for (hdr_entry_bit = 0x2, k = 1; (hdr_entry_bit & 0xFF);
343 hdr_entry_bit <<= 1, k++) {
344 if (hdr_entry_bit & ha->fw_dump_capture_mask)
345 ha->fw_dump_size += md_hdr->capture_size_array[k];
346 }
347
348 /* Total firmware dump size including command header */
349 ha->fw_dump_size += ha->fw_dump_tmplt_size;
350 ha->fw_dump = vmalloc(ha->fw_dump_size);
351 if (!ha->fw_dump)
352 goto alloc_cleanup;
353
354 DEBUG2(ql4_printk(KERN_INFO, ha,
355 "Minidump Tempalate Size = 0x%x KB\n",
356 ha->fw_dump_tmplt_size));
357 DEBUG2(ql4_printk(KERN_INFO, ha,
358 "Total Minidump size = 0x%x KB\n", ha->fw_dump_size));
359
360 memcpy(ha->fw_dump, md_tmp, ha->fw_dump_tmplt_size);
361 ha->fw_dump_tmplt_hdr = ha->fw_dump;
362
363alloc_cleanup:
364 dma_free_coherent(&ha->pdev->dev, ha->fw_dump_tmplt_size,
365 md_tmp, md_tmp_dma);
366}
367
280static int qla4xxx_fw_ready(struct scsi_qla_host *ha) 368static int qla4xxx_fw_ready(struct scsi_qla_host *ha)
281{ 369{
282 uint32_t timeout_count; 370 uint32_t timeout_count;
@@ -445,9 +533,13 @@ static int qla4xxx_init_firmware(struct scsi_qla_host *ha)
445 "control block\n", ha->host_no, __func__)); 533 "control block\n", ha->host_no, __func__));
446 return status; 534 return status;
447 } 535 }
536
448 if (!qla4xxx_fw_ready(ha)) 537 if (!qla4xxx_fw_ready(ha))
449 return status; 538 return status;
450 539
540 if (is_qla8022(ha) && !test_bit(AF_INIT_DONE, &ha->flags))
541 qla4xxx_alloc_fw_dump(ha);
542
451 return qla4xxx_get_firmware_status(ha); 543 return qla4xxx_get_firmware_status(ha);
452} 544}
453 545
@@ -884,8 +976,8 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
884 switch (state) { 976 switch (state) {
885 case DDB_DS_SESSION_ACTIVE: 977 case DDB_DS_SESSION_ACTIVE:
886 case DDB_DS_DISCOVERY: 978 case DDB_DS_DISCOVERY:
887 ddb_entry->unblock_sess(ddb_entry->sess);
888 qla4xxx_update_session_conn_param(ha, ddb_entry); 979 qla4xxx_update_session_conn_param(ha, ddb_entry);
980 ddb_entry->unblock_sess(ddb_entry->sess);
889 status = QLA_SUCCESS; 981 status = QLA_SUCCESS;
890 break; 982 break;
891 case DDB_DS_SESSION_FAILED: 983 case DDB_DS_SESSION_FAILED:
@@ -897,6 +989,7 @@ int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index,
897 } 989 }
898 break; 990 break;
899 case DDB_DS_SESSION_ACTIVE: 991 case DDB_DS_SESSION_ACTIVE:
992 case DDB_DS_DISCOVERY:
900 switch (state) { 993 switch (state) {
901 case DDB_DS_SESSION_FAILED: 994 case DDB_DS_SESSION_FAILED:
902 /* 995 /*
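The qla4xxx_alloc_fw_dump() hunk above sizes the dump buffer by walking capture-mask bits 0x02 through 0x80 and summing the matching slots of the template header's capture_size_array[], then adding the template itself at the front of the buffer. A stand-alone sketch of that arithmetic follows; the per-region sizes and template size are hypothetical, not values reported by real ISP82xx firmware.

	/* Sketch only: sizes are made-up example numbers. */
	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint32_t capture_size_array[8] = { 0, 0x1000, 0x2000, 0x4000,
						   0x8000, 0x10000, 0x20000, 0x40000 };
		uint32_t capture_mask = 0x1F;	/* default ql4xmdcapmask */
		uint32_t tmplt_size = 0x2000;	/* hypothetical template size */
		uint32_t fw_dump_size = 0;
		uint32_t hdr_entry_bit;
		int k;

		/* Same loop shape as qla4xxx_alloc_fw_dump(): bit 0x02 maps to
		 * capture_size_array[1], 0x04 to [2], ... 0x80 to [7]. */
		for (hdr_entry_bit = 0x2, k = 1; hdr_entry_bit & 0xFF;
		     hdr_entry_bit <<= 1, k++) {
			if (hdr_entry_bit & capture_mask)
				fw_dump_size += capture_size_array[k];
		}
		fw_dump_size += tmplt_size;	/* template copied at buffer start */

		printf("fw_dump_size = 0x%x bytes\n", fw_dump_size);
		return 0;
	}

With the 0x1F default only bits 0x02, 0x04, 0x08 and 0x10 match, so the example sums 0xF000 of capture data and prints 0x11000 once the template is included; the driver then vmalloc()s that many bytes and copies the template to the start, which is why ha->fw_dump_tmplt_hdr simply aliases ha->fw_dump.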
diff --git a/drivers/scsi/qla4xxx/ql4_mbx.c b/drivers/scsi/qla4xxx/ql4_mbx.c
index 7ac21dabbf22..cab8f665a41f 100644
--- a/drivers/scsi/qla4xxx/ql4_mbx.c
+++ b/drivers/scsi/qla4xxx/ql4_mbx.c
@@ -51,25 +51,6 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
51 } 51 }
52 } 52 }
53 53
54 if (is_qla8022(ha)) {
55 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
56 DEBUG2(ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: "
57 "prematurely completing mbx cmd as firmware "
58 "recovery detected\n", ha->host_no, __func__));
59 return status;
60 }
61 /* Do not send any mbx cmd if h/w is in failed state*/
62 qla4_8xxx_idc_lock(ha);
63 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
64 qla4_8xxx_idc_unlock(ha);
65 if (dev_state == QLA82XX_DEV_FAILED) {
66 ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: H/W is in "
67 "failed state, do not send any mailbox commands\n",
68 ha->host_no, __func__);
69 return status;
70 }
71 }
72
73 if ((is_aer_supported(ha)) && 54 if ((is_aer_supported(ha)) &&
74 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) { 55 (test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags))) {
75 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, " 56 DEBUG2(printk(KERN_WARNING "scsi%ld: %s: Perm failure on EEH, "
@@ -96,6 +77,25 @@ int qla4xxx_mailbox_command(struct scsi_qla_host *ha, uint8_t inCount,
96 msleep(10); 77 msleep(10);
97 } 78 }
98 79
80 if (is_qla8022(ha)) {
81 if (test_bit(AF_FW_RECOVERY, &ha->flags)) {
82 DEBUG2(ql4_printk(KERN_WARNING, ha,
83 "scsi%ld: %s: prematurely completing mbx cmd as firmware recovery detected\n",
84 ha->host_no, __func__));
85 goto mbox_exit;
86 }
87 /* Do not send any mbx cmd if h/w is in failed state*/
88 qla4_8xxx_idc_lock(ha);
89 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
90 qla4_8xxx_idc_unlock(ha);
91 if (dev_state == QLA82XX_DEV_FAILED) {
92 ql4_printk(KERN_WARNING, ha,
93 "scsi%ld: %s: H/W is in failed state, do not send any mailbox commands\n",
94 ha->host_no, __func__);
95 goto mbox_exit;
96 }
97 }
98
99 spin_lock_irqsave(&ha->hardware_lock, flags); 99 spin_lock_irqsave(&ha->hardware_lock, flags);
100 100
101 ha->mbox_status_count = outCount; 101 ha->mbox_status_count = outCount;
@@ -270,6 +270,79 @@ mbox_exit:
270 return status; 270 return status;
271} 271}
272 272
273/**
274 * qla4xxx_get_minidump_template - Get the firmware template
275 * @ha: Pointer to host adapter structure.
276 * @phys_addr: dma address for template
277 *
278 * Obtain the minidump template from firmware during initialization
279 * as it may not be available when minidump is desired.
280 **/
281int qla4xxx_get_minidump_template(struct scsi_qla_host *ha,
282 dma_addr_t phys_addr)
283{
284 uint32_t mbox_cmd[MBOX_REG_COUNT];
285 uint32_t mbox_sts[MBOX_REG_COUNT];
286 int status;
287
288 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
289 memset(&mbox_sts, 0, sizeof(mbox_sts));
290
291 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
292 mbox_cmd[1] = MINIDUMP_GET_TMPLT_SUBCOMMAND;
293 mbox_cmd[2] = LSDW(phys_addr);
294 mbox_cmd[3] = MSDW(phys_addr);
295 mbox_cmd[4] = ha->fw_dump_tmplt_size;
296 mbox_cmd[5] = 0;
297
298 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 2, &mbox_cmd[0],
299 &mbox_sts[0]);
300 if (status != QLA_SUCCESS) {
301 DEBUG2(ql4_printk(KERN_INFO, ha,
302 "scsi%ld: %s: Cmd = %08X, mbx[0] = 0x%04x, mbx[1] = 0x%04x\n",
303 ha->host_no, __func__, mbox_cmd[0],
304 mbox_sts[0], mbox_sts[1]));
305 }
306 return status;
307}
308
309/**
310 * qla4xxx_req_template_size - Get minidump template size from firmware.
311 * @ha: Pointer to host adapter structure.
312 **/
313int qla4xxx_req_template_size(struct scsi_qla_host *ha)
314{
315 uint32_t mbox_cmd[MBOX_REG_COUNT];
316 uint32_t mbox_sts[MBOX_REG_COUNT];
317 int status;
318
319 memset(&mbox_cmd, 0, sizeof(mbox_cmd));
320 memset(&mbox_sts, 0, sizeof(mbox_sts));
321
322 mbox_cmd[0] = MBOX_CMD_MINIDUMP;
323 mbox_cmd[1] = MINIDUMP_GET_SIZE_SUBCOMMAND;
324
325 status = qla4xxx_mailbox_command(ha, MBOX_REG_COUNT, 8, &mbox_cmd[0],
326 &mbox_sts[0]);
327 if (status == QLA_SUCCESS) {
328 ha->fw_dump_tmplt_size = mbox_sts[1];
329 DEBUG2(ql4_printk(KERN_INFO, ha,
330 "%s: sts[0]=0x%04x, template size=0x%04x, size_cm_02=0x%04x, size_cm_04=0x%04x, size_cm_08=0x%04x, size_cm_10=0x%04x, size_cm_FF=0x%04x, version=0x%04x\n",
331 __func__, mbox_sts[0], mbox_sts[1],
332 mbox_sts[2], mbox_sts[3], mbox_sts[4],
333 mbox_sts[5], mbox_sts[6], mbox_sts[7]));
334 if (ha->fw_dump_tmplt_size == 0)
335 status = QLA_ERROR;
336 } else {
337 ql4_printk(KERN_WARNING, ha,
338 "%s: Error sts[0]=0x%04x, mbx[1]=0x%04x\n",
339 __func__, mbox_sts[0], mbox_sts[1]);
340 status = QLA_ERROR;
341 }
342
343 return status;
344}
345
273void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha) 346void qla4xxx_mailbox_premature_completion(struct scsi_qla_host *ha)
274{ 347{
275 set_bit(AF_FW_RECOVERY, &ha->flags); 348 set_bit(AF_FW_RECOVERY, &ha->flags);
diff --git a/drivers/scsi/qla4xxx/ql4_nx.c b/drivers/scsi/qla4xxx/ql4_nx.c
index e1e46b6dac75..228b67020d2c 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.c
+++ b/drivers/scsi/qla4xxx/ql4_nx.c
@@ -7,6 +7,7 @@
7#include <linux/delay.h> 7#include <linux/delay.h>
8#include <linux/io.h> 8#include <linux/io.h>
9#include <linux/pci.h> 9#include <linux/pci.h>
10#include <linux/ratelimit.h>
10#include "ql4_def.h" 11#include "ql4_def.h"
11#include "ql4_glbl.h" 12#include "ql4_glbl.h"
12 13
@@ -420,6 +421,38 @@ qla4_8xxx_rd_32(struct scsi_qla_host *ha, ulong off)
420 return data; 421 return data;
421} 422}
422 423
424/* Minidump related functions */
425static int qla4_8xxx_md_rw_32(struct scsi_qla_host *ha, uint32_t off,
426 u32 data, uint8_t flag)
427{
428 uint32_t win_read, off_value, rval = QLA_SUCCESS;
429
430 off_value = off & 0xFFFF0000;
431 writel(off_value, (void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
432
433 /* Read back value to make sure write has gone through before trying
434 * to use it.
435 */
436 win_read = readl((void __iomem *)(CRB_WINDOW_2M + ha->nx_pcibase));
437 if (win_read != off_value) {
438 DEBUG2(ql4_printk(KERN_INFO, ha,
439 "%s: Written (0x%x) != Read (0x%x), off=0x%x\n",
440 __func__, off_value, win_read, off));
441 return QLA_ERROR;
442 }
443
444 off_value = off & 0x0000FFFF;
445
446 if (flag)
447 writel(data, (void __iomem *)(off_value + CRB_INDIRECT_2M +
448 ha->nx_pcibase));
449 else
450 rval = readl((void __iomem *)(off_value + CRB_INDIRECT_2M +
451 ha->nx_pcibase));
452
453 return rval;
454}
455
423#define CRB_WIN_LOCK_TIMEOUT 100000000 456#define CRB_WIN_LOCK_TIMEOUT 100000000
424 457
425int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha) 458int qla4_8xxx_crb_win_lock(struct scsi_qla_host *ha)
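qla4_8xxx_md_rw_32() above is the minidump code's doorway to adapter registers through the 2 MB CRB window: the upper 16 bits of the requested offset are written to CRB_WINDOW_2M to select the window (and read back to confirm the posted write landed), and the access itself then goes through CRB_INDIRECT_2M plus the lower 16 bits of the offset, all relative to the mapped PCI base in ha->nx_pcibase. As a worked example, a read of offset 0x42110030 first writes 0x42110000 to the window register and then reads from CRB_INDIRECT_2M + 0x0030; the flag argument selects between that read and a write of the supplied data.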
@@ -1252,9 +1285,9 @@ qla4_8xxx_pci_mem_read_2M(struct scsi_qla_host *ha,
1252 } 1285 }
1253 1286
1254 if (j >= MAX_CTL_CHECK) { 1287 if (j >= MAX_CTL_CHECK) {
1255 if (printk_ratelimit()) 1288 printk_ratelimited(KERN_ERR
1256 ql4_printk(KERN_ERR, ha, 1289 "%s: failed to read through agent\n",
1257 "failed to read through agent\n"); 1290 __func__);
1258 break; 1291 break;
1259 } 1292 }
1260 1293
@@ -1390,7 +1423,8 @@ qla4_8xxx_pci_mem_write_2M(struct scsi_qla_host *ha,
1390 if (j >= MAX_CTL_CHECK) { 1423 if (j >= MAX_CTL_CHECK) {
1391 if (printk_ratelimit()) 1424 if (printk_ratelimit())
1392 ql4_printk(KERN_ERR, ha, 1425 ql4_printk(KERN_ERR, ha,
1393 "failed to write through agent\n"); 1426 "%s: failed to read through agent\n",
1427 __func__);
1394 ret = -1; 1428 ret = -1;
1395 break; 1429 break;
1396 } 1430 }
@@ -1462,6 +1496,8 @@ qla4_8xxx_set_drv_active(struct scsi_qla_host *ha)
1462 1496
1463 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1497 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1464 drv_active |= (1 << (ha->func_num * 4)); 1498 drv_active |= (1 << (ha->func_num * 4));
1499 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1500 __func__, ha->host_no, drv_active);
1465 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1501 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1466} 1502}
1467 1503
@@ -1472,6 +1508,8 @@ qla4_8xxx_clear_drv_active(struct scsi_qla_host *ha)
1472 1508
1473 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 1509 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1474 drv_active &= ~(1 << (ha->func_num * 4)); 1510 drv_active &= ~(1 << (ha->func_num * 4));
1511 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_active: 0x%08x\n",
1512 __func__, ha->host_no, drv_active);
1475 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active); 1513 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_ACTIVE, drv_active);
1476} 1514}
1477 1515
@@ -1497,6 +1535,8 @@ qla4_8xxx_set_rst_ready(struct scsi_qla_host *ha)
1497 1535
1498 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1536 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1499 drv_state |= (1 << (ha->func_num * 4)); 1537 drv_state |= (1 << (ha->func_num * 4));
1538 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1539 __func__, ha->host_no, drv_state);
1500 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1540 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1501} 1541}
1502 1542
@@ -1507,6 +1547,8 @@ qla4_8xxx_clear_rst_ready(struct scsi_qla_host *ha)
1507 1547
1508 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE); 1548 drv_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_STATE);
1509 drv_state &= ~(1 << (ha->func_num * 4)); 1549 drv_state &= ~(1 << (ha->func_num * 4));
1550 ql4_printk(KERN_INFO, ha, "%s(%ld): drv_state: 0x%08x\n",
1551 __func__, ha->host_no, drv_state);
1510 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state); 1552 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_STATE, drv_state);
1511} 1553}
1512 1554
@@ -1601,6 +1643,629 @@ static void qla4_8xxx_rom_lock_recovery(struct scsi_qla_host *ha)
1601 qla4_8xxx_rom_unlock(ha); 1643 qla4_8xxx_rom_unlock(ha);
1602} 1644}
1603 1645
1646static void qla4_8xxx_minidump_process_rdcrb(struct scsi_qla_host *ha,
1647 struct qla82xx_minidump_entry_hdr *entry_hdr,
1648 uint32_t **d_ptr)
1649{
1650 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1651 struct qla82xx_minidump_entry_crb *crb_hdr;
1652 uint32_t *data_ptr = *d_ptr;
1653
1654 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1655 crb_hdr = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1656 r_addr = crb_hdr->addr;
1657 r_stride = crb_hdr->crb_strd.addr_stride;
1658 loop_cnt = crb_hdr->op_count;
1659
1660 for (i = 0; i < loop_cnt; i++) {
1661 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1662 *data_ptr++ = cpu_to_le32(r_addr);
1663 *data_ptr++ = cpu_to_le32(r_value);
1664 r_addr += r_stride;
1665 }
1666 *d_ptr = data_ptr;
1667}
1668
1669static int qla4_8xxx_minidump_process_l2tag(struct scsi_qla_host *ha,
1670 struct qla82xx_minidump_entry_hdr *entry_hdr,
1671 uint32_t **d_ptr)
1672{
1673 uint32_t addr, r_addr, c_addr, t_r_addr;
1674 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1675 unsigned long p_wait, w_time, p_mask;
1676 uint32_t c_value_w, c_value_r;
1677 struct qla82xx_minidump_entry_cache *cache_hdr;
1678 int rval = QLA_ERROR;
1679 uint32_t *data_ptr = *d_ptr;
1680
1681 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1682 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1683
1684 loop_count = cache_hdr->op_count;
1685 r_addr = cache_hdr->read_addr;
1686 c_addr = cache_hdr->control_addr;
1687 c_value_w = cache_hdr->cache_ctrl.write_value;
1688
1689 t_r_addr = cache_hdr->tag_reg_addr;
1690 t_value = cache_hdr->addr_ctrl.init_tag_value;
1691 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1692 p_wait = cache_hdr->cache_ctrl.poll_wait;
1693 p_mask = cache_hdr->cache_ctrl.poll_mask;
1694
1695 for (i = 0; i < loop_count; i++) {
1696 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1697
1698 if (c_value_w)
1699 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1700
1701 if (p_mask) {
1702 w_time = jiffies + p_wait;
1703 do {
1704 c_value_r = qla4_8xxx_md_rw_32(ha, c_addr,
1705 0, 0);
1706 if ((c_value_r & p_mask) == 0) {
1707 break;
1708 } else if (time_after_eq(jiffies, w_time)) {
1709 /* capturing dump failed */
1710 return rval;
1711 }
1712 } while (1);
1713 }
1714
1715 addr = r_addr;
1716 for (k = 0; k < r_cnt; k++) {
1717 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1718 *data_ptr++ = cpu_to_le32(r_value);
1719 addr += cache_hdr->read_ctrl.read_addr_stride;
1720 }
1721
1722 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1723 }
1724 *d_ptr = data_ptr;
1725 return QLA_SUCCESS;
1726}
1727
1728static int qla4_8xxx_minidump_process_control(struct scsi_qla_host *ha,
1729 struct qla82xx_minidump_entry_hdr *entry_hdr)
1730{
1731 struct qla82xx_minidump_entry_crb *crb_entry;
1732 uint32_t read_value, opcode, poll_time, addr, index, rval = QLA_SUCCESS;
1733 uint32_t crb_addr;
1734 unsigned long wtime;
1735 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
1736 int i;
1737
1738 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1739 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
1740 ha->fw_dump_tmplt_hdr;
1741 crb_entry = (struct qla82xx_minidump_entry_crb *)entry_hdr;
1742
1743 crb_addr = crb_entry->addr;
1744 for (i = 0; i < crb_entry->op_count; i++) {
1745 opcode = crb_entry->crb_ctrl.opcode;
1746 if (opcode & QLA82XX_DBG_OPCODE_WR) {
1747 qla4_8xxx_md_rw_32(ha, crb_addr,
1748 crb_entry->value_1, 1);
1749 opcode &= ~QLA82XX_DBG_OPCODE_WR;
1750 }
1751 if (opcode & QLA82XX_DBG_OPCODE_RW) {
1752 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1753 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1754 opcode &= ~QLA82XX_DBG_OPCODE_RW;
1755 }
1756 if (opcode & QLA82XX_DBG_OPCODE_AND) {
1757 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1758 read_value &= crb_entry->value_2;
1759 opcode &= ~QLA82XX_DBG_OPCODE_AND;
1760 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1761 read_value |= crb_entry->value_3;
1762 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1763 }
1764 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1765 }
1766 if (opcode & QLA82XX_DBG_OPCODE_OR) {
1767 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1768 read_value |= crb_entry->value_3;
1769 qla4_8xxx_md_rw_32(ha, crb_addr, read_value, 1);
1770 opcode &= ~QLA82XX_DBG_OPCODE_OR;
1771 }
1772 if (opcode & QLA82XX_DBG_OPCODE_POLL) {
1773 poll_time = crb_entry->crb_strd.poll_timeout;
1774 wtime = jiffies + poll_time;
1775 read_value = qla4_8xxx_md_rw_32(ha, crb_addr, 0, 0);
1776
1777 do {
1778 if ((read_value & crb_entry->value_2) ==
1779 crb_entry->value_1)
1780 break;
1781 else if (time_after_eq(jiffies, wtime)) {
1782 /* capturing dump failed */
1783 rval = QLA_ERROR;
1784 break;
1785 } else
1786 read_value = qla4_8xxx_md_rw_32(ha,
1787 crb_addr, 0, 0);
1788 } while (1);
1789 opcode &= ~QLA82XX_DBG_OPCODE_POLL;
1790 }
1791
1792 if (opcode & QLA82XX_DBG_OPCODE_RDSTATE) {
1793 if (crb_entry->crb_strd.state_index_a) {
1794 index = crb_entry->crb_strd.state_index_a;
1795 addr = tmplt_hdr->saved_state_array[index];
1796 } else {
1797 addr = crb_addr;
1798 }
1799
1800 read_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1801 index = crb_entry->crb_ctrl.state_index_v;
1802 tmplt_hdr->saved_state_array[index] = read_value;
1803 opcode &= ~QLA82XX_DBG_OPCODE_RDSTATE;
1804 }
1805
1806 if (opcode & QLA82XX_DBG_OPCODE_WRSTATE) {
1807 if (crb_entry->crb_strd.state_index_a) {
1808 index = crb_entry->crb_strd.state_index_a;
1809 addr = tmplt_hdr->saved_state_array[index];
1810 } else {
1811 addr = crb_addr;
1812 }
1813
1814 if (crb_entry->crb_ctrl.state_index_v) {
1815 index = crb_entry->crb_ctrl.state_index_v;
1816 read_value =
1817 tmplt_hdr->saved_state_array[index];
1818 } else {
1819 read_value = crb_entry->value_1;
1820 }
1821
1822 qla4_8xxx_md_rw_32(ha, addr, read_value, 1);
1823 opcode &= ~QLA82XX_DBG_OPCODE_WRSTATE;
1824 }
1825
1826 if (opcode & QLA82XX_DBG_OPCODE_MDSTATE) {
1827 index = crb_entry->crb_ctrl.state_index_v;
1828 read_value = tmplt_hdr->saved_state_array[index];
1829 read_value <<= crb_entry->crb_ctrl.shl;
1830 read_value >>= crb_entry->crb_ctrl.shr;
1831 if (crb_entry->value_2)
1832 read_value &= crb_entry->value_2;
1833 read_value |= crb_entry->value_3;
1834 read_value += crb_entry->value_1;
1835 tmplt_hdr->saved_state_array[index] = read_value;
1836 opcode &= ~QLA82XX_DBG_OPCODE_MDSTATE;
1837 }
1838 crb_addr += crb_entry->crb_strd.addr_stride;
1839 }
1840 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s\n", __func__));
1841 return rval;
1842}
1843
1844static void qla4_8xxx_minidump_process_rdocm(struct scsi_qla_host *ha,
1845 struct qla82xx_minidump_entry_hdr *entry_hdr,
1846 uint32_t **d_ptr)
1847{
1848 uint32_t r_addr, r_stride, loop_cnt, i, r_value;
1849 struct qla82xx_minidump_entry_rdocm *ocm_hdr;
1850 uint32_t *data_ptr = *d_ptr;
1851
1852 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1853 ocm_hdr = (struct qla82xx_minidump_entry_rdocm *)entry_hdr;
1854 r_addr = ocm_hdr->read_addr;
1855 r_stride = ocm_hdr->read_addr_stride;
1856 loop_cnt = ocm_hdr->op_count;
1857
1858 DEBUG2(ql4_printk(KERN_INFO, ha,
1859 "[%s]: r_addr: 0x%x, r_stride: 0x%x, loop_cnt: 0x%x\n",
1860 __func__, r_addr, r_stride, loop_cnt));
1861
1862 for (i = 0; i < loop_cnt; i++) {
1863 r_value = readl((void __iomem *)(r_addr + ha->nx_pcibase));
1864 *data_ptr++ = cpu_to_le32(r_value);
1865 r_addr += r_stride;
1866 }
1867 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%lx\n",
1868 __func__, (loop_cnt * sizeof(uint32_t))));
1869 *d_ptr = data_ptr;
1870}
1871
1872static void qla4_8xxx_minidump_process_rdmux(struct scsi_qla_host *ha,
1873 struct qla82xx_minidump_entry_hdr *entry_hdr,
1874 uint32_t **d_ptr)
1875{
1876 uint32_t r_addr, s_stride, s_addr, s_value, loop_cnt, i, r_value;
1877 struct qla82xx_minidump_entry_mux *mux_hdr;
1878 uint32_t *data_ptr = *d_ptr;
1879
1880 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1881 mux_hdr = (struct qla82xx_minidump_entry_mux *)entry_hdr;
1882 r_addr = mux_hdr->read_addr;
1883 s_addr = mux_hdr->select_addr;
1884 s_stride = mux_hdr->select_value_stride;
1885 s_value = mux_hdr->select_value;
1886 loop_cnt = mux_hdr->op_count;
1887
1888 for (i = 0; i < loop_cnt; i++) {
1889 qla4_8xxx_md_rw_32(ha, s_addr, s_value, 1);
1890 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1891 *data_ptr++ = cpu_to_le32(s_value);
1892 *data_ptr++ = cpu_to_le32(r_value);
1893 s_value += s_stride;
1894 }
1895 *d_ptr = data_ptr;
1896}
1897
1898static void qla4_8xxx_minidump_process_l1cache(struct scsi_qla_host *ha,
1899 struct qla82xx_minidump_entry_hdr *entry_hdr,
1900 uint32_t **d_ptr)
1901{
1902 uint32_t addr, r_addr, c_addr, t_r_addr;
1903 uint32_t i, k, loop_count, t_value, r_cnt, r_value;
1904 uint32_t c_value_w;
1905 struct qla82xx_minidump_entry_cache *cache_hdr;
1906 uint32_t *data_ptr = *d_ptr;
1907
1908 cache_hdr = (struct qla82xx_minidump_entry_cache *)entry_hdr;
1909 loop_count = cache_hdr->op_count;
1910 r_addr = cache_hdr->read_addr;
1911 c_addr = cache_hdr->control_addr;
1912 c_value_w = cache_hdr->cache_ctrl.write_value;
1913
1914 t_r_addr = cache_hdr->tag_reg_addr;
1915 t_value = cache_hdr->addr_ctrl.init_tag_value;
1916 r_cnt = cache_hdr->read_ctrl.read_addr_cnt;
1917
1918 for (i = 0; i < loop_count; i++) {
1919 qla4_8xxx_md_rw_32(ha, t_r_addr, t_value, 1);
1920 qla4_8xxx_md_rw_32(ha, c_addr, c_value_w, 1);
1921 addr = r_addr;
1922 for (k = 0; k < r_cnt; k++) {
1923 r_value = qla4_8xxx_md_rw_32(ha, addr, 0, 0);
1924 *data_ptr++ = cpu_to_le32(r_value);
1925 addr += cache_hdr->read_ctrl.read_addr_stride;
1926 }
1927 t_value += cache_hdr->addr_ctrl.tag_value_stride;
1928 }
1929 *d_ptr = data_ptr;
1930}
1931
1932static void qla4_8xxx_minidump_process_queue(struct scsi_qla_host *ha,
1933 struct qla82xx_minidump_entry_hdr *entry_hdr,
1934 uint32_t **d_ptr)
1935{
1936 uint32_t s_addr, r_addr;
1937 uint32_t r_stride, r_value, r_cnt, qid = 0;
1938 uint32_t i, k, loop_cnt;
1939 struct qla82xx_minidump_entry_queue *q_hdr;
1940 uint32_t *data_ptr = *d_ptr;
1941
1942 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1943 q_hdr = (struct qla82xx_minidump_entry_queue *)entry_hdr;
1944 s_addr = q_hdr->select_addr;
1945 r_cnt = q_hdr->rd_strd.read_addr_cnt;
1946 r_stride = q_hdr->rd_strd.read_addr_stride;
1947 loop_cnt = q_hdr->op_count;
1948
1949 for (i = 0; i < loop_cnt; i++) {
1950 qla4_8xxx_md_rw_32(ha, s_addr, qid, 1);
1951 r_addr = q_hdr->read_addr;
1952 for (k = 0; k < r_cnt; k++) {
1953 r_value = qla4_8xxx_md_rw_32(ha, r_addr, 0, 0);
1954 *data_ptr++ = cpu_to_le32(r_value);
1955 r_addr += r_stride;
1956 }
1957 qid += q_hdr->q_strd.queue_id_stride;
1958 }
1959 *d_ptr = data_ptr;
1960}
1961
1962#define MD_DIRECT_ROM_WINDOW 0x42110030
1963#define MD_DIRECT_ROM_READ_BASE 0x42150000
1964
1965static void qla4_8xxx_minidump_process_rdrom(struct scsi_qla_host *ha,
1966 struct qla82xx_minidump_entry_hdr *entry_hdr,
1967 uint32_t **d_ptr)
1968{
1969 uint32_t r_addr, r_value;
1970 uint32_t i, loop_cnt;
1971 struct qla82xx_minidump_entry_rdrom *rom_hdr;
1972 uint32_t *data_ptr = *d_ptr;
1973
1974 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
1975 rom_hdr = (struct qla82xx_minidump_entry_rdrom *)entry_hdr;
1976 r_addr = rom_hdr->read_addr;
1977 loop_cnt = rom_hdr->read_data_size/sizeof(uint32_t);
1978
1979 DEBUG2(ql4_printk(KERN_INFO, ha,
1980 "[%s]: flash_addr: 0x%x, read_data_size: 0x%x\n",
1981 __func__, r_addr, loop_cnt));
1982
1983 for (i = 0; i < loop_cnt; i++) {
1984 qla4_8xxx_md_rw_32(ha, MD_DIRECT_ROM_WINDOW,
1985 (r_addr & 0xFFFF0000), 1);
1986 r_value = qla4_8xxx_md_rw_32(ha,
1987 MD_DIRECT_ROM_READ_BASE +
1988 (r_addr & 0x0000FFFF), 0, 0);
1989 *data_ptr++ = cpu_to_le32(r_value);
1990 r_addr += sizeof(uint32_t);
1991 }
1992 *d_ptr = data_ptr;
1993}
1994
1995#define MD_MIU_TEST_AGT_CTRL 0x41000090
1996#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
1997#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
1998
1999static int qla4_8xxx_minidump_process_rdmem(struct scsi_qla_host *ha,
2000 struct qla82xx_minidump_entry_hdr *entry_hdr,
2001 uint32_t **d_ptr)
2002{
2003 uint32_t r_addr, r_value, r_data;
2004 uint32_t i, j, loop_cnt;
2005 struct qla82xx_minidump_entry_rdmem *m_hdr;
2006 unsigned long flags;
2007 uint32_t *data_ptr = *d_ptr;
2008
2009 DEBUG2(ql4_printk(KERN_INFO, ha, "Entering fn: %s\n", __func__));
2010 m_hdr = (struct qla82xx_minidump_entry_rdmem *)entry_hdr;
2011 r_addr = m_hdr->read_addr;
2012 loop_cnt = m_hdr->read_data_size/16;
2013
2014 DEBUG2(ql4_printk(KERN_INFO, ha,
2015 "[%s]: Read addr: 0x%x, read_data_size: 0x%x\n",
2016 __func__, r_addr, m_hdr->read_data_size));
2017
2018 if (r_addr & 0xf) {
2019 DEBUG2(ql4_printk(KERN_INFO, ha,
2020 "[%s]: Read addr 0x%x not 16 bytes alligned\n",
2021 __func__, r_addr));
2022 return QLA_ERROR;
2023 }
2024
2025 if (m_hdr->read_data_size % 16) {
2026 DEBUG2(ql4_printk(KERN_INFO, ha,
2027 "[%s]: Read data[0x%x] not multiple of 16 bytes\n",
2028 __func__, m_hdr->read_data_size));
2029 return QLA_ERROR;
2030 }
2031
2032 DEBUG2(ql4_printk(KERN_INFO, ha,
2033 "[%s]: rdmem_addr: 0x%x, read_data_size: 0x%x, loop_cnt: 0x%x\n",
2034 __func__, r_addr, m_hdr->read_data_size, loop_cnt));
2035
2036 write_lock_irqsave(&ha->hw_lock, flags);
2037 for (i = 0; i < loop_cnt; i++) {
2038 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_LO, r_addr, 1);
2039 r_value = 0;
2040 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_ADDR_HI, r_value, 1);
2041 r_value = MIU_TA_CTL_ENABLE;
2042 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2043 r_value = MIU_TA_CTL_START | MIU_TA_CTL_ENABLE;
2044 qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL, r_value, 1);
2045
2046 for (j = 0; j < MAX_CTL_CHECK; j++) {
2047 r_value = qla4_8xxx_md_rw_32(ha, MD_MIU_TEST_AGT_CTRL,
2048 0, 0);
2049 if ((r_value & MIU_TA_CTL_BUSY) == 0)
2050 break;
2051 }
2052
2053 if (j >= MAX_CTL_CHECK) {
2054 printk_ratelimited(KERN_ERR
2055 "%s: failed to read through agent\n",
2056 __func__);
2057 write_unlock_irqrestore(&ha->hw_lock, flags);
2058 return QLA_SUCCESS;
2059 }
2060
2061 for (j = 0; j < 4; j++) {
2062 r_data = qla4_8xxx_md_rw_32(ha,
2063 MD_MIU_TEST_AGT_RDDATA[j],
2064 0, 0);
2065 *data_ptr++ = cpu_to_le32(r_data);
2066 }
2067
2068 r_addr += 16;
2069 }
2070 write_unlock_irqrestore(&ha->hw_lock, flags);
2071
2072 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s datacount: 0x%x\n",
2073 __func__, (loop_cnt * 16)));
2074
2075 *d_ptr = data_ptr;
2076 return QLA_SUCCESS;
2077}
2078
2079static void ql4_8xxx_mark_entry_skipped(struct scsi_qla_host *ha,
2080 struct qla82xx_minidump_entry_hdr *entry_hdr,
2081 int index)
2082{
2083 entry_hdr->d_ctrl.driver_flags |= QLA82XX_DBG_SKIPPED_FLAG;
2084 DEBUG2(ql4_printk(KERN_INFO, ha,
2085 "scsi(%ld): Skipping entry[%d]: ETYPE[0x%x]-ELEVEL[0x%x]\n",
2086 ha->host_no, index, entry_hdr->entry_type,
2087 entry_hdr->d_ctrl.entry_capture_mask));
2088}
2089
2090/**
2091 * qla82xx_collect_md_data - Retrieve firmware minidump data.
2092 * @ha: pointer to adapter structure
2093 **/
2094static int qla4_8xxx_collect_md_data(struct scsi_qla_host *ha)
2095{
2096 int num_entry_hdr = 0;
2097 struct qla82xx_minidump_entry_hdr *entry_hdr;
2098 struct qla4_8xxx_minidump_template_hdr *tmplt_hdr;
2099 uint32_t *data_ptr;
2100 uint32_t data_collected = 0;
2101 int i, rval = QLA_ERROR;
2102 uint64_t now;
2103 uint32_t timestamp;
2104
2105 if (!ha->fw_dump) {
2106 ql4_printk(KERN_INFO, ha, "%s(%ld) No buffer to dump\n",
2107 __func__, ha->host_no);
2108 return rval;
2109 }
2110
2111 tmplt_hdr = (struct qla4_8xxx_minidump_template_hdr *)
2112 ha->fw_dump_tmplt_hdr;
2113 data_ptr = (uint32_t *)((uint8_t *)ha->fw_dump +
2114 ha->fw_dump_tmplt_size);
2115 data_collected += ha->fw_dump_tmplt_size;
2116
2117 num_entry_hdr = tmplt_hdr->num_of_entries;
2118 ql4_printk(KERN_INFO, ha, "[%s]: starting data ptr: %p\n",
2119 __func__, data_ptr);
2120 ql4_printk(KERN_INFO, ha,
2121 "[%s]: no of entry headers in Template: 0x%x\n",
2122 __func__, num_entry_hdr);
2123 ql4_printk(KERN_INFO, ha, "[%s]: Capture Mask obtained: 0x%x\n",
2124 __func__, ha->fw_dump_capture_mask);
2125 ql4_printk(KERN_INFO, ha, "[%s]: Total_data_size 0x%x, %d obtained\n",
2126 __func__, ha->fw_dump_size, ha->fw_dump_size);
2127
2128 /* Update current timestamp before taking dump */
2129 now = get_jiffies_64();
2130 timestamp = (u32)(jiffies_to_msecs(now) / 1000);
2131 tmplt_hdr->driver_timestamp = timestamp;
2132
2133 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2134 (((uint8_t *)ha->fw_dump_tmplt_hdr) +
2135 tmplt_hdr->first_entry_offset);
2136
2137 /* Walk through the entry headers - validate/perform required action */
2138 for (i = 0; i < num_entry_hdr; i++) {
2139 if (data_collected >= ha->fw_dump_size) {
2140 ql4_printk(KERN_INFO, ha,
2141 "Data collected: [0x%x], Total Dump size: [0x%x]\n",
2142 data_collected, ha->fw_dump_size);
2143 return rval;
2144 }
2145
2146 if (!(entry_hdr->d_ctrl.entry_capture_mask &
2147 ha->fw_dump_capture_mask)) {
2148 entry_hdr->d_ctrl.driver_flags |=
2149 QLA82XX_DBG_SKIPPED_FLAG;
2150 goto skip_nxt_entry;
2151 }
2152
2153 DEBUG2(ql4_printk(KERN_INFO, ha,
2154 "Data collected: [0x%x], Dump size left:[0x%x]\n",
2155 data_collected,
2156 (ha->fw_dump_size - data_collected)));
2157
2158 /* Decode the entry type and take required action to capture
2159 * debug data
2160 */
2161 switch (entry_hdr->entry_type) {
2162 case QLA82XX_RDEND:
2163 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2164 break;
2165 case QLA82XX_CNTRL:
2166 rval = qla4_8xxx_minidump_process_control(ha,
2167 entry_hdr);
2168 if (rval != QLA_SUCCESS) {
2169 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2170 goto md_failed;
2171 }
2172 break;
2173 case QLA82XX_RDCRB:
2174 qla4_8xxx_minidump_process_rdcrb(ha, entry_hdr,
2175 &data_ptr);
2176 break;
2177 case QLA82XX_RDMEM:
2178 rval = qla4_8xxx_minidump_process_rdmem(ha, entry_hdr,
2179 &data_ptr);
2180 if (rval != QLA_SUCCESS) {
2181 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2182 goto md_failed;
2183 }
2184 break;
2185 case QLA82XX_BOARD:
2186 case QLA82XX_RDROM:
2187 qla4_8xxx_minidump_process_rdrom(ha, entry_hdr,
2188 &data_ptr);
2189 break;
2190 case QLA82XX_L2DTG:
2191 case QLA82XX_L2ITG:
2192 case QLA82XX_L2DAT:
2193 case QLA82XX_L2INS:
2194 rval = qla4_8xxx_minidump_process_l2tag(ha, entry_hdr,
2195 &data_ptr);
2196 if (rval != QLA_SUCCESS) {
2197 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2198 goto md_failed;
2199 }
2200 break;
2201 case QLA82XX_L1DAT:
2202 case QLA82XX_L1INS:
2203 qla4_8xxx_minidump_process_l1cache(ha, entry_hdr,
2204 &data_ptr);
2205 break;
2206 case QLA82XX_RDOCM:
2207 qla4_8xxx_minidump_process_rdocm(ha, entry_hdr,
2208 &data_ptr);
2209 break;
2210 case QLA82XX_RDMUX:
2211 qla4_8xxx_minidump_process_rdmux(ha, entry_hdr,
2212 &data_ptr);
2213 break;
2214 case QLA82XX_QUEUE:
2215 qla4_8xxx_minidump_process_queue(ha, entry_hdr,
2216 &data_ptr);
2217 break;
2218 case QLA82XX_RDNOP:
2219 default:
2220 ql4_8xxx_mark_entry_skipped(ha, entry_hdr, i);
2221 break;
2222 }
2223
2224 data_collected = (uint8_t *)data_ptr -
2225 ((uint8_t *)((uint8_t *)ha->fw_dump +
2226 ha->fw_dump_tmplt_size));
2227skip_nxt_entry:
2228 /* next entry in the template */
2229 entry_hdr = (struct qla82xx_minidump_entry_hdr *)
2230 (((uint8_t *)entry_hdr) +
2231 entry_hdr->entry_size);
2232 }
2233
2234 if ((data_collected + ha->fw_dump_tmplt_size) != ha->fw_dump_size) {
2235 ql4_printk(KERN_INFO, ha,
2236 "Dump data mismatch: Data collected: [0x%x], total_data_size:[0x%x]\n",
2237 data_collected, ha->fw_dump_size);
2238 goto md_failed;
2239 }
2240
2241 DEBUG2(ql4_printk(KERN_INFO, ha, "Leaving fn: %s Last entry: 0x%x\n",
2242 __func__, i));
2243md_failed:
2244 return rval;
2245}
2246
2247/**
2248 * qla4_8xxx_uevent_emit - Send uevent when the firmware dump is ready.
2249 * @ha: pointer to adapter structure
2250 **/
2251static void qla4_8xxx_uevent_emit(struct scsi_qla_host *ha, u32 code)
2252{
2253 char event_string[40];
2254 char *envp[] = { event_string, NULL };
2255
2256 switch (code) {
2257 case QL4_UEVENT_CODE_FW_DUMP:
2258 snprintf(event_string, sizeof(event_string), "FW_DUMP=%ld",
2259 ha->host_no);
2260 break;
2261 default:
2262 /*do nothing*/
2263 break;
2264 }
2265
2266 kobject_uevent_env(&(&ha->pdev->dev)->kobj, KOBJ_CHANGE, envp);
2267}
2268
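qla4_8xxx_uevent_emit() above raises a KOBJ_CHANGE event on the adapter's PCI device with FW_DUMP=<host number> in the event environment, so user space learns the moment a minidump has been captured. As a rough illustration only (the rule and helper script names are hypothetical, not part of this patch), a udev rule along the lines of

	ACTION=="change", ENV{FW_DUMP}=="?*", RUN+="/usr/local/sbin/save_ql4_fw_dump.sh %E{FW_DUMP}"

could react to the event and hand the host number to a script that retrieves the dump through the sysfs attributes that qla4_8xxx_alloc_sysfs_attr() registers at probe time.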
1604/** 2269/**
1605 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw 2270 * qla4_8xxx_device_bootstrap - Initialize device, set DEV_READY, start fw
1606 * @ha: pointer to adapter structure 2271 * @ha: pointer to adapter structure
@@ -1659,6 +2324,15 @@ dev_initialize:
1659 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION); 2324 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DRV_IDC_VERSION, QLA82XX_IDC_VERSION);
1660 2325
1661 qla4_8xxx_idc_unlock(ha); 2326 qla4_8xxx_idc_unlock(ha);
2327 if (ql4xenablemd && test_bit(AF_FW_RECOVERY, &ha->flags) &&
2328 !test_and_set_bit(AF_82XX_FW_DUMPED, &ha->flags)) {
2329 if (!qla4_8xxx_collect_md_data(ha)) {
2330 qla4_8xxx_uevent_emit(ha, QL4_UEVENT_CODE_FW_DUMP);
2331 } else {
2332 ql4_printk(KERN_INFO, ha, "Unable to collect minidump\n");
2333 clear_bit(AF_82XX_FW_DUMPED, &ha->flags);
2334 }
2335 }
1662 rval = qla4_8xxx_try_start_fw(ha); 2336 rval = qla4_8xxx_try_start_fw(ha);
1663 qla4_8xxx_idc_lock(ha); 2337 qla4_8xxx_idc_lock(ha);
1664 2338
@@ -1686,6 +2360,7 @@ static void
1686qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha) 2360qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1687{ 2361{
1688 uint32_t dev_state, drv_state, drv_active; 2362 uint32_t dev_state, drv_state, drv_active;
2363 uint32_t active_mask = 0xFFFFFFFF;
1689 unsigned long reset_timeout; 2364 unsigned long reset_timeout;
1690 2365
1691 ql4_printk(KERN_INFO, ha, 2366 ql4_printk(KERN_INFO, ha,
@@ -1697,7 +2372,14 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1697 qla4_8xxx_idc_lock(ha); 2372 qla4_8xxx_idc_lock(ha);
1698 } 2373 }
1699 2374
1700 qla4_8xxx_set_rst_ready(ha); 2375 if (!test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2376 DEBUG2(ql4_printk(KERN_INFO, ha,
2377 "%s(%ld): reset acknowledged\n",
2378 __func__, ha->host_no));
2379 qla4_8xxx_set_rst_ready(ha);
2380 } else {
2381 active_mask = (~(1 << (ha->func_num * 4)));
2382 }
1701 2383
1702 /* wait for 10 seconds for reset ack from all functions */ 2384 /* wait for 10 seconds for reset ack from all functions */
1703 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ); 2385 reset_timeout = jiffies + (ha->nx_reset_timeout * HZ);
@@ -1709,12 +2391,24 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1709 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n", 2391 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
1710 __func__, ha->host_no, drv_state, drv_active); 2392 __func__, ha->host_no, drv_state, drv_active);
1711 2393
1712 while (drv_state != drv_active) { 2394 while (drv_state != (drv_active & active_mask)) {
1713 if (time_after_eq(jiffies, reset_timeout)) { 2395 if (time_after_eq(jiffies, reset_timeout)) {
1714 printk("%s: RESET TIMEOUT!\n", DRIVER_NAME); 2396 ql4_printk(KERN_INFO, ha,
2397 "%s: RESET TIMEOUT! drv_state: 0x%08x, drv_active: 0x%08x\n",
2398 DRIVER_NAME, drv_state, drv_active);
1715 break; 2399 break;
1716 } 2400 }
1717 2401
2402 /*
2403 * When reset_owner times out, check which functions
2404 * acked/did not ack
2405 */
2406 if (test_bit(AF_82XX_RST_OWNER, &ha->flags)) {
2407 ql4_printk(KERN_INFO, ha,
2408 "%s(%ld): drv_state = 0x%x, drv_active = 0x%x\n",
2409 __func__, ha->host_no, drv_state,
2410 drv_active);
2411 }
1718 qla4_8xxx_idc_unlock(ha); 2412 qla4_8xxx_idc_unlock(ha);
1719 msleep(1000); 2413 msleep(1000);
1720 qla4_8xxx_idc_lock(ha); 2414 qla4_8xxx_idc_lock(ha);
@@ -1723,14 +2417,18 @@ qla4_8xxx_need_reset_handler(struct scsi_qla_host *ha)
1723 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE); 2417 drv_active = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DRV_ACTIVE);
1724 } 2418 }
1725 2419
2420 /* Clear RESET OWNER as we are not going to use it any further */
2421 clear_bit(AF_82XX_RST_OWNER, &ha->flags);
2422
1726 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2423 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1727 ql4_printk(KERN_INFO, ha, "3:Device state is 0x%x = %s\n", dev_state, 2424 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n", dev_state,
1728 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2425 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown");
1729 2426
1730 /* Force to DEV_COLD unless someone else is starting a reset */ 2427 /* Force to DEV_COLD unless someone else is starting a reset */
1731 if (dev_state != QLA82XX_DEV_INITIALIZING) { 2428 if (dev_state != QLA82XX_DEV_INITIALIZING) {
1732 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n"); 2429 ql4_printk(KERN_INFO, ha, "HW State: COLD/RE-INIT\n");
1733 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD); 2430 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, QLA82XX_DEV_COLD);
2431 qla4_8xxx_set_rst_ready(ha);
1734 } 2432 }
1735} 2433}
1736 2434
@@ -1765,8 +2463,9 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1765 } 2463 }
1766 2464
1767 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2465 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1768 ql4_printk(KERN_INFO, ha, "1:Device state is 0x%x = %s\n", dev_state, 2466 DEBUG2(ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1769 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2467 dev_state, dev_state < MAX_STATES ?
2468 qdev_state[dev_state] : "Unknown"));
1770 2469
1771 /* wait for 30 seconds for device to go ready */ 2470 /* wait for 30 seconds for device to go ready */
1772 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ); 2471 dev_init_timeout = jiffies + (ha->nx_dev_init_timeout * HZ);
@@ -1775,15 +2474,19 @@ int qla4_8xxx_device_state_handler(struct scsi_qla_host *ha)
1775 while (1) { 2474 while (1) {
1776 2475
1777 if (time_after_eq(jiffies, dev_init_timeout)) { 2476 if (time_after_eq(jiffies, dev_init_timeout)) {
1778 ql4_printk(KERN_WARNING, ha, "Device init failed!\n"); 2477 ql4_printk(KERN_WARNING, ha,
2478 "%s: Device Init Failed 0x%x = %s\n",
2479 DRIVER_NAME,
2480 dev_state, dev_state < MAX_STATES ?
2481 qdev_state[dev_state] : "Unknown");
1779 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2482 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
1780 QLA82XX_DEV_FAILED); 2483 QLA82XX_DEV_FAILED);
1781 } 2484 }
1782 2485
1783 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE); 2486 dev_state = qla4_8xxx_rd_32(ha, QLA82XX_CRB_DEV_STATE);
1784 ql4_printk(KERN_INFO, ha, 2487 ql4_printk(KERN_INFO, ha, "Device state is 0x%x = %s\n",
1785 "2:Device state is 0x%x = %s\n", dev_state, 2488 dev_state, dev_state < MAX_STATES ?
1786 dev_state < MAX_STATES ? qdev_state[dev_state] : "Unknown"); 2489 qdev_state[dev_state] : "Unknown");
1787 2490
1788 /* NOTE: Make sure idc unlocked upon exit of switch statement */ 2491 /* NOTE: Make sure idc unlocked upon exit of switch statement */
1789 switch (dev_state) { 2492 switch (dev_state) {
@@ -2184,6 +2887,7 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2184 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n"); 2887 ql4_printk(KERN_INFO, ha, "HW State: NEED RESET\n");
2185 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE, 2888 qla4_8xxx_wr_32(ha, QLA82XX_CRB_DEV_STATE,
2186 QLA82XX_DEV_NEED_RESET); 2889 QLA82XX_DEV_NEED_RESET);
2890 set_bit(AF_82XX_RST_OWNER, &ha->flags);
2187 } else 2891 } else
2188 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n"); 2892 ql4_printk(KERN_INFO, ha, "HW State: DEVICE INITIALIZING\n");
2189 2893
@@ -2195,8 +2899,10 @@ qla4_8xxx_isp_reset(struct scsi_qla_host *ha)
2195 qla4_8xxx_clear_rst_ready(ha); 2899 qla4_8xxx_clear_rst_ready(ha);
2196 qla4_8xxx_idc_unlock(ha); 2900 qla4_8xxx_idc_unlock(ha);
2197 2901
2198 if (rval == QLA_SUCCESS) 2902 if (rval == QLA_SUCCESS) {
2903 ql4_printk(KERN_INFO, ha, "Clearing AF_FW_RECOVERY in qla4_8xxx_isp_reset\n");
2199 clear_bit(AF_FW_RECOVERY, &ha->flags); 2904 clear_bit(AF_FW_RECOVERY, &ha->flags);
2905 }
2200 2906
2201 return rval; 2907 return rval;
2202} 2908}
diff --git a/drivers/scsi/qla4xxx/ql4_nx.h b/drivers/scsi/qla4xxx/ql4_nx.h
index dc7500e47b8b..30258479f100 100644
--- a/drivers/scsi/qla4xxx/ql4_nx.h
+++ b/drivers/scsi/qla4xxx/ql4_nx.h
@@ -792,4 +792,196 @@ struct crb_addr_pair {
792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0) 792#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x0b0)
793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4) 793#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x0b4)
794 794
795/* Minidump related */
796
797/* Entry Type Defines */
798#define QLA82XX_RDNOP 0
799#define QLA82XX_RDCRB 1
800#define QLA82XX_RDMUX 2
801#define QLA82XX_QUEUE 3
802#define QLA82XX_BOARD 4
803#define QLA82XX_RDOCM 6
804#define QLA82XX_PREGS 7
805#define QLA82XX_L1DTG 8
806#define QLA82XX_L1ITG 9
807#define QLA82XX_L1DAT 11
808#define QLA82XX_L1INS 12
809#define QLA82XX_L2DTG 21
810#define QLA82XX_L2ITG 22
811#define QLA82XX_L2DAT 23
812#define QLA82XX_L2INS 24
813#define QLA82XX_RDROM 71
814#define QLA82XX_RDMEM 72
815#define QLA82XX_CNTRL 98
816#define QLA82XX_RDEND 255
817
818/* Opcodes for Control Entries.
819 * These Flags are bit fields.
820 */
821#define QLA82XX_DBG_OPCODE_WR 0x01
822#define QLA82XX_DBG_OPCODE_RW 0x02
823#define QLA82XX_DBG_OPCODE_AND 0x04
824#define QLA82XX_DBG_OPCODE_OR 0x08
825#define QLA82XX_DBG_OPCODE_POLL 0x10
826#define QLA82XX_DBG_OPCODE_RDSTATE 0x20
827#define QLA82XX_DBG_OPCODE_WRSTATE 0x40
828#define QLA82XX_DBG_OPCODE_MDSTATE 0x80
829
830/* Driver Flags */
831#define QLA82XX_DBG_SKIPPED_FLAG 0x80 /* driver skipped this entry */
832#define QLA82XX_DBG_SIZE_ERR_FLAG 0x40 /* Entry vs Capture size
833 * mismatch */
834
835/* Driver_code is for driver to write some info about the entry
836 * currently not used.
837 */
838struct qla82xx_minidump_entry_hdr {
839 uint32_t entry_type;
840 uint32_t entry_size;
841 uint32_t entry_capture_size;
842 struct {
843 uint8_t entry_capture_mask;
844 uint8_t entry_code;
845 uint8_t driver_code;
846 uint8_t driver_flags;
847 } d_ctrl;
848};
849
850/* Read CRB entry header */
851struct qla82xx_minidump_entry_crb {
852 struct qla82xx_minidump_entry_hdr h;
853 uint32_t addr;
854 struct {
855 uint8_t addr_stride;
856 uint8_t state_index_a;
857 uint16_t poll_timeout;
858 } crb_strd;
859 uint32_t data_size;
860 uint32_t op_count;
861
862 struct {
863 uint8_t opcode;
864 uint8_t state_index_v;
865 uint8_t shl;
866 uint8_t shr;
867 } crb_ctrl;
868
869 uint32_t value_1;
870 uint32_t value_2;
871 uint32_t value_3;
872};
873
874struct qla82xx_minidump_entry_cache {
875 struct qla82xx_minidump_entry_hdr h;
876 uint32_t tag_reg_addr;
877 struct {
878 uint16_t tag_value_stride;
879 uint16_t init_tag_value;
880 } addr_ctrl;
881 uint32_t data_size;
882 uint32_t op_count;
883 uint32_t control_addr;
884 struct {
885 uint16_t write_value;
886 uint8_t poll_mask;
887 uint8_t poll_wait;
888 } cache_ctrl;
889 uint32_t read_addr;
890 struct {
891 uint8_t read_addr_stride;
892 uint8_t read_addr_cnt;
893 uint16_t rsvd_1;
894 } read_ctrl;
895};
896
897/* Read OCM */
898struct qla82xx_minidump_entry_rdocm {
899 struct qla82xx_minidump_entry_hdr h;
900 uint32_t rsvd_0;
901 uint32_t rsvd_1;
902 uint32_t data_size;
903 uint32_t op_count;
904 uint32_t rsvd_2;
905 uint32_t rsvd_3;
906 uint32_t read_addr;
907 uint32_t read_addr_stride;
908};
909
910/* Read Memory */
911struct qla82xx_minidump_entry_rdmem {
912 struct qla82xx_minidump_entry_hdr h;
913 uint32_t rsvd[6];
914 uint32_t read_addr;
915 uint32_t read_data_size;
916};
917
918/* Read ROM */
919struct qla82xx_minidump_entry_rdrom {
920 struct qla82xx_minidump_entry_hdr h;
921 uint32_t rsvd[6];
922 uint32_t read_addr;
923 uint32_t read_data_size;
924};
925
926/* Mux entry */
927struct qla82xx_minidump_entry_mux {
928 struct qla82xx_minidump_entry_hdr h;
929 uint32_t select_addr;
930 uint32_t rsvd_0;
931 uint32_t data_size;
932 uint32_t op_count;
933 uint32_t select_value;
934 uint32_t select_value_stride;
935 uint32_t read_addr;
936 uint32_t rsvd_1;
937};
938
939/* Queue entry */
940struct qla82xx_minidump_entry_queue {
941 struct qla82xx_minidump_entry_hdr h;
942 uint32_t select_addr;
943 struct {
944 uint16_t queue_id_stride;
945 uint16_t rsvd_0;
946 } q_strd;
947 uint32_t data_size;
948 uint32_t op_count;
949 uint32_t rsvd_1;
950 uint32_t rsvd_2;
951 uint32_t read_addr;
952 struct {
953 uint8_t read_addr_stride;
954 uint8_t read_addr_cnt;
955 uint16_t rsvd_3;
956 } rd_strd;
957};
958
959#define QLA82XX_MINIDUMP_OCM0_SIZE (256 * 1024)
960#define QLA82XX_MINIDUMP_L1C_SIZE (256 * 1024)
961#define QLA82XX_MINIDUMP_L2C_SIZE 1572864
962#define QLA82XX_MINIDUMP_COMMON_STR_SIZE 0
963#define QLA82XX_MINIDUMP_FCOE_STR_SIZE 0
964#define QLA82XX_MINIDUMP_MEM_SIZE 0
965#define QLA82XX_MAX_ENTRY_HDR 4
966
967struct qla82xx_minidump {
968 uint32_t md_ocm0_data[QLA82XX_MINIDUMP_OCM0_SIZE];
969 uint32_t md_l1c_data[QLA82XX_MINIDUMP_L1C_SIZE];
970 uint32_t md_l2c_data[QLA82XX_MINIDUMP_L2C_SIZE];
971 uint32_t md_cs_data[QLA82XX_MINIDUMP_COMMON_STR_SIZE];
972 uint32_t md_fcoes_data[QLA82XX_MINIDUMP_FCOE_STR_SIZE];
973 uint32_t md_mem_data[QLA82XX_MINIDUMP_MEM_SIZE];
974};
975
976#define MBC_DIAGNOSTIC_MINIDUMP_TEMPLATE 0x129
977#define RQST_TMPLT_SIZE 0x0
978#define RQST_TMPLT 0x1
979#define MD_DIRECT_ROM_WINDOW 0x42110030
980#define MD_DIRECT_ROM_READ_BASE 0x42150000
981#define MD_MIU_TEST_AGT_CTRL 0x41000090
982#define MD_MIU_TEST_AGT_ADDR_LO 0x41000094
983#define MD_MIU_TEST_AGT_ADDR_HI 0x41000098
984
985static const int MD_MIU_TEST_AGT_RDDATA[] = { 0x410000A8,
986 0x410000AC, 0x410000B8, 0x410000BC };
795#endif 987#endif
diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
index ee47820c30a6..cd15678f9ada 100644
--- a/drivers/scsi/qla4xxx/ql4_os.c
+++ b/drivers/scsi/qla4xxx/ql4_os.c
@@ -68,12 +68,34 @@ MODULE_PARM_DESC(ql4xmaxqdepth,
68 " Maximum queue depth to report for target devices.\n" 68 " Maximum queue depth to report for target devices.\n"
69 "\t\t Default: 32."); 69 "\t\t Default: 32.");
70 70
71static int ql4xqfulltracking = 1;
72module_param(ql4xqfulltracking, int, S_IRUGO | S_IWUSR);
73MODULE_PARM_DESC(ql4xqfulltracking,
74 " Enable or disable dynamic tracking and adjustment of\n"
75 "\t\t scsi device queue depth.\n"
76 "\t\t 0 - Disable.\n"
77 "\t\t 1 - Enable. (Default)");
78
71static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO; 79static int ql4xsess_recovery_tmo = QL4_SESS_RECOVERY_TMO;
72module_param(ql4xsess_recovery_tmo, int, S_IRUGO); 80module_param(ql4xsess_recovery_tmo, int, S_IRUGO);
73MODULE_PARM_DESC(ql4xsess_recovery_tmo, 81MODULE_PARM_DESC(ql4xsess_recovery_tmo,
74 " Target Session Recovery Timeout.\n" 82 " Target Session Recovery Timeout.\n"
75 "\t\t Default: 120 sec."); 83 "\t\t Default: 120 sec.");
76 84
85int ql4xmdcapmask = 0x1F;
86module_param(ql4xmdcapmask, int, S_IRUGO);
87MODULE_PARM_DESC(ql4xmdcapmask,
88 " Set the Minidump driver capture mask level.\n"
89 "\t\t Default is 0x1F.\n"
90 "\t\t Can be set to 0x3, 0x7, 0xF, 0x1F, 0x3F, 0x7F");
91
92int ql4xenablemd = 1;
93module_param(ql4xenablemd, int, S_IRUGO | S_IWUSR);
94MODULE_PARM_DESC(ql4xenablemd,
95 " Set to enable minidump.\n"
96 "\t\t 0 - disable minidump\n"
97 "\t\t 1 - enable minidump (Default)");
98
77static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha); 99static int qla4xxx_wait_for_hba_online(struct scsi_qla_host *ha);
78/* 100/*
79 * SCSI host template entry points 101 * SCSI host template entry points
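The module parameters added above give administrators load-time control over the new features: ql4xenablemd enables or disables minidump capture (default on), ql4xmdcapmask selects the capture level that qla4xxx_alloc_fw_dump() uses when sizing the buffer, and ql4xqfulltracking gates the new change_queue_depth callback further down. For example, loading the driver with "modprobe qla4xxx ql4xenablemd=1 ql4xmdcapmask=0x3F" requests a larger capture than the 0x1F default, while a ql4xmdcapmask value outside the 0x3-0x7F range is ignored in favour of the capture_debug_level recorded in the firmware's template header.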
@@ -140,6 +162,8 @@ static int qla4xxx_slave_configure(struct scsi_device *device);
140static void qla4xxx_slave_destroy(struct scsi_device *sdev); 162static void qla4xxx_slave_destroy(struct scsi_device *sdev);
141static umode_t ql4_attr_is_visible(int param_type, int param); 163static umode_t ql4_attr_is_visible(int param_type, int param);
142static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type); 164static int qla4xxx_host_reset(struct Scsi_Host *shost, int reset_type);
165static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
166 int reason);
143 167
144static struct qla4_8xxx_legacy_intr_set legacy_intr[] = 168static struct qla4_8xxx_legacy_intr_set legacy_intr[] =
145 QLA82XX_LEGACY_INTR_CONFIG; 169 QLA82XX_LEGACY_INTR_CONFIG;
@@ -159,6 +183,7 @@ static struct scsi_host_template qla4xxx_driver_template = {
159 .slave_configure = qla4xxx_slave_configure, 183 .slave_configure = qla4xxx_slave_configure,
160 .slave_alloc = qla4xxx_slave_alloc, 184 .slave_alloc = qla4xxx_slave_alloc,
161 .slave_destroy = qla4xxx_slave_destroy, 185 .slave_destroy = qla4xxx_slave_destroy,
186 .change_queue_depth = qla4xxx_change_queue_depth,
162 187
163 .this_id = -1, 188 .this_id = -1,
164 .cmd_per_lun = 3, 189 .cmd_per_lun = 3,
@@ -1555,19 +1580,53 @@ static void qla4xxx_session_destroy(struct iscsi_cls_session *cls_sess)
1555 struct iscsi_session *sess; 1580 struct iscsi_session *sess;
1556 struct ddb_entry *ddb_entry; 1581 struct ddb_entry *ddb_entry;
1557 struct scsi_qla_host *ha; 1582 struct scsi_qla_host *ha;
1558 unsigned long flags; 1583 unsigned long flags, wtime;
1584 struct dev_db_entry *fw_ddb_entry = NULL;
1585 dma_addr_t fw_ddb_entry_dma;
1586 uint32_t ddb_state;
1587 int ret;
1559 1588
1560 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__)); 1589 DEBUG2(printk(KERN_INFO "Func: %s\n", __func__));
1561 sess = cls_sess->dd_data; 1590 sess = cls_sess->dd_data;
1562 ddb_entry = sess->dd_data; 1591 ddb_entry = sess->dd_data;
1563 ha = ddb_entry->ha; 1592 ha = ddb_entry->ha;
1564 1593
1594 fw_ddb_entry = dma_alloc_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1595 &fw_ddb_entry_dma, GFP_KERNEL);
1596 if (!fw_ddb_entry) {
1597 ql4_printk(KERN_ERR, ha,
1598 "%s: Unable to allocate dma buffer\n", __func__);
1599 goto destroy_session;
1600 }
1601
1602 wtime = jiffies + (HZ * LOGOUT_TOV);
1603 do {
1604 ret = qla4xxx_get_fwddb_entry(ha, ddb_entry->fw_ddb_index,
1605 fw_ddb_entry, fw_ddb_entry_dma,
1606 NULL, NULL, &ddb_state, NULL,
1607 NULL, NULL);
1608 if (ret == QLA_ERROR)
1609 goto destroy_session;
1610
1611 if ((ddb_state == DDB_DS_NO_CONNECTION_ACTIVE) ||
1612 (ddb_state == DDB_DS_SESSION_FAILED))
1613 goto destroy_session;
1614
1615 schedule_timeout_uninterruptible(HZ);
1616 } while ((time_after(wtime, jiffies)));
1617
1618destroy_session:
1565 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index); 1619 qla4xxx_clear_ddb_entry(ha, ddb_entry->fw_ddb_index);
1566 1620
1567 spin_lock_irqsave(&ha->hardware_lock, flags); 1621 spin_lock_irqsave(&ha->hardware_lock, flags);
1568 qla4xxx_free_ddb(ha, ddb_entry); 1622 qla4xxx_free_ddb(ha, ddb_entry);
1569 spin_unlock_irqrestore(&ha->hardware_lock, flags); 1623 spin_unlock_irqrestore(&ha->hardware_lock, flags);
1624
1570 iscsi_session_teardown(cls_sess); 1625 iscsi_session_teardown(cls_sess);
1626
1627 if (fw_ddb_entry)
1628 dma_free_coherent(&ha->pdev->dev, sizeof(*fw_ddb_entry),
1629 fw_ddb_entry, fw_ddb_entry_dma);
1571} 1630}
1572 1631
1573static struct iscsi_cls_conn * 1632static struct iscsi_cls_conn *
@@ -2220,6 +2279,9 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2220 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues, 2279 dma_free_coherent(&ha->pdev->dev, ha->queues_len, ha->queues,
2221 ha->queues_dma); 2280 ha->queues_dma);
2222 2281
2282 if (ha->fw_dump)
2283 vfree(ha->fw_dump);
2284
2223 ha->queues_len = 0; 2285 ha->queues_len = 0;
2224 ha->queues = NULL; 2286 ha->queues = NULL;
2225 ha->queues_dma = 0; 2287 ha->queues_dma = 0;
@@ -2229,6 +2291,8 @@ static void qla4xxx_mem_free(struct scsi_qla_host *ha)
2229 ha->response_dma = 0; 2291 ha->response_dma = 0;
2230 ha->shadow_regs = NULL; 2292 ha->shadow_regs = NULL;
2231 ha->shadow_regs_dma = 0; 2293 ha->shadow_regs_dma = 0;
2294 ha->fw_dump = NULL;
2295 ha->fw_dump_size = 0;
2232 2296
2233 /* Free srb pool. */ 2297 /* Free srb pool. */
2234 if (ha->srb_mempool) 2298 if (ha->srb_mempool)
@@ -5023,6 +5087,8 @@ static int __devinit qla4xxx_probe_adapter(struct pci_dev *pdev,
5023 5087
5024 set_bit(AF_INIT_DONE, &ha->flags); 5088 set_bit(AF_INIT_DONE, &ha->flags);
5025 5089
5090 qla4_8xxx_alloc_sysfs_attr(ha);
5091
5026 printk(KERN_INFO 5092 printk(KERN_INFO
5027 " QLogic iSCSI HBA Driver version: %s\n" 5093 " QLogic iSCSI HBA Driver version: %s\n"
5028 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n", 5094 " QLogic ISP%04x @ %s, host#=%ld, fw=%02d.%02d.%02d.%02d\n",
@@ -5149,6 +5215,7 @@ static void __devexit qla4xxx_remove_adapter(struct pci_dev *pdev)
5149 iscsi_boot_destroy_kset(ha->boot_kset); 5215 iscsi_boot_destroy_kset(ha->boot_kset);
5150 5216
5151 qla4xxx_destroy_fw_ddb_session(ha); 5217 qla4xxx_destroy_fw_ddb_session(ha);
5218 qla4_8xxx_free_sysfs_attr(ha);
5152 5219
5153 scsi_remove_host(ha->host); 5220 scsi_remove_host(ha->host);
5154 5221
@@ -5217,6 +5284,15 @@ static void qla4xxx_slave_destroy(struct scsi_device *sdev)
5217 scsi_deactivate_tcq(sdev, 1); 5284 scsi_deactivate_tcq(sdev, 1);
5218} 5285}
5219 5286
5287static int qla4xxx_change_queue_depth(struct scsi_device *sdev, int qdepth,
5288 int reason)
5289{
5290 if (!ql4xqfulltracking)
5291 return -EOPNOTSUPP;
5292
5293 return iscsi_change_queue_depth(sdev, qdepth, reason);
5294}
5295
5220/** 5296/**
5221 * qla4xxx_del_from_active_array - returns an active srb 5297 * qla4xxx_del_from_active_array - returns an active srb
5222 * @ha: Pointer to host adapter structure. 5298 * @ha: Pointer to host adapter structure.
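A minimal kernel-style sketch of the bounded-wait pattern the ql4_os.c hunks above introduce: poll a firmware state until the connection is reported gone or a logout timeout expires, then fall through to the normal cleanup. my_read_state(), my_wait_for_logout() and MY_LOGOUT_TOV are made-up stand-ins for the qla4xxx helpers, not the driver's own names.

#include <linux/jiffies.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/types.h>

#define MY_LOGOUT_TOV	30			/* seconds; assumed value */

static int my_read_state(u32 *state);		/* hypothetical firmware query */

static int my_wait_for_logout(void)
{
	unsigned long deadline = jiffies + MY_LOGOUT_TOV * HZ;
	u32 state;

	do {
		if (my_read_state(&state))
			return -EIO;		/* hard error: stop polling */
		if (state == 0)			/* connection already torn down */
			return 0;
		schedule_timeout_uninterruptible(HZ);	/* sleep one second between polls */
	} while (time_after(deadline, jiffies));

	return -ETIMEDOUT;			/* deadline passed; caller cleans up anyway */
}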
diff --git a/drivers/scsi/qla4xxx/ql4_version.h b/drivers/scsi/qla4xxx/ql4_version.h
index 97b30c108e36..cc1cc3518b87 100644
--- a/drivers/scsi/qla4xxx/ql4_version.h
+++ b/drivers/scsi/qla4xxx/ql4_version.h
@@ -5,4 +5,4 @@
5 * See LICENSE.qla4xxx for copyright and licensing details. 5 * See LICENSE.qla4xxx for copyright and licensing details.
6 */ 6 */
7 7
8#define QLA4XXX_DRIVER_VERSION "5.02.00-k16" 8#define QLA4XXX_DRIVER_VERSION "5.02.00-k17"
diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
index 61c82a345f82..bbbc9c918d4c 100644
--- a/drivers/scsi/scsi.c
+++ b/drivers/scsi/scsi.c
@@ -90,11 +90,9 @@ unsigned int scsi_logging_level;
90EXPORT_SYMBOL(scsi_logging_level); 90EXPORT_SYMBOL(scsi_logging_level);
91#endif 91#endif
92 92
93#if IS_ENABLED(CONFIG_PM) || IS_ENABLED(CONFIG_BLK_DEV_SD) 93/* sd, scsi core and power management need to coordinate flushing async actions */
94/* sd and scsi_pm need to coordinate flushing async actions */
95LIST_HEAD(scsi_sd_probe_domain); 94LIST_HEAD(scsi_sd_probe_domain);
96EXPORT_SYMBOL(scsi_sd_probe_domain); 95EXPORT_SYMBOL(scsi_sd_probe_domain);
97#endif
98 96
99/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI. 97/* NB: These are exposed through /proc/scsi/scsi and form part of the ABI.
100 * You may not alter any existing entry (although adding new ones is 98 * You may not alter any existing entry (although adding new ones is
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 62ddfd31d4ce..6dfb9785d345 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -1378,16 +1378,19 @@ static int scsi_lld_busy(struct request_queue *q)
1378{ 1378{
1379 struct scsi_device *sdev = q->queuedata; 1379 struct scsi_device *sdev = q->queuedata;
1380 struct Scsi_Host *shost; 1380 struct Scsi_Host *shost;
1381 struct scsi_target *starget;
1382 1381
1383 if (!sdev) 1382 if (!sdev)
1384 return 0; 1383 return 0;
1385 1384
1386 shost = sdev->host; 1385 shost = sdev->host;
1387 starget = scsi_target(sdev);
1388 1386
1389 if (scsi_host_in_recovery(shost) || scsi_host_is_busy(shost) || 1387 /*
1390 scsi_target_is_busy(starget) || scsi_device_is_busy(sdev)) 1388 * Ignore host/starget busy state.
 1389	 * Since the block layer does not have a concept of fairness across
1390 * multiple queues, congestion of host/starget needs to be handled
 1391	 * in the SCSI layer.
1392 */
1393 if (scsi_host_in_recovery(shost) || scsi_device_is_busy(sdev))
1391 return 1; 1394 return 1;
1392 1395
1393 return 0; 1396 return 0;
diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c
index f661a41fa4c6..d4201ded3b22 100644
--- a/drivers/scsi/scsi_pm.c
+++ b/drivers/scsi/scsi_pm.c
@@ -24,8 +24,11 @@ static int scsi_dev_type_suspend(struct device *dev, pm_message_t msg)
24 err = scsi_device_quiesce(to_scsi_device(dev)); 24 err = scsi_device_quiesce(to_scsi_device(dev));
25 if (err == 0) { 25 if (err == 0) {
26 drv = dev->driver; 26 drv = dev->driver;
27 if (drv && drv->suspend) 27 if (drv && drv->suspend) {
28 err = drv->suspend(dev, msg); 28 err = drv->suspend(dev, msg);
29 if (err)
30 scsi_device_resume(to_scsi_device(dev));
31 }
29 } 32 }
30 dev_dbg(dev, "scsi suspend: %d\n", err); 33 dev_dbg(dev, "scsi suspend: %d\n", err);
31 return err; 34 return err;
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 01b03744f1f9..2e5fe584aad3 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -147,7 +147,7 @@ int scsi_complete_async_scans(void)
147 147
148 do { 148 do {
149 if (list_empty(&scanning_hosts)) 149 if (list_empty(&scanning_hosts))
150 return 0; 150 goto out;
151 /* If we can't get memory immediately, that's OK. Just 151 /* If we can't get memory immediately, that's OK. Just
152 * sleep a little. Even if we never get memory, the async 152 * sleep a little. Even if we never get memory, the async
153 * scans will finish eventually. 153 * scans will finish eventually.
@@ -179,8 +179,11 @@ int scsi_complete_async_scans(void)
179 } 179 }
180 done: 180 done:
181 spin_unlock(&async_scan_lock); 181 spin_unlock(&async_scan_lock);
182
183 kfree(data); 182 kfree(data);
183
184 out:
185 async_synchronize_full_domain(&scsi_sd_probe_domain);
186
184 return 0; 187 return 0;
185} 188}
186 189
diff --git a/drivers/scsi/scsi_wait_scan.c b/drivers/scsi/scsi_wait_scan.c
index 74708fcaf82f..ae7814874618 100644
--- a/drivers/scsi/scsi_wait_scan.c
+++ b/drivers/scsi/scsi_wait_scan.c
@@ -12,7 +12,7 @@
12 12
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/device.h> 14#include <linux/device.h>
15#include <scsi/scsi_scan.h> 15#include "scsi_priv.h"
16 16
17static int __init wait_scan_init(void) 17static int __init wait_scan_init(void)
18{ 18{
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 4e010b727818..6a4fd00117ca 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1836,7 +1836,7 @@ ufshcd_probe(struct pci_dev *pdev, const struct pci_device_id *id)
1836 err = pci_request_regions(pdev, UFSHCD); 1836 err = pci_request_regions(pdev, UFSHCD);
1837 if (err < 0) { 1837 if (err < 0) {
1838 dev_err(&pdev->dev, "request regions failed\n"); 1838 dev_err(&pdev->dev, "request regions failed\n");
1839 goto out_disable; 1839 goto out_host_put;
1840 } 1840 }
1841 1841
1842 hba->mmio_base = pci_ioremap_bar(pdev, 0); 1842 hba->mmio_base = pci_ioremap_bar(pdev, 0);
@@ -1925,8 +1925,9 @@ out_iounmap:
1925 iounmap(hba->mmio_base); 1925 iounmap(hba->mmio_base);
1926out_release_regions: 1926out_release_regions:
1927 pci_release_regions(pdev); 1927 pci_release_regions(pdev);
1928out_disable: 1928out_host_put:
1929 scsi_host_put(host); 1929 scsi_host_put(host);
1930out_disable:
1930 pci_clear_master(pdev); 1931 pci_clear_master(pdev);
1931 pci_disable_device(pdev); 1932 pci_disable_device(pdev);
1932out_error: 1933out_error:
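The ufshcd hunks above are a goto-unwind ordering fix: once pci_request_regions() fails, only the resources acquired before it may be released, so the failure now jumps to a new out_host_put label instead of out_disable. A compact sketch of that convention under assumed names; acquire_a/b/c and release_a/b stand in for the real pci_*() and scsi_*() calls.

static int acquire_a(void) { return 0; }	/* e.g. pci_enable_device() */
static int acquire_b(void) { return 0; }	/* e.g. scsi_host_alloc() */
static int acquire_c(void) { return 0; }	/* e.g. pci_request_regions() */
static void release_b(void) { }
static void release_a(void) { }

static int my_probe(void)
{
	int err;

	err = acquire_a();
	if (err)
		goto out;
	err = acquire_b();
	if (err)
		goto out_release_a;
	err = acquire_c();
	if (err)
		goto out_release_b;	/* not out_release_a: b must be undone too */
	return 0;

	/* labels release in reverse order of acquisition */
out_release_b:
	release_b();
out_release_a:
	release_a();
out:
	return err;
}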
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 46ef5fe51db5..0c73dd4f43a0 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -801,7 +801,7 @@ static int omap2_mcspi_setup(struct spi_device *spi)
801 mcspi_dma = &mcspi->dma_channels[spi->chip_select]; 801 mcspi_dma = &mcspi->dma_channels[spi->chip_select];
802 802
803 if (!cs) { 803 if (!cs) {
804 cs = devm_kzalloc(&spi->dev , sizeof *cs, GFP_KERNEL); 804 cs = kzalloc(sizeof *cs, GFP_KERNEL);
805 if (!cs) 805 if (!cs)
806 return -ENOMEM; 806 return -ENOMEM;
807 cs->base = mcspi->base + spi->chip_select * 0x14; 807 cs->base = mcspi->base + spi->chip_select * 0x14;
@@ -842,6 +842,7 @@ static void omap2_mcspi_cleanup(struct spi_device *spi)
842 cs = spi->controller_state; 842 cs = spi->controller_state;
843 list_del(&cs->node); 843 list_del(&cs->node);
844 844
845 kfree(cs);
845 } 846 }
846 847
847 if (spi->chip_select < spi->master->num_chipselect) { 848 if (spi->chip_select < spi->master->num_chipselect) {
diff --git a/drivers/staging/comedi/drivers.c b/drivers/staging/comedi/drivers.c
index 1c3d6386ea36..aeac1caba3f9 100644
--- a/drivers/staging/comedi/drivers.c
+++ b/drivers/staging/comedi/drivers.c
@@ -30,6 +30,7 @@
30#include <linux/pci.h> 30#include <linux/pci.h>
31#include <linux/usb.h> 31#include <linux/usb.h>
32#include <linux/errno.h> 32#include <linux/errno.h>
33#include <linux/kconfig.h>
33#include <linux/kernel.h> 34#include <linux/kernel.h>
34#include <linux/sched.h> 35#include <linux/sched.h>
35#include <linux/fcntl.h> 36#include <linux/fcntl.h>
@@ -981,6 +982,8 @@ void comedi_pci_driver_unregister(struct comedi_driver *comedi_driver,
981} 982}
982EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister); 983EXPORT_SYMBOL_GPL(comedi_pci_driver_unregister);
983 984
985#if IS_ENABLED(CONFIG_USB)
986
984static int comedi_old_usb_auto_config(struct usb_interface *intf, 987static int comedi_old_usb_auto_config(struct usb_interface *intf,
985 struct comedi_driver *driver) 988 struct comedi_driver *driver)
986{ 989{
@@ -1043,3 +1046,5 @@ void comedi_usb_driver_unregister(struct comedi_driver *comedi_driver,
1043 comedi_driver_unregister(comedi_driver); 1046 comedi_driver_unregister(comedi_driver);
1044} 1047}
1045EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister); 1048EXPORT_SYMBOL_GPL(comedi_usb_driver_unregister);
1049
1050#endif
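The comedi change above wraps the USB auto-config helpers in IS_ENABLED(CONFIG_USB), which evaluates true for both built-in and modular USB support. A one-function illustration of the guard; my_usb_helper() is a placeholder, not a comedi symbol.

#include <linux/kconfig.h>

#if IS_ENABLED(CONFIG_USB)		/* true for CONFIG_USB=y and CONFIG_USB=m */
static void my_usb_helper(void)
{
	/* USB-specific registration would go here */
}
#endif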
diff --git a/drivers/staging/gdm72xx/netlink_k.c b/drivers/staging/gdm72xx/netlink_k.c
index 292af0f7f451..51665132c61b 100644
--- a/drivers/staging/gdm72xx/netlink_k.c
+++ b/drivers/staging/gdm72xx/netlink_k.c
@@ -104,7 +104,7 @@ struct sock *netlink_init(int unit, void (*cb)(struct net_device *dev, u16 type,
104 104
105void netlink_exit(struct sock *sock) 105void netlink_exit(struct sock *sock)
106{ 106{
107 sock_release(sock->sk_socket); 107 netlink_kernel_release(sock);
108} 108}
109 109
110int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len) 110int netlink_send(struct sock *sock, int group, u16 type, void *msg, int len)
diff --git a/drivers/staging/iio/Documentation/device.txt b/drivers/staging/iio/Documentation/device.txt
index 0338c7cd0a8b..f03fbd3bb454 100644
--- a/drivers/staging/iio/Documentation/device.txt
+++ b/drivers/staging/iio/Documentation/device.txt
@@ -29,8 +29,6 @@ Then fill in the following:
29 * info->driver_module: 29 * info->driver_module:
30 Set to THIS_MODULE. Used to ensure correct ownership 30 Set to THIS_MODULE. Used to ensure correct ownership
31 of various resources allocate by the core. 31 of various resources allocate by the core.
32 * info->num_interrupt_lines:
33 Number of event triggering hardware lines the device has.
34 * info->event_attrs: 32 * info->event_attrs:
35 Attributes used to enable / disable hardware events. 33 Attributes used to enable / disable hardware events.
36 * info->attrs: 34 * info->attrs:
diff --git a/drivers/staging/iio/adc/Kconfig b/drivers/staging/iio/adc/Kconfig
index 2490dd25093b..8f1b3af02f29 100644
--- a/drivers/staging/iio/adc/Kconfig
+++ b/drivers/staging/iio/adc/Kconfig
@@ -13,6 +13,7 @@ config AD7291
13config AD7298 13config AD7298
14 tristate "Analog Devices AD7298 ADC driver" 14 tristate "Analog Devices AD7298 ADC driver"
15 depends on SPI 15 depends on SPI
16 select IIO_KFIFO_BUF if IIO_BUFFER
16 help 17 help
17 Say yes here to build support for Analog Devices AD7298 18 Say yes here to build support for Analog Devices AD7298
18 8 Channel ADC with temperature sensor. 19 8 Channel ADC with temperature sensor.
diff --git a/drivers/staging/iio/adc/ad7606_core.c b/drivers/staging/iio/adc/ad7606_core.c
index 10ab6dc823b9..a13afff2dfe6 100644
--- a/drivers/staging/iio/adc/ad7606_core.c
+++ b/drivers/staging/iio/adc/ad7606_core.c
@@ -235,7 +235,8 @@ static const struct attribute_group ad7606_attribute_group_range = {
235 .indexed = 1, \ 235 .indexed = 1, \
236 .channel = num, \ 236 .channel = num, \
237 .address = num, \ 237 .address = num, \
238 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT, \ 238 .info_mask = IIO_CHAN_INFO_RAW_SEPARATE_BIT | \
239 IIO_CHAN_INFO_SCALE_SHARED_BIT, \
239 .scan_index = num, \ 240 .scan_index = num, \
240 .scan_type = IIO_ST('s', 16, 16, 0), \ 241 .scan_type = IIO_ST('s', 16, 16, 0), \
241 } 242 }
diff --git a/drivers/staging/omapdrm/omap_fbdev.c b/drivers/staging/omapdrm/omap_fbdev.c
index 11acd4c35ed2..8c6ed3b0c6f6 100644
--- a/drivers/staging/omapdrm/omap_fbdev.c
+++ b/drivers/staging/omapdrm/omap_fbdev.c
@@ -208,7 +208,8 @@ static int omap_fbdev_create(struct drm_fb_helper *helper,
208 */ 208 */
209 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true); 209 ret = omap_gem_get_paddr(fbdev->bo, &paddr, true);
210 if (ret) { 210 if (ret) {
211 dev_err(dev->dev, "could not map (paddr)!\n"); 211 dev_err(dev->dev,
212 "could not map (paddr)! Skipping framebuffer alloc\n");
212 ret = -ENOMEM; 213 ret = -ENOMEM;
213 goto fail; 214 goto fail;
214 } 215 }
@@ -388,8 +389,11 @@ void omap_fbdev_free(struct drm_device *dev)
388 389
389 fbi = helper->fbdev; 390 fbi = helper->fbdev;
390 391
 391	unregister_framebuffer(fbi);	 392	/* only clean up the framebuffer if it is present */
392 framebuffer_release(fbi); 393 if (fbi) {
394 unregister_framebuffer(fbi);
395 framebuffer_release(fbi);
396 }
393 397
394 drm_fb_helper_fini(helper); 398 drm_fb_helper_fini(helper);
395 399
diff --git a/drivers/staging/ramster/zcache-main.c b/drivers/staging/ramster/zcache-main.c
index 4e7ef0e6b79c..d46764b5aaba 100644
--- a/drivers/staging/ramster/zcache-main.c
+++ b/drivers/staging/ramster/zcache-main.c
@@ -3002,7 +3002,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
3002 return oid; 3002 return oid;
3003} 3003}
3004 3004
3005static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, 3005static int zcache_frontswap_store(unsigned type, pgoff_t offset,
3006 struct page *page) 3006 struct page *page)
3007{ 3007{
3008 u64 ind64 = (u64)offset; 3008 u64 ind64 = (u64)offset;
@@ -3025,7 +3025,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
3025 3025
3026/* returns 0 if the page was successfully gotten from frontswap, -1 if 3026/* returns 0 if the page was successfully gotten from frontswap, -1 if
3027 * was not present (should never happen!) */ 3027 * was not present (should never happen!) */
3028static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, 3028static int zcache_frontswap_load(unsigned type, pgoff_t offset,
3029 struct page *page) 3029 struct page *page)
3030{ 3030{
3031 u64 ind64 = (u64)offset; 3031 u64 ind64 = (u64)offset;
@@ -3080,8 +3080,8 @@ static void zcache_frontswap_init(unsigned ignored)
3080} 3080}
3081 3081
3082static struct frontswap_ops zcache_frontswap_ops = { 3082static struct frontswap_ops zcache_frontswap_ops = {
3083 .put_page = zcache_frontswap_put_page, 3083 .store = zcache_frontswap_store,
3084 .get_page = zcache_frontswap_get_page, 3084 .load = zcache_frontswap_load,
3085 .invalidate_page = zcache_frontswap_flush_page, 3085 .invalidate_page = zcache_frontswap_flush_page,
3086 .invalidate_area = zcache_frontswap_flush_area, 3086 .invalidate_area = zcache_frontswap_flush_area,
3087 .init = zcache_frontswap_init 3087 .init = zcache_frontswap_init
diff --git a/drivers/staging/rtl8712/usb_intf.c b/drivers/staging/rtl8712/usb_intf.c
index 9bd18e2d0513..69f616c6964e 100644
--- a/drivers/staging/rtl8712/usb_intf.c
+++ b/drivers/staging/rtl8712/usb_intf.c
@@ -102,6 +102,8 @@ static struct usb_device_id rtl871x_usb_id_tbl[] = {
102 /* - */ 102 /* - */
103 {USB_DEVICE(0x20F4, 0x646B)}, 103 {USB_DEVICE(0x20F4, 0x646B)},
104 {USB_DEVICE(0x083A, 0xC512)}, 104 {USB_DEVICE(0x083A, 0xC512)},
105 {USB_DEVICE(0x25D4, 0x4CA1)},
106 {USB_DEVICE(0x25D4, 0x4CAB)},
105 107
106/* RTL8191SU */ 108/* RTL8191SU */
107 /* Realtek */ 109 /* Realtek */
diff --git a/drivers/staging/zcache/zcache-main.c b/drivers/staging/zcache/zcache-main.c
index 2734dacacbaf..784c796b9848 100644
--- a/drivers/staging/zcache/zcache-main.c
+++ b/drivers/staging/zcache/zcache-main.c
@@ -1835,7 +1835,7 @@ static int zcache_frontswap_poolid = -1;
1835 * Swizzling increases objects per swaptype, increasing tmem concurrency 1835 * Swizzling increases objects per swaptype, increasing tmem concurrency
1836 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS 1836 * for heavy swaploads. Later, larger nr_cpus -> larger SWIZ_BITS
1837 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from 1837 * Setting SWIZ_BITS to 27 basically reconstructs the swap entry from
1838 * frontswap_get_page(), but has side-effects. Hence using 8. 1838 * frontswap_load(), but has side-effects. Hence using 8.
1839 */ 1839 */
1840#define SWIZ_BITS 8 1840#define SWIZ_BITS 8
1841#define SWIZ_MASK ((1 << SWIZ_BITS) - 1) 1841#define SWIZ_MASK ((1 << SWIZ_BITS) - 1)
@@ -1849,7 +1849,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
1849 return oid; 1849 return oid;
1850} 1850}
1851 1851
1852static int zcache_frontswap_put_page(unsigned type, pgoff_t offset, 1852static int zcache_frontswap_store(unsigned type, pgoff_t offset,
1853 struct page *page) 1853 struct page *page)
1854{ 1854{
1855 u64 ind64 = (u64)offset; 1855 u64 ind64 = (u64)offset;
@@ -1870,7 +1870,7 @@ static int zcache_frontswap_put_page(unsigned type, pgoff_t offset,
1870 1870
1871/* returns 0 if the page was successfully gotten from frontswap, -1 if 1871/* returns 0 if the page was successfully gotten from frontswap, -1 if
1872 * was not present (should never happen!) */ 1872 * was not present (should never happen!) */
1873static int zcache_frontswap_get_page(unsigned type, pgoff_t offset, 1873static int zcache_frontswap_load(unsigned type, pgoff_t offset,
1874 struct page *page) 1874 struct page *page)
1875{ 1875{
1876 u64 ind64 = (u64)offset; 1876 u64 ind64 = (u64)offset;
@@ -1919,8 +1919,8 @@ static void zcache_frontswap_init(unsigned ignored)
1919} 1919}
1920 1920
1921static struct frontswap_ops zcache_frontswap_ops = { 1921static struct frontswap_ops zcache_frontswap_ops = {
1922 .put_page = zcache_frontswap_put_page, 1922 .store = zcache_frontswap_store,
1923 .get_page = zcache_frontswap_get_page, 1923 .load = zcache_frontswap_load,
1924 .invalidate_page = zcache_frontswap_flush_page, 1924 .invalidate_page = zcache_frontswap_flush_page,
1925 .invalidate_area = zcache_frontswap_flush_area, 1925 .invalidate_area = zcache_frontswap_flush_area,
1926 .init = zcache_frontswap_init 1926 .init = zcache_frontswap_init
diff --git a/drivers/target/sbp/sbp_target.c b/drivers/target/sbp/sbp_target.c
index 37c609898f84..7e6136e2ce81 100644
--- a/drivers/target/sbp/sbp_target.c
+++ b/drivers/target/sbp/sbp_target.c
@@ -587,14 +587,14 @@ static void sbp_management_request_logout(
587{ 587{
588 struct sbp_tport *tport = agent->tport; 588 struct sbp_tport *tport = agent->tport;
589 struct sbp_tpg *tpg = tport->tpg; 589 struct sbp_tpg *tpg = tport->tpg;
590 int login_id; 590 int id;
591 struct sbp_login_descriptor *login; 591 struct sbp_login_descriptor *login;
592 592
593 login_id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)); 593 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
594 594
595 login = sbp_login_find_by_id(tpg, login_id); 595 login = sbp_login_find_by_id(tpg, id);
596 if (!login) { 596 if (!login) {
597 pr_warn("cannot find login: %d\n", login_id); 597 pr_warn("cannot find login: %d\n", id);
598 598
599 req->status.status = cpu_to_be32( 599 req->status.status = cpu_to_be32(
600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) | 600 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index e624b836469c..91799973081a 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -374,8 +374,9 @@ int target_emulate_set_target_port_groups(struct se_cmd *cmd)
374 374
375out: 375out:
376 transport_kunmap_data_sg(cmd); 376 transport_kunmap_data_sg(cmd);
377 target_complete_cmd(cmd, GOOD); 377 if (!rc)
378 return 0; 378 target_complete_cmd(cmd, GOOD);
379 return rc;
379} 380}
380 381
381static inline int core_alua_state_nonoptimized( 382static inline int core_alua_state_nonoptimized(
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 686dba189f8e..9f99d0404908 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -133,16 +133,11 @@ static struct se_device *fd_create_virtdevice(
133 ret = PTR_ERR(dev_p); 133 ret = PTR_ERR(dev_p);
134 goto fail; 134 goto fail;
135 } 135 }
136
137 /* O_DIRECT too? */
138 flags = O_RDWR | O_CREAT | O_LARGEFILE;
139
140 /* 136 /*
141 * If fd_buffered_io=1 has not been set explicitly (the default), 137 * Use O_DSYNC by default instead of O_SYNC to forgo syncing
142 * use O_SYNC to force FILEIO writes to disk. 138 * of pure timestamp updates.
143 */ 139 */
144 if (!(fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO)) 140 flags = O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC;
145 flags |= O_SYNC;
146 141
147 file = filp_open(dev_p, flags, 0600); 142 file = filp_open(dev_p, flags, 0600);
148 if (IS_ERR(file)) { 143 if (IS_ERR(file)) {
@@ -380,23 +375,6 @@ static void fd_emulate_sync_cache(struct se_cmd *cmd)
380 } 375 }
381} 376}
382 377
383static void fd_emulate_write_fua(struct se_cmd *cmd)
384{
385 struct se_device *dev = cmd->se_dev;
386 struct fd_dev *fd_dev = dev->dev_ptr;
387 loff_t start = cmd->t_task_lba *
388 dev->se_sub_dev->se_dev_attrib.block_size;
389 loff_t end = start + cmd->data_length;
390 int ret;
391
392 pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
393 cmd->t_task_lba, cmd->data_length);
394
395 ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
396 if (ret != 0)
397 pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
398}
399
400static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl, 378static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
401 u32 sgl_nents, enum dma_data_direction data_direction) 379 u32 sgl_nents, enum dma_data_direction data_direction)
402{ 380{
@@ -411,19 +389,21 @@ static int fd_execute_cmd(struct se_cmd *cmd, struct scatterlist *sgl,
411 ret = fd_do_readv(cmd, sgl, sgl_nents); 389 ret = fd_do_readv(cmd, sgl, sgl_nents);
412 } else { 390 } else {
413 ret = fd_do_writev(cmd, sgl, sgl_nents); 391 ret = fd_do_writev(cmd, sgl, sgl_nents);
414 392 /*
 393		 * Perform implicit vfs_fsync_range() for fd_do_writev() ops
394 * for SCSI WRITEs with Forced Unit Access (FUA) set.
395 * Allow this to happen independent of WCE=0 setting.
396 */
415 if (ret > 0 && 397 if (ret > 0 &&
416 dev->se_sub_dev->se_dev_attrib.emulate_write_cache > 0 &&
417 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 && 398 dev->se_sub_dev->se_dev_attrib.emulate_fua_write > 0 &&
418 (cmd->se_cmd_flags & SCF_FUA)) { 399 (cmd->se_cmd_flags & SCF_FUA)) {
419 /* 400 struct fd_dev *fd_dev = dev->dev_ptr;
420 * We might need to be a bit smarter here 401 loff_t start = cmd->t_task_lba *
421 * and return some sense data to let the initiator 402 dev->se_sub_dev->se_dev_attrib.block_size;
422 * know the FUA WRITE cache sync failed..? 403 loff_t end = start + cmd->data_length;
423 */
424 fd_emulate_write_fua(cmd);
425 }
426 404
405 vfs_fsync_range(fd_dev->fd_file, start, end, 1);
406 }
427 } 407 }
428 408
429 if (ret < 0) { 409 if (ret < 0) {
@@ -442,7 +422,6 @@ enum {
442static match_table_t tokens = { 422static match_table_t tokens = {
443 {Opt_fd_dev_name, "fd_dev_name=%s"}, 423 {Opt_fd_dev_name, "fd_dev_name=%s"},
444 {Opt_fd_dev_size, "fd_dev_size=%s"}, 424 {Opt_fd_dev_size, "fd_dev_size=%s"},
445 {Opt_fd_buffered_io, "fd_buffered_io=%d"},
446 {Opt_err, NULL} 425 {Opt_err, NULL}
447}; 426};
448 427
@@ -454,7 +433,7 @@ static ssize_t fd_set_configfs_dev_params(
454 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr; 433 struct fd_dev *fd_dev = se_dev->se_dev_su_ptr;
455 char *orig, *ptr, *arg_p, *opts; 434 char *orig, *ptr, *arg_p, *opts;
456 substring_t args[MAX_OPT_ARGS]; 435 substring_t args[MAX_OPT_ARGS];
457 int ret = 0, arg, token; 436 int ret = 0, token;
458 437
459 opts = kstrdup(page, GFP_KERNEL); 438 opts = kstrdup(page, GFP_KERNEL);
460 if (!opts) 439 if (!opts)
@@ -498,19 +477,6 @@ static ssize_t fd_set_configfs_dev_params(
498 " bytes\n", fd_dev->fd_dev_size); 477 " bytes\n", fd_dev->fd_dev_size);
499 fd_dev->fbd_flags |= FBDF_HAS_SIZE; 478 fd_dev->fbd_flags |= FBDF_HAS_SIZE;
500 break; 479 break;
501 case Opt_fd_buffered_io:
502 match_int(args, &arg);
503 if (arg != 1) {
504 pr_err("bogus fd_buffered_io=%d value\n", arg);
505 ret = -EINVAL;
506 goto out;
507 }
508
509 pr_debug("FILEIO: Using buffered I/O"
510 " operations for struct fd_dev\n");
511
512 fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
513 break;
514 default: 480 default:
515 break; 481 break;
516 } 482 }
@@ -542,10 +508,8 @@ static ssize_t fd_show_configfs_dev_params(
542 ssize_t bl = 0; 508 ssize_t bl = 0;
543 509
544 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id); 510 bl = sprintf(b + bl, "TCM FILEIO ID: %u", fd_dev->fd_dev_id);
545 bl += sprintf(b + bl, " File: %s Size: %llu Mode: %s\n", 511 bl += sprintf(b + bl, " File: %s Size: %llu Mode: O_DSYNC\n",
546 fd_dev->fd_dev_name, fd_dev->fd_dev_size, 512 fd_dev->fd_dev_name, fd_dev->fd_dev_size);
547 (fd_dev->fbd_flags & FDBD_USE_BUFFERED_IO) ?
548 "Buffered" : "Synchronous");
549 return bl; 513 return bl;
550} 514}
551 515
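The target_core_file changes above drop the fd_buffered_io option, open the backing file with O_DSYNC, and reserve the explicit ranged sync for writes carrying FUA. A rough sketch of that split; my_fua_sync() is an illustrative wrapper and f is assumed to have been opened with O_RDWR | O_CREAT | O_LARGEFILE | O_DSYNC.

#include <linux/fs.h>
#include <linux/types.h>

static int my_fua_sync(struct file *f, u64 lba, u32 block_size, u32 len)
{
	loff_t start = lba * block_size;
	loff_t end = start + len;

	/* datasync == 1: flush the data range, skip pure timestamp updates */
	return vfs_fsync_range(f, start, end, 1);
}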
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index fbd59ef7d8be..70ce7fd7111d 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -14,7 +14,6 @@
14 14
15#define FBDF_HAS_PATH 0x01 15#define FBDF_HAS_PATH 0x01
16#define FBDF_HAS_SIZE 0x02 16#define FBDF_HAS_SIZE 0x02
17#define FDBD_USE_BUFFERED_IO 0x04
18 17
19struct fd_dev { 18struct fd_dev {
20 u32 fbd_flags; 19 u32 fbd_flags;
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index b05fdc0c05d3..634d0f31a28c 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -315,7 +315,7 @@ void transport_register_session(
315} 315}
316EXPORT_SYMBOL(transport_register_session); 316EXPORT_SYMBOL(transport_register_session);
317 317
318static void target_release_session(struct kref *kref) 318void target_release_session(struct kref *kref)
319{ 319{
320 struct se_session *se_sess = container_of(kref, 320 struct se_session *se_sess = container_of(kref,
321 struct se_session, sess_kref); 321 struct se_session, sess_kref);
@@ -332,6 +332,12 @@ EXPORT_SYMBOL(target_get_session);
332 332
333void target_put_session(struct se_session *se_sess) 333void target_put_session(struct se_session *se_sess)
334{ 334{
335 struct se_portal_group *tpg = se_sess->se_tpg;
336
337 if (tpg->se_tpg_tfo->put_session != NULL) {
338 tpg->se_tpg_tfo->put_session(se_sess);
339 return;
340 }
335 kref_put(&se_sess->sess_kref, target_release_session); 341 kref_put(&se_sess->sess_kref, target_release_session);
336} 342}
337EXPORT_SYMBOL(target_put_session); 343EXPORT_SYMBOL(target_put_session);
diff --git a/drivers/tty/amiserial.c b/drivers/tty/amiserial.c
index 35819e312624..6cc4358f68c1 100644
--- a/drivers/tty/amiserial.c
+++ b/drivers/tty/amiserial.c
@@ -1033,7 +1033,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
1033 if (!retinfo) 1033 if (!retinfo)
1034 return -EFAULT; 1034 return -EFAULT;
1035 memset(&tmp, 0, sizeof(tmp)); 1035 memset(&tmp, 0, sizeof(tmp));
1036 tty_lock(tty); 1036 tty_lock();
1037 tmp.line = tty->index; 1037 tmp.line = tty->index;
1038 tmp.port = state->port; 1038 tmp.port = state->port;
1039 tmp.flags = state->tport.flags; 1039 tmp.flags = state->tport.flags;
@@ -1042,7 +1042,7 @@ static int get_serial_info(struct tty_struct *tty, struct serial_state *state,
1042 tmp.close_delay = state->tport.close_delay; 1042 tmp.close_delay = state->tport.close_delay;
1043 tmp.closing_wait = state->tport.closing_wait; 1043 tmp.closing_wait = state->tport.closing_wait;
1044 tmp.custom_divisor = state->custom_divisor; 1044 tmp.custom_divisor = state->custom_divisor;
1045 tty_unlock(tty); 1045 tty_unlock();
1046 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo))) 1046 if (copy_to_user(retinfo,&tmp,sizeof(*retinfo)))
1047 return -EFAULT; 1047 return -EFAULT;
1048 return 0; 1048 return 0;
@@ -1059,12 +1059,12 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1059 if (copy_from_user(&new_serial,new_info,sizeof(new_serial))) 1059 if (copy_from_user(&new_serial,new_info,sizeof(new_serial)))
1060 return -EFAULT; 1060 return -EFAULT;
1061 1061
1062 tty_lock(tty); 1062 tty_lock();
1063 change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) || 1063 change_spd = ((new_serial.flags ^ port->flags) & ASYNC_SPD_MASK) ||
1064 new_serial.custom_divisor != state->custom_divisor; 1064 new_serial.custom_divisor != state->custom_divisor;
1065 if (new_serial.irq || new_serial.port != state->port || 1065 if (new_serial.irq || new_serial.port != state->port ||
1066 new_serial.xmit_fifo_size != state->xmit_fifo_size) { 1066 new_serial.xmit_fifo_size != state->xmit_fifo_size) {
1067 tty_unlock(tty); 1067 tty_unlock();
1068 return -EINVAL; 1068 return -EINVAL;
1069 } 1069 }
1070 1070
@@ -1074,7 +1074,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) || 1074 (new_serial.xmit_fifo_size != state->xmit_fifo_size) ||
1075 ((new_serial.flags & ~ASYNC_USR_MASK) != 1075 ((new_serial.flags & ~ASYNC_USR_MASK) !=
1076 (port->flags & ~ASYNC_USR_MASK))) { 1076 (port->flags & ~ASYNC_USR_MASK))) {
1077 tty_unlock(tty); 1077 tty_unlock();
1078 return -EPERM; 1078 return -EPERM;
1079 } 1079 }
1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) | 1080 port->flags = ((port->flags & ~ASYNC_USR_MASK) |
@@ -1084,7 +1084,7 @@ static int set_serial_info(struct tty_struct *tty, struct serial_state *state,
1084 } 1084 }
1085 1085
1086 if (new_serial.baud_base < 9600) { 1086 if (new_serial.baud_base < 9600) {
1087 tty_unlock(tty); 1087 tty_unlock();
1088 return -EINVAL; 1088 return -EINVAL;
1089 } 1089 }
1090 1090
@@ -1116,7 +1116,7 @@ check_and_exit:
1116 } 1116 }
1117 } else 1117 } else
1118 retval = startup(tty, state); 1118 retval = startup(tty, state);
1119 tty_unlock(tty); 1119 tty_unlock();
1120 return retval; 1120 return retval;
1121} 1121}
1122 1122
diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
index 6984e1a2686a..e61cabdd69df 100644
--- a/drivers/tty/cyclades.c
+++ b/drivers/tty/cyclades.c
@@ -1599,7 +1599,7 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
1599 * If the port is the middle of closing, bail out now 1599 * If the port is the middle of closing, bail out now
1600 */ 1600 */
1601 if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) { 1601 if (tty_hung_up_p(filp) || (info->port.flags & ASYNC_CLOSING)) {
1602 wait_event_interruptible_tty(tty, info->port.close_wait, 1602 wait_event_interruptible_tty(info->port.close_wait,
1603 !(info->port.flags & ASYNC_CLOSING)); 1603 !(info->port.flags & ASYNC_CLOSING));
1604 return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS; 1604 return (info->port.flags & ASYNC_HUP_NOTIFY) ? -EAGAIN: -ERESTARTSYS;
1605 } 1605 }
diff --git a/drivers/tty/hvc/hvc_xen.c b/drivers/tty/hvc/hvc_xen.c
index d3d91dae065c..944eaeb8e0cf 100644
--- a/drivers/tty/hvc/hvc_xen.c
+++ b/drivers/tty/hvc/hvc_xen.c
@@ -214,24 +214,24 @@ static int xen_hvm_console_init(void)
214 /* already configured */ 214 /* already configured */
215 if (info->intf != NULL) 215 if (info->intf != NULL)
216 return 0; 216 return 0;
217 217 /*
218 * If the toolstack (or the hypervisor) hasn't set these values, the
219 * default value is 0. Even though mfn = 0 and evtchn = 0 are
220 * theoretically correct values, in practice they never are and they
221 * mean that a legacy toolstack hasn't initialized the pv console correctly.
222 */
218 r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v); 223 r = hvm_get_parameter(HVM_PARAM_CONSOLE_EVTCHN, &v);
219 if (r < 0) { 224 if (r < 0 || v == 0)
220 kfree(info); 225 goto err;
221 return -ENODEV;
222 }
223 info->evtchn = v; 226 info->evtchn = v;
224 hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v); 227 v = 0;
225 if (r < 0) { 228 r = hvm_get_parameter(HVM_PARAM_CONSOLE_PFN, &v);
226 kfree(info); 229 if (r < 0 || v == 0)
227 return -ENODEV; 230 goto err;
228 }
229 mfn = v; 231 mfn = v;
230 info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE); 232 info->intf = ioremap(mfn << PAGE_SHIFT, PAGE_SIZE);
231 if (info->intf == NULL) { 233 if (info->intf == NULL)
232 kfree(info); 234 goto err;
233 return -ENODEV;
234 }
235 info->vtermno = HVC_COOKIE; 235 info->vtermno = HVC_COOKIE;
236 236
237 spin_lock(&xencons_lock); 237 spin_lock(&xencons_lock);
@@ -239,6 +239,9 @@ static int xen_hvm_console_init(void)
239 spin_unlock(&xencons_lock); 239 spin_unlock(&xencons_lock);
240 240
241 return 0; 241 return 0;
242err:
243 kfree(info);
244 return -ENODEV;
242} 245}
243 246
244static int xen_pv_console_init(void) 247static int xen_pv_console_init(void)
diff --git a/drivers/tty/n_r3964.c b/drivers/tty/n_r3964.c
index 656ad93bbc96..5c6c31459a2f 100644
--- a/drivers/tty/n_r3964.c
+++ b/drivers/tty/n_r3964.c
@@ -1065,8 +1065,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1065 1065
1066 TRACE_L("read()"); 1066 TRACE_L("read()");
1067 1067
1068 /* FIXME: should use a private lock */ 1068 tty_lock();
1069 tty_lock(tty);
1070 1069
1071 pClient = findClient(pInfo, task_pid(current)); 1070 pClient = findClient(pInfo, task_pid(current));
1072 if (pClient) { 1071 if (pClient) {
@@ -1078,7 +1077,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1078 goto unlock; 1077 goto unlock;
1079 } 1078 }
1080 /* block until there is a message: */ 1079 /* block until there is a message: */
1081 wait_event_interruptible_tty(tty, pInfo->read_wait, 1080 wait_event_interruptible_tty(pInfo->read_wait,
1082 (pMsg = remove_msg(pInfo, pClient))); 1081 (pMsg = remove_msg(pInfo, pClient)));
1083 } 1082 }
1084 1083
@@ -1108,7 +1107,7 @@ static ssize_t r3964_read(struct tty_struct *tty, struct file *file,
1108 } 1107 }
1109 ret = -EPERM; 1108 ret = -EPERM;
1110unlock: 1109unlock:
1111 tty_unlock(tty); 1110 tty_unlock();
1112 return ret; 1111 return ret;
1113} 1112}
1114 1113
@@ -1157,7 +1156,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1157 pHeader->locks = 0; 1156 pHeader->locks = 0;
1158 pHeader->owner = NULL; 1157 pHeader->owner = NULL;
1159 1158
1160 tty_lock(tty); 1159 tty_lock();
1161 1160
1162 pClient = findClient(pInfo, task_pid(current)); 1161 pClient = findClient(pInfo, task_pid(current));
1163 if (pClient) { 1162 if (pClient) {
@@ -1176,7 +1175,7 @@ static ssize_t r3964_write(struct tty_struct *tty, struct file *file,
1176 add_tx_queue(pInfo, pHeader); 1175 add_tx_queue(pInfo, pHeader);
1177 trigger_transmit(pInfo); 1176 trigger_transmit(pInfo);
1178 1177
1179 tty_unlock(tty); 1178 tty_unlock();
1180 1179
1181 return 0; 1180 return 0;
1182} 1181}
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 65c7c62c7aae..5505ffc91da4 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -47,7 +47,6 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
47 wake_up_interruptible(&tty->read_wait); 47 wake_up_interruptible(&tty->read_wait);
48 wake_up_interruptible(&tty->write_wait); 48 wake_up_interruptible(&tty->write_wait);
49 tty->packet = 0; 49 tty->packet = 0;
50 /* Review - krefs on tty_link ?? */
51 if (!tty->link) 50 if (!tty->link)
52 return; 51 return;
53 tty->link->packet = 0; 52 tty->link->packet = 0;
@@ -63,9 +62,9 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
63 mutex_unlock(&devpts_mutex); 62 mutex_unlock(&devpts_mutex);
64 } 63 }
65#endif 64#endif
66 tty_unlock(tty); 65 tty_unlock();
67 tty_vhangup(tty->link); 66 tty_vhangup(tty->link);
68 tty_lock(tty); 67 tty_lock();
69 } 68 }
70} 69}
71 70
@@ -623,27 +622,26 @@ static int ptmx_open(struct inode *inode, struct file *filp)
623 return retval; 622 return retval;
624 623
625 /* find a device that is not in use. */ 624 /* find a device that is not in use. */
626 mutex_lock(&devpts_mutex); 625 tty_lock();
627 index = devpts_new_index(inode); 626 index = devpts_new_index(inode);
627 tty_unlock();
628 if (index < 0) { 628 if (index < 0) {
629 retval = index; 629 retval = index;
630 goto err_file; 630 goto err_file;
631 } 631 }
632 632
633 mutex_unlock(&devpts_mutex);
634
635 mutex_lock(&tty_mutex); 633 mutex_lock(&tty_mutex);
634 mutex_lock(&devpts_mutex);
636 tty = tty_init_dev(ptm_driver, index); 635 tty = tty_init_dev(ptm_driver, index);
636 mutex_unlock(&devpts_mutex);
637 tty_lock();
638 mutex_unlock(&tty_mutex);
637 639
638 if (IS_ERR(tty)) { 640 if (IS_ERR(tty)) {
639 retval = PTR_ERR(tty); 641 retval = PTR_ERR(tty);
640 goto out; 642 goto out;
641 } 643 }
642 644
643 /* The tty returned here is locked so we can safely
644 drop the mutex */
645 mutex_unlock(&tty_mutex);
646
647 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 645 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
648 646
649 tty_add_file(tty, filp); 647 tty_add_file(tty, filp);
@@ -656,17 +654,16 @@ static int ptmx_open(struct inode *inode, struct file *filp)
656 if (retval) 654 if (retval)
657 goto err_release; 655 goto err_release;
658 656
659 tty_unlock(tty); 657 tty_unlock();
660 return 0; 658 return 0;
661err_release: 659err_release:
662 tty_unlock(tty); 660 tty_unlock();
663 tty_release(inode, filp); 661 tty_release(inode, filp);
664 return retval; 662 return retval;
665out: 663out:
666 mutex_unlock(&tty_mutex);
667 devpts_kill_index(inode, index); 664 devpts_kill_index(inode, index);
665 tty_unlock();
668err_file: 666err_file:
669 mutex_unlock(&devpts_mutex);
670 tty_free_file(filp); 667 tty_free_file(filp);
671 return retval; 668 return retval;
672} 669}
diff --git a/drivers/tty/serial/8250/8250.c b/drivers/tty/serial/8250/8250.c
index 47d061b9ad4d..6e1958a325bd 100644
--- a/drivers/tty/serial/8250/8250.c
+++ b/drivers/tty/serial/8250/8250.c
@@ -3113,7 +3113,7 @@ static struct uart_8250_port *serial8250_find_match_or_unused(struct uart_port *
3113 3113
3114/** 3114/**
3115 * serial8250_register_8250_port - register a serial port 3115 * serial8250_register_8250_port - register a serial port
3116 * @port: serial port template 3116 * @up: serial port template
3117 * 3117 *
3118 * Configure the serial port specified by the request. If the 3118 * Configure the serial port specified by the request. If the
3119 * port exists and is in use, it is hung up and unregistered 3119 * port exists and is in use, it is hung up and unregistered
diff --git a/drivers/tty/serial/amba-pl011.c b/drivers/tty/serial/amba-pl011.c
index 4ad721fb8405..c17923ec6e95 100644
--- a/drivers/tty/serial/amba-pl011.c
+++ b/drivers/tty/serial/amba-pl011.c
@@ -133,6 +133,10 @@ struct pl011_dmatx_data {
133struct uart_amba_port { 133struct uart_amba_port {
134 struct uart_port port; 134 struct uart_port port;
135 struct clk *clk; 135 struct clk *clk;
136 /* Two optional pin states - default & sleep */
137 struct pinctrl *pinctrl;
138 struct pinctrl_state *pins_default;
139 struct pinctrl_state *pins_sleep;
136 const struct vendor_data *vendor; 140 const struct vendor_data *vendor;
137 unsigned int dmacr; /* dma control reg */ 141 unsigned int dmacr; /* dma control reg */
138 unsigned int im; /* interrupt mask */ 142 unsigned int im; /* interrupt mask */
@@ -1312,6 +1316,14 @@ static int pl011_startup(struct uart_port *port)
1312 unsigned int cr; 1316 unsigned int cr;
1313 int retval; 1317 int retval;
1314 1318
 1319	/* Optionally enable pins to be muxed in and configured */
1320 if (!IS_ERR(uap->pins_default)) {
1321 retval = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1322 if (retval)
1323 dev_err(port->dev,
1324 "could not set default pins\n");
1325 }
1326
1315 retval = clk_prepare(uap->clk); 1327 retval = clk_prepare(uap->clk);
1316 if (retval) 1328 if (retval)
1317 goto out; 1329 goto out;
@@ -1420,6 +1432,7 @@ static void pl011_shutdown(struct uart_port *port)
1420{ 1432{
1421 struct uart_amba_port *uap = (struct uart_amba_port *)port; 1433 struct uart_amba_port *uap = (struct uart_amba_port *)port;
1422 unsigned int cr; 1434 unsigned int cr;
1435 int retval;
1423 1436
1424 /* 1437 /*
1425 * disable all interrupts 1438 * disable all interrupts
@@ -1462,6 +1475,14 @@ static void pl011_shutdown(struct uart_port *port)
1462 */ 1475 */
1463 clk_disable(uap->clk); 1476 clk_disable(uap->clk);
1464 clk_unprepare(uap->clk); 1477 clk_unprepare(uap->clk);
1478 /* Optionally let pins go into sleep states */
1479 if (!IS_ERR(uap->pins_sleep)) {
1480 retval = pinctrl_select_state(uap->pinctrl, uap->pins_sleep);
1481 if (retval)
1482 dev_err(port->dev,
1483 "could not set pins to sleep state\n");
1484 }
1485
1465 1486
1466 if (uap->port.dev->platform_data) { 1487 if (uap->port.dev->platform_data) {
1467 struct amba_pl011_data *plat; 1488 struct amba_pl011_data *plat;
@@ -1792,6 +1813,14 @@ static int __init pl011_console_setup(struct console *co, char *options)
1792 if (!uap) 1813 if (!uap)
1793 return -ENODEV; 1814 return -ENODEV;
1794 1815
1816 /* Allow pins to be muxed in and configured */
1817 if (!IS_ERR(uap->pins_default)) {
1818 ret = pinctrl_select_state(uap->pinctrl, uap->pins_default);
1819 if (ret)
1820 dev_err(uap->port.dev,
1821 "could not set default pins\n");
1822 }
1823
1795 ret = clk_prepare(uap->clk); 1824 ret = clk_prepare(uap->clk);
1796 if (ret) 1825 if (ret)
1797 return ret; 1826 return ret;
@@ -1844,7 +1873,6 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1844{ 1873{
1845 struct uart_amba_port *uap; 1874 struct uart_amba_port *uap;
1846 struct vendor_data *vendor = id->data; 1875 struct vendor_data *vendor = id->data;
1847 struct pinctrl *pinctrl;
1848 void __iomem *base; 1876 void __iomem *base;
1849 int i, ret; 1877 int i, ret;
1850 1878
@@ -1869,11 +1897,20 @@ static int pl011_probe(struct amba_device *dev, const struct amba_id *id)
1869 goto free; 1897 goto free;
1870 } 1898 }
1871 1899
1872 pinctrl = devm_pinctrl_get_select_default(&dev->dev); 1900 uap->pinctrl = devm_pinctrl_get(&dev->dev);
1873 if (IS_ERR(pinctrl)) { 1901 if (IS_ERR(uap->pinctrl)) {
1874 ret = PTR_ERR(pinctrl); 1902 ret = PTR_ERR(uap->pinctrl);
1875 goto unmap; 1903 goto unmap;
1876 } 1904 }
1905 uap->pins_default = pinctrl_lookup_state(uap->pinctrl,
1906 PINCTRL_STATE_DEFAULT);
1907 if (IS_ERR(uap->pins_default))
1908 dev_err(&dev->dev, "could not get default pinstate\n");
1909
1910 uap->pins_sleep = pinctrl_lookup_state(uap->pinctrl,
1911 PINCTRL_STATE_SLEEP);
1912 if (IS_ERR(uap->pins_sleep))
1913 dev_dbg(&dev->dev, "could not get sleep pinstate\n");
1877 1914
1878 uap->clk = clk_get(&dev->dev, NULL); 1915 uap->clk = clk_get(&dev->dev, NULL);
1879 if (IS_ERR(uap->clk)) { 1916 if (IS_ERR(uap->clk)) {
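The amba-pl011 diff above replaces the one-shot devm_pinctrl_get_select_default() with an explicit handle plus named default and sleep states that are selected around startup and shutdown. A trimmed sketch of the lookup, assuming a struct device *dev; struct my_pins and my_pins_init() are illustrative names, not the driver's.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/pinctrl/consumer.h>

struct my_pins {
	struct pinctrl *pinctrl;
	struct pinctrl_state *def;	/* selected at startup / console setup */
	struct pinctrl_state *sleep;	/* optionally selected at shutdown */
};

static int my_pins_init(struct device *dev, struct my_pins *p)
{
	p->pinctrl = devm_pinctrl_get(dev);
	if (IS_ERR(p->pinctrl))
		return PTR_ERR(p->pinctrl);

	/* both states are optional; callers test IS_ERR() before selecting */
	p->def = pinctrl_lookup_state(p->pinctrl, PINCTRL_STATE_DEFAULT);
	p->sleep = pinctrl_lookup_state(p->pinctrl, PINCTRL_STATE_SLEEP);

	if (!IS_ERR(p->def))
		return pinctrl_select_state(p->pinctrl, p->def);
	return 0;
}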
diff --git a/drivers/tty/serial/crisv10.c b/drivers/tty/serial/crisv10.c
index 7264d4d26717..80b6b1b1f725 100644
--- a/drivers/tty/serial/crisv10.c
+++ b/drivers/tty/serial/crisv10.c
@@ -3976,7 +3976,7 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
3976 */ 3976 */
3977 if (tty_hung_up_p(filp) || 3977 if (tty_hung_up_p(filp) ||
3978 (info->flags & ASYNC_CLOSING)) { 3978 (info->flags & ASYNC_CLOSING)) {
3979 wait_event_interruptible_tty(tty, info->close_wait, 3979 wait_event_interruptible_tty(info->close_wait,
3980 !(info->flags & ASYNC_CLOSING)); 3980 !(info->flags & ASYNC_CLOSING));
3981#ifdef SERIAL_DO_RESTART 3981#ifdef SERIAL_DO_RESTART
3982 if (info->flags & ASYNC_HUP_NOTIFY) 3982 if (info->flags & ASYNC_HUP_NOTIFY)
@@ -4052,9 +4052,9 @@ block_til_ready(struct tty_struct *tty, struct file * filp,
4052 printk("block_til_ready blocking: ttyS%d, count = %d\n", 4052 printk("block_til_ready blocking: ttyS%d, count = %d\n",
4053 info->line, info->count); 4053 info->line, info->count);
4054#endif 4054#endif
4055 tty_unlock(tty); 4055 tty_unlock();
4056 schedule(); 4056 schedule();
4057 tty_lock(tty); 4057 tty_lock();
4058 } 4058 }
4059 set_current_state(TASK_RUNNING); 4059 set_current_state(TASK_RUNNING);
4060 remove_wait_queue(&info->open_wait, &wait); 4060 remove_wait_queue(&info->open_wait, &wait);
@@ -4115,7 +4115,7 @@ rs_open(struct tty_struct *tty, struct file * filp)
4115 */ 4115 */
4116 if (tty_hung_up_p(filp) || 4116 if (tty_hung_up_p(filp) ||
4117 (info->flags & ASYNC_CLOSING)) { 4117 (info->flags & ASYNC_CLOSING)) {
4118 wait_event_interruptible_tty(tty, info->close_wait, 4118 wait_event_interruptible_tty(info->close_wait,
4119 !(info->flags & ASYNC_CLOSING)); 4119 !(info->flags & ASYNC_CLOSING));
4120#ifdef SERIAL_DO_RESTART 4120#ifdef SERIAL_DO_RESTART
4121 return ((info->flags & ASYNC_HUP_NOTIFY) ? 4121 return ((info->flags & ASYNC_HUP_NOTIFY) ?
diff --git a/drivers/tty/serial/serial_txx9.c b/drivers/tty/serial/serial_txx9.c
index 34bd345da775..6ae2a58d62f2 100644
--- a/drivers/tty/serial/serial_txx9.c
+++ b/drivers/tty/serial/serial_txx9.c
@@ -466,7 +466,7 @@ static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
466 spin_unlock_irqrestore(&up->port.lock, flags); 466 spin_unlock_irqrestore(&up->port.lock, flags);
467} 467}
468 468
469#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || (CONFIG_CONSOLE_POLL) 469#if defined(CONFIG_SERIAL_TXX9_CONSOLE) || defined(CONFIG_CONSOLE_POLL)
470/* 470/*
471 * Wait for transmitter & holding register to empty 471 * Wait for transmitter & holding register to empty
472 */ 472 */
diff --git a/drivers/tty/serial/sh-sci.c b/drivers/tty/serial/sh-sci.c
index 4604153b7954..1bd9163bc118 100644
--- a/drivers/tty/serial/sh-sci.c
+++ b/drivers/tty/serial/sh-sci.c
@@ -2179,6 +2179,16 @@ static int __devinit sci_init_single(struct platform_device *dev,
2179 return 0; 2179 return 0;
2180} 2180}
2181 2181
2182static void sci_cleanup_single(struct sci_port *port)
2183{
2184 sci_free_gpios(port);
2185
2186 clk_put(port->iclk);
2187 clk_put(port->fclk);
2188
2189 pm_runtime_disable(port->port.dev);
2190}
2191
2182#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE 2192#ifdef CONFIG_SERIAL_SH_SCI_CONSOLE
2183static void serial_console_putchar(struct uart_port *port, int ch) 2193static void serial_console_putchar(struct uart_port *port, int ch)
2184{ 2194{
@@ -2360,14 +2370,10 @@ static int sci_remove(struct platform_device *dev)
2360 cpufreq_unregister_notifier(&port->freq_transition, 2370 cpufreq_unregister_notifier(&port->freq_transition,
2361 CPUFREQ_TRANSITION_NOTIFIER); 2371 CPUFREQ_TRANSITION_NOTIFIER);
2362 2372
2363 sci_free_gpios(port);
2364
2365 uart_remove_one_port(&sci_uart_driver, &port->port); 2373 uart_remove_one_port(&sci_uart_driver, &port->port);
2366 2374
2367 clk_put(port->iclk); 2375 sci_cleanup_single(port);
2368 clk_put(port->fclk);
2369 2376
2370 pm_runtime_disable(&dev->dev);
2371 return 0; 2377 return 0;
2372} 2378}
2373 2379
@@ -2385,14 +2391,20 @@ static int __devinit sci_probe_single(struct platform_device *dev,
2385 index+1, SCI_NPORTS); 2391 index+1, SCI_NPORTS);
2386 dev_notice(&dev->dev, "Consider bumping " 2392 dev_notice(&dev->dev, "Consider bumping "
2387 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n"); 2393 "CONFIG_SERIAL_SH_SCI_NR_UARTS!\n");
2388 return 0; 2394 return -EINVAL;
2389 } 2395 }
2390 2396
2391 ret = sci_init_single(dev, sciport, index, p); 2397 ret = sci_init_single(dev, sciport, index, p);
2392 if (ret) 2398 if (ret)
2393 return ret; 2399 return ret;
2394 2400
2395 return uart_add_one_port(&sci_uart_driver, &sciport->port); 2401 ret = uart_add_one_port(&sci_uart_driver, &sciport->port);
2402 if (ret) {
2403 sci_cleanup_single(sciport);
2404 return ret;
2405 }
2406
2407 return 0;
2396} 2408}
2397 2409
2398static int __devinit sci_probe(struct platform_device *dev) 2410static int __devinit sci_probe(struct platform_device *dev)
@@ -2413,24 +2425,22 @@ static int __devinit sci_probe(struct platform_device *dev)
2413 2425
2414 ret = sci_probe_single(dev, dev->id, p, sp); 2426 ret = sci_probe_single(dev, dev->id, p, sp);
2415 if (ret) 2427 if (ret)
2416 goto err_unreg; 2428 return ret;
2417 2429
2418 sp->freq_transition.notifier_call = sci_notifier; 2430 sp->freq_transition.notifier_call = sci_notifier;
2419 2431
2420 ret = cpufreq_register_notifier(&sp->freq_transition, 2432 ret = cpufreq_register_notifier(&sp->freq_transition,
2421 CPUFREQ_TRANSITION_NOTIFIER); 2433 CPUFREQ_TRANSITION_NOTIFIER);
2422 if (unlikely(ret < 0)) 2434 if (unlikely(ret < 0)) {
2423 goto err_unreg; 2435 sci_cleanup_single(sp);
2436 return ret;
2437 }
2424 2438
2425#ifdef CONFIG_SH_STANDARD_BIOS 2439#ifdef CONFIG_SH_STANDARD_BIOS
2426 sh_bios_gdb_detach(); 2440 sh_bios_gdb_detach();
2427#endif 2441#endif
2428 2442
2429 return 0; 2443 return 0;
2430
2431err_unreg:
2432 sci_remove(dev);
2433 return ret;
2434} 2444}
2435 2445
2436static int sci_suspend(struct device *dev) 2446static int sci_suspend(struct device *dev)
diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
index 5ed0daae6564..593d40ad0a6b 100644
--- a/drivers/tty/synclink.c
+++ b/drivers/tty/synclink.c
@@ -3338,9 +3338,9 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n", 3338 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
3339 __FILE__,__LINE__, tty->driver->name, port->count ); 3339 __FILE__,__LINE__, tty->driver->name, port->count );
3340 3340
3341 tty_unlock(tty); 3341 tty_unlock();
3342 schedule(); 3342 schedule();
3343 tty_lock(tty); 3343 tty_lock();
3344 } 3344 }
3345 3345
3346 set_current_state(TASK_RUNNING); 3346 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
index 45b43f11ca39..aa1debf97cc7 100644
--- a/drivers/tty/synclink_gt.c
+++ b/drivers/tty/synclink_gt.c
@@ -3336,9 +3336,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
3336 } 3336 }
3337 3337
3338 DBGINFO(("%s block_til_ready wait\n", tty->driver->name)); 3338 DBGINFO(("%s block_til_ready wait\n", tty->driver->name));
3339 tty_unlock(tty); 3339 tty_unlock();
3340 schedule(); 3340 schedule();
3341 tty_lock(tty); 3341 tty_lock();
3342 } 3342 }
3343 3343
3344 set_current_state(TASK_RUNNING); 3344 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
index 4a1e4f07765b..a3dddc12d2fe 100644
--- a/drivers/tty/synclinkmp.c
+++ b/drivers/tty/synclinkmp.c
@@ -3357,9 +3357,9 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
3357 printk("%s(%d):%s block_til_ready() count=%d\n", 3357 printk("%s(%d):%s block_til_ready() count=%d\n",
3358 __FILE__,__LINE__, tty->driver->name, port->count ); 3358 __FILE__,__LINE__, tty->driver->name, port->count );
3359 3359
3360 tty_unlock(tty); 3360 tty_unlock();
3361 schedule(); 3361 schedule();
3362 tty_lock(tty); 3362 tty_lock();
3363 } 3363 }
3364 3364
3365 set_current_state(TASK_RUNNING); 3365 set_current_state(TASK_RUNNING);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 9e930c009bf2..b425c79675ad 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -185,7 +185,6 @@ void free_tty_struct(struct tty_struct *tty)
185 put_device(tty->dev); 185 put_device(tty->dev);
186 kfree(tty->write_buf); 186 kfree(tty->write_buf);
187 tty_buffer_free_all(tty); 187 tty_buffer_free_all(tty);
188 tty->magic = 0xDEADDEAD;
189 kfree(tty); 188 kfree(tty);
190} 189}
191 190
@@ -574,7 +573,7 @@ void __tty_hangup(struct tty_struct *tty)
574 } 573 }
575 spin_unlock(&redirect_lock); 574 spin_unlock(&redirect_lock);
576 575
577 tty_lock(tty); 576 tty_lock();
578 577
579 /* some functions below drop BTM, so we need this bit */ 578 /* some functions below drop BTM, so we need this bit */
580 set_bit(TTY_HUPPING, &tty->flags); 579 set_bit(TTY_HUPPING, &tty->flags);
@@ -667,7 +666,7 @@ void __tty_hangup(struct tty_struct *tty)
667 clear_bit(TTY_HUPPING, &tty->flags); 666 clear_bit(TTY_HUPPING, &tty->flags);
668 tty_ldisc_enable(tty); 667 tty_ldisc_enable(tty);
669 668
670 tty_unlock(tty); 669 tty_unlock();
671 670
672 if (f) 671 if (f)
673 fput(f); 672 fput(f);
@@ -1104,12 +1103,12 @@ void tty_write_message(struct tty_struct *tty, char *msg)
1104{ 1103{
1105 if (tty) { 1104 if (tty) {
1106 mutex_lock(&tty->atomic_write_lock); 1105 mutex_lock(&tty->atomic_write_lock);
1107 tty_lock(tty); 1106 tty_lock();
1108 if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) { 1107 if (tty->ops->write && !test_bit(TTY_CLOSING, &tty->flags)) {
1109 tty_unlock(tty); 1108 tty_unlock();
1110 tty->ops->write(tty, msg, strlen(msg)); 1109 tty->ops->write(tty, msg, strlen(msg));
1111 } else 1110 } else
1112 tty_unlock(tty); 1111 tty_unlock();
1113 tty_write_unlock(tty); 1112 tty_write_unlock(tty);
1114 } 1113 }
1115 return; 1114 return;
@@ -1404,7 +1403,6 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1404 } 1403 }
1405 initialize_tty_struct(tty, driver, idx); 1404 initialize_tty_struct(tty, driver, idx);
1406 1405
1407 tty_lock(tty);
1408 retval = tty_driver_install_tty(driver, tty); 1406 retval = tty_driver_install_tty(driver, tty);
1409 if (retval < 0) 1407 if (retval < 0)
1410 goto err_deinit_tty; 1408 goto err_deinit_tty;
@@ -1417,11 +1415,9 @@ struct tty_struct *tty_init_dev(struct tty_driver *driver, int idx)
1417 retval = tty_ldisc_setup(tty, tty->link); 1415 retval = tty_ldisc_setup(tty, tty->link);
1418 if (retval) 1416 if (retval)
1419 goto err_release_tty; 1417 goto err_release_tty;
1420 /* Return the tty locked so that it cannot vanish under the caller */
1421 return tty; 1418 return tty;
1422 1419
1423err_deinit_tty: 1420err_deinit_tty:
1424 tty_unlock(tty);
1425 deinitialize_tty_struct(tty); 1421 deinitialize_tty_struct(tty);
1426 free_tty_struct(tty); 1422 free_tty_struct(tty);
1427err_module_put: 1423err_module_put:
@@ -1430,7 +1426,6 @@ err_module_put:
1430 1426
1431 /* call the tty release_tty routine to clean out this slot */ 1427 /* call the tty release_tty routine to clean out this slot */
1432err_release_tty: 1428err_release_tty:
1433 tty_unlock(tty);
1434 printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, " 1429 printk_ratelimited(KERN_INFO "tty_init_dev: ldisc open failed, "
1435 "clearing slot %d\n", idx); 1430 "clearing slot %d\n", idx);
1436 release_tty(tty, idx); 1431 release_tty(tty, idx);
@@ -1633,7 +1628,7 @@ int tty_release(struct inode *inode, struct file *filp)
1633 if (tty_paranoia_check(tty, inode, __func__)) 1628 if (tty_paranoia_check(tty, inode, __func__))
1634 return 0; 1629 return 0;
1635 1630
1636 tty_lock(tty); 1631 tty_lock();
1637 check_tty_count(tty, __func__); 1632 check_tty_count(tty, __func__);
1638 1633
1639 __tty_fasync(-1, filp, 0); 1634 __tty_fasync(-1, filp, 0);
@@ -1642,11 +1637,10 @@ int tty_release(struct inode *inode, struct file *filp)
1642 pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY && 1637 pty_master = (tty->driver->type == TTY_DRIVER_TYPE_PTY &&
1643 tty->driver->subtype == PTY_TYPE_MASTER); 1638 tty->driver->subtype == PTY_TYPE_MASTER);
1644 devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0; 1639 devpts = (tty->driver->flags & TTY_DRIVER_DEVPTS_MEM) != 0;
1645 /* Review: parallel close */
1646 o_tty = tty->link; 1640 o_tty = tty->link;
1647 1641
1648 if (tty_release_checks(tty, o_tty, idx)) { 1642 if (tty_release_checks(tty, o_tty, idx)) {
1649 tty_unlock(tty); 1643 tty_unlock();
1650 return 0; 1644 return 0;
1651 } 1645 }
1652 1646
@@ -1658,7 +1652,7 @@ int tty_release(struct inode *inode, struct file *filp)
1658 if (tty->ops->close) 1652 if (tty->ops->close)
1659 tty->ops->close(tty, filp); 1653 tty->ops->close(tty, filp);
1660 1654
1661 tty_unlock(tty); 1655 tty_unlock();
1662 /* 1656 /*
1663 * Sanity check: if tty->count is going to zero, there shouldn't be 1657 * Sanity check: if tty->count is going to zero, there shouldn't be
1664 * any waiters on tty->read_wait or tty->write_wait. We test the 1658 * any waiters on tty->read_wait or tty->write_wait. We test the
@@ -1681,7 +1675,7 @@ int tty_release(struct inode *inode, struct file *filp)
1681 opens on /dev/tty */ 1675 opens on /dev/tty */
1682 1676
1683 mutex_lock(&tty_mutex); 1677 mutex_lock(&tty_mutex);
1684 tty_lock_pair(tty, o_tty); 1678 tty_lock();
1685 tty_closing = tty->count <= 1; 1679 tty_closing = tty->count <= 1;
1686 o_tty_closing = o_tty && 1680 o_tty_closing = o_tty &&
1687 (o_tty->count <= (pty_master ? 1 : 0)); 1681 (o_tty->count <= (pty_master ? 1 : 0));
@@ -1712,7 +1706,7 @@ int tty_release(struct inode *inode, struct file *filp)
1712 1706
1713 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n", 1707 printk(KERN_WARNING "%s: %s: read/write wait queue active!\n",
1714 __func__, tty_name(tty, buf)); 1708 __func__, tty_name(tty, buf));
1715 tty_unlock_pair(tty, o_tty); 1709 tty_unlock();
1716 mutex_unlock(&tty_mutex); 1710 mutex_unlock(&tty_mutex);
1717 schedule(); 1711 schedule();
1718 } 1712 }
@@ -1775,7 +1769,7 @@ int tty_release(struct inode *inode, struct file *filp)
1775 1769
1776 /* check whether both sides are closing ... */ 1770 /* check whether both sides are closing ... */
1777 if (!tty_closing || (o_tty && !o_tty_closing)) { 1771 if (!tty_closing || (o_tty && !o_tty_closing)) {
1778 tty_unlock_pair(tty, o_tty); 1772 tty_unlock();
1779 return 0; 1773 return 0;
1780 } 1774 }
1781 1775
@@ -1788,16 +1782,14 @@ int tty_release(struct inode *inode, struct file *filp)
1788 tty_ldisc_release(tty, o_tty); 1782 tty_ldisc_release(tty, o_tty);
1789 /* 1783 /*
1790 * The release_tty function takes care of the details of clearing 1784 * The release_tty function takes care of the details of clearing
1791 * the slots and preserving the termios structure. The tty_unlock_pair 1785 * the slots and preserving the termios structure.
1792 * should be safe as we keep a kref while the tty is locked (so the
1793 * unlock never unlocks a freed tty).
1794 */ 1786 */
1795 release_tty(tty, idx); 1787 release_tty(tty, idx);
1796 tty_unlock_pair(tty, o_tty);
1797 1788
1798 /* Make this pty number available for reallocation */ 1789 /* Make this pty number available for reallocation */
1799 if (devpts) 1790 if (devpts)
1800 devpts_kill_index(inode, idx); 1791 devpts_kill_index(inode, idx);
1792 tty_unlock();
1801 return 0; 1793 return 0;
1802} 1794}
1803 1795
@@ -1901,9 +1893,6 @@ static struct tty_driver *tty_lookup_driver(dev_t device, struct file *filp,
1901 * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev. 1893 * Locking: tty_mutex protects tty, tty_lookup_driver and tty_init_dev.
1902 * tty->count should protect the rest. 1894 * tty->count should protect the rest.
1903 * ->siglock protects ->signal/->sighand 1895 * ->siglock protects ->signal/->sighand
1904 *
1905 * Note: the tty_unlock/lock cases without a ref are only safe due to
1906 * tty_mutex
1907 */ 1896 */
1908 1897
1909static int tty_open(struct inode *inode, struct file *filp) 1898static int tty_open(struct inode *inode, struct file *filp)
@@ -1927,7 +1916,8 @@ retry_open:
1927 retval = 0; 1916 retval = 0;
1928 1917
1929 mutex_lock(&tty_mutex); 1918 mutex_lock(&tty_mutex);
1930 /* This is protected by the tty_mutex */ 1919 tty_lock();
1920
1931 tty = tty_open_current_tty(device, filp); 1921 tty = tty_open_current_tty(device, filp);
1932 if (IS_ERR(tty)) { 1922 if (IS_ERR(tty)) {
1933 retval = PTR_ERR(tty); 1923 retval = PTR_ERR(tty);
@@ -1948,19 +1938,17 @@ retry_open:
1948 } 1938 }
1949 1939
1950 if (tty) { 1940 if (tty) {
1951 tty_lock(tty);
1952 retval = tty_reopen(tty); 1941 retval = tty_reopen(tty);
1953 if (retval < 0) { 1942 if (retval)
1954 tty_unlock(tty);
1955 tty = ERR_PTR(retval); 1943 tty = ERR_PTR(retval);
1956 } 1944 } else
1957 } else /* Returns with the tty_lock held for now */
1958 tty = tty_init_dev(driver, index); 1945 tty = tty_init_dev(driver, index);
1959 1946
1960 mutex_unlock(&tty_mutex); 1947 mutex_unlock(&tty_mutex);
1961 if (driver) 1948 if (driver)
1962 tty_driver_kref_put(driver); 1949 tty_driver_kref_put(driver);
1963 if (IS_ERR(tty)) { 1950 if (IS_ERR(tty)) {
1951 tty_unlock();
1964 retval = PTR_ERR(tty); 1952 retval = PTR_ERR(tty);
1965 goto err_file; 1953 goto err_file;
1966 } 1954 }
@@ -1989,7 +1977,7 @@ retry_open:
1989 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__, 1977 printk(KERN_DEBUG "%s: error %d in opening %s...\n", __func__,
1990 retval, tty->name); 1978 retval, tty->name);
1991#endif 1979#endif
1992 tty_unlock(tty); /* need to call tty_release without BTM */ 1980 tty_unlock(); /* need to call tty_release without BTM */
1993 tty_release(inode, filp); 1981 tty_release(inode, filp);
1994 if (retval != -ERESTARTSYS) 1982 if (retval != -ERESTARTSYS)
1995 return retval; 1983 return retval;
@@ -2001,15 +1989,17 @@ retry_open:
2001 /* 1989 /*
2002 * Need to reset f_op in case a hangup happened. 1990 * Need to reset f_op in case a hangup happened.
2003 */ 1991 */
1992 tty_lock();
2004 if (filp->f_op == &hung_up_tty_fops) 1993 if (filp->f_op == &hung_up_tty_fops)
2005 filp->f_op = &tty_fops; 1994 filp->f_op = &tty_fops;
1995 tty_unlock();
2006 goto retry_open; 1996 goto retry_open;
2007 } 1997 }
2008 tty_unlock(tty); 1998 tty_unlock();
2009 1999
2010 2000
2011 mutex_lock(&tty_mutex); 2001 mutex_lock(&tty_mutex);
2012 tty_lock(tty); 2002 tty_lock();
2013 spin_lock_irq(&current->sighand->siglock); 2003 spin_lock_irq(&current->sighand->siglock);
2014 if (!noctty && 2004 if (!noctty &&
2015 current->signal->leader && 2005 current->signal->leader &&
@@ -2017,10 +2007,11 @@ retry_open:
2017 tty->session == NULL) 2007 tty->session == NULL)
2018 __proc_set_tty(current, tty); 2008 __proc_set_tty(current, tty);
2019 spin_unlock_irq(&current->sighand->siglock); 2009 spin_unlock_irq(&current->sighand->siglock);
2020 tty_unlock(tty); 2010 tty_unlock();
2021 mutex_unlock(&tty_mutex); 2011 mutex_unlock(&tty_mutex);
2022 return 0; 2012 return 0;
2023err_unlock: 2013err_unlock:
2014 tty_unlock();
2024 mutex_unlock(&tty_mutex); 2015 mutex_unlock(&tty_mutex);
2025 /* after locks to avoid deadlock */ 2016 /* after locks to avoid deadlock */
2026 if (!IS_ERR_OR_NULL(driver)) 2017 if (!IS_ERR_OR_NULL(driver))
@@ -2103,13 +2094,10 @@ out:
2103 2094
2104static int tty_fasync(int fd, struct file *filp, int on) 2095static int tty_fasync(int fd, struct file *filp, int on)
2105{ 2096{
2106 struct tty_struct *tty = file_tty(filp);
2107 int retval; 2097 int retval;
2108 2098 tty_lock();
2109 tty_lock(tty);
2110 retval = __tty_fasync(fd, filp, on); 2099 retval = __tty_fasync(fd, filp, on);
2111 tty_unlock(tty); 2100 tty_unlock();
2112
2113 return retval; 2101 return retval;
2114} 2102}
2115 2103
@@ -2946,7 +2934,6 @@ void initialize_tty_struct(struct tty_struct *tty,
2946 tty->pgrp = NULL; 2934 tty->pgrp = NULL;
2947 tty->overrun_time = jiffies; 2935 tty->overrun_time = jiffies;
2948 tty_buffer_init(tty); 2936 tty_buffer_init(tty);
2949 mutex_init(&tty->legacy_mutex);
2950 mutex_init(&tty->termios_mutex); 2937 mutex_init(&tty->termios_mutex);
2951 mutex_init(&tty->ldisc_mutex); 2938 mutex_init(&tty->ldisc_mutex);
2952 init_waitqueue_head(&tty->write_wait); 2939 init_waitqueue_head(&tty->write_wait);
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index ba8be396a621..9911eb6b34cd 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -568,7 +568,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
568 if (IS_ERR(new_ldisc)) 568 if (IS_ERR(new_ldisc))
569 return PTR_ERR(new_ldisc); 569 return PTR_ERR(new_ldisc);
570 570
571 tty_lock(tty); 571 tty_lock();
572 /* 572 /*
573 * We need to look at the tty locking here for pty/tty pairs 573 * We need to look at the tty locking here for pty/tty pairs
574 * when both sides try to change in parallel. 574 * when both sides try to change in parallel.
@@ -582,12 +582,12 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
582 */ 582 */
583 583
584 if (tty->ldisc->ops->num == ldisc) { 584 if (tty->ldisc->ops->num == ldisc) {
585 tty_unlock(tty); 585 tty_unlock();
586 tty_ldisc_put(new_ldisc); 586 tty_ldisc_put(new_ldisc);
587 return 0; 587 return 0;
588 } 588 }
589 589
590 tty_unlock(tty); 590 tty_unlock();
591 /* 591 /*
592 * Problem: What do we do if this blocks ? 592 * Problem: What do we do if this blocks ?
593 * We could deadlock here 593 * We could deadlock here
@@ -595,7 +595,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
595 595
596 tty_wait_until_sent(tty, 0); 596 tty_wait_until_sent(tty, 0);
597 597
598 tty_lock(tty); 598 tty_lock();
599 mutex_lock(&tty->ldisc_mutex); 599 mutex_lock(&tty->ldisc_mutex);
600 600
601 /* 601 /*
@@ -605,10 +605,10 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
605 605
606 while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) { 606 while (test_bit(TTY_LDISC_CHANGING, &tty->flags)) {
607 mutex_unlock(&tty->ldisc_mutex); 607 mutex_unlock(&tty->ldisc_mutex);
608 tty_unlock(tty); 608 tty_unlock();
609 wait_event(tty_ldisc_wait, 609 wait_event(tty_ldisc_wait,
610 test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0); 610 test_bit(TTY_LDISC_CHANGING, &tty->flags) == 0);
611 tty_lock(tty); 611 tty_lock();
612 mutex_lock(&tty->ldisc_mutex); 612 mutex_lock(&tty->ldisc_mutex);
613 } 613 }
614 614
@@ -623,7 +623,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
623 623
624 o_ldisc = tty->ldisc; 624 o_ldisc = tty->ldisc;
625 625
626 tty_unlock(tty); 626 tty_unlock();
627 /* 627 /*
628 * Make sure we don't change while someone holds a 628 * Make sure we don't change while someone holds a
629 * reference to the line discipline. The TTY_LDISC bit 629 * reference to the line discipline. The TTY_LDISC bit
@@ -650,7 +650,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
650 650
651 retval = tty_ldisc_wait_idle(tty, 5 * HZ); 651 retval = tty_ldisc_wait_idle(tty, 5 * HZ);
652 652
653 tty_lock(tty); 653 tty_lock();
654 mutex_lock(&tty->ldisc_mutex); 654 mutex_lock(&tty->ldisc_mutex);
655 655
656 /* handle wait idle failure locked */ 656 /* handle wait idle failure locked */
@@ -665,7 +665,7 @@ int tty_set_ldisc(struct tty_struct *tty, int ldisc)
665 clear_bit(TTY_LDISC_CHANGING, &tty->flags); 665 clear_bit(TTY_LDISC_CHANGING, &tty->flags);
666 mutex_unlock(&tty->ldisc_mutex); 666 mutex_unlock(&tty->ldisc_mutex);
667 tty_ldisc_put(new_ldisc); 667 tty_ldisc_put(new_ldisc);
668 tty_unlock(tty); 668 tty_unlock();
669 return -EIO; 669 return -EIO;
670 } 670 }
671 671
@@ -708,7 +708,7 @@ enable:
708 if (o_work) 708 if (o_work)
709 schedule_work(&o_tty->buf.work); 709 schedule_work(&o_tty->buf.work);
710 mutex_unlock(&tty->ldisc_mutex); 710 mutex_unlock(&tty->ldisc_mutex);
711 tty_unlock(tty); 711 tty_unlock();
712 return retval; 712 return retval;
713} 713}
714 714
@@ -816,11 +816,11 @@ void tty_ldisc_hangup(struct tty_struct *tty)
816 * need to wait for another function taking the BTM 816 * need to wait for another function taking the BTM
817 */ 817 */
818 clear_bit(TTY_LDISC, &tty->flags); 818 clear_bit(TTY_LDISC, &tty->flags);
819 tty_unlock(tty); 819 tty_unlock();
820 cancel_work_sync(&tty->buf.work); 820 cancel_work_sync(&tty->buf.work);
821 mutex_unlock(&tty->ldisc_mutex); 821 mutex_unlock(&tty->ldisc_mutex);
822retry: 822retry:
823 tty_lock(tty); 823 tty_lock();
824 mutex_lock(&tty->ldisc_mutex); 824 mutex_lock(&tty->ldisc_mutex);
825 825
826 /* At this point we have a closed ldisc and we want to 826 /* At this point we have a closed ldisc and we want to
@@ -831,7 +831,7 @@ retry:
831 if (atomic_read(&tty->ldisc->users) != 1) { 831 if (atomic_read(&tty->ldisc->users) != 1) {
832 char cur_n[TASK_COMM_LEN], tty_n[64]; 832 char cur_n[TASK_COMM_LEN], tty_n[64];
833 long timeout = 3 * HZ; 833 long timeout = 3 * HZ;
834 tty_unlock(tty); 834 tty_unlock();
835 835
836 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) { 836 while (tty_ldisc_wait_idle(tty, timeout) == -EBUSY) {
837 timeout = MAX_SCHEDULE_TIMEOUT; 837 timeout = MAX_SCHEDULE_TIMEOUT;
@@ -894,23 +894,6 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
894 tty_ldisc_enable(tty); 894 tty_ldisc_enable(tty);
895 return 0; 895 return 0;
896} 896}
897
898static void tty_ldisc_kill(struct tty_struct *tty)
899{
900 mutex_lock(&tty->ldisc_mutex);
901 /*
902 * Now kill off the ldisc
903 */
904 tty_ldisc_close(tty, tty->ldisc);
905 tty_ldisc_put(tty->ldisc);
906 /* Force an oops if we mess this up */
907 tty->ldisc = NULL;
908
909 /* Ensure the next open requests the N_TTY ldisc */
910 tty_set_termios_ldisc(tty, N_TTY);
911 mutex_unlock(&tty->ldisc_mutex);
912}
913
914/** 897/**
915 * tty_ldisc_release - release line discipline 898 * tty_ldisc_release - release line discipline
916 * @tty: tty being shut down 899 * @tty: tty being shut down
@@ -929,19 +912,27 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
929 * race with the set_ldisc code path. 912 * race with the set_ldisc code path.
930 */ 913 */
931 914
932 tty_unlock_pair(tty, o_tty); 915 tty_unlock();
933 tty_ldisc_halt(tty); 916 tty_ldisc_halt(tty);
934 tty_ldisc_flush_works(tty); 917 tty_ldisc_flush_works(tty);
935 if (o_tty) { 918 tty_lock();
936 tty_ldisc_halt(o_tty);
937 tty_ldisc_flush_works(o_tty);
938 }
939 tty_lock_pair(tty, o_tty);
940 919
920 mutex_lock(&tty->ldisc_mutex);
921 /*
922 * Now kill off the ldisc
923 */
924 tty_ldisc_close(tty, tty->ldisc);
925 tty_ldisc_put(tty->ldisc);
926 /* Force an oops if we mess this up */
927 tty->ldisc = NULL;
928
929 /* Ensure the next open requests the N_TTY ldisc */
930 tty_set_termios_ldisc(tty, N_TTY);
931 mutex_unlock(&tty->ldisc_mutex);
941 932
942 tty_ldisc_kill(tty); 933 /* This will need doing differently if we need to lock */
943 if (o_tty) 934 if (o_tty)
944 tty_ldisc_kill(o_tty); 935 tty_ldisc_release(o_tty, NULL);
945 936
946 /* And the memory resources remaining (buffers, termios) will be 937 /* And the memory resources remaining (buffers, termios) will be
947 disposed of when the kref hits zero */ 938 disposed of when the kref hits zero */
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index 69adc80c98cd..9ff986c32a21 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -4,59 +4,29 @@
4#include <linux/semaphore.h> 4#include <linux/semaphore.h>
5#include <linux/sched.h> 5#include <linux/sched.h>
6 6
7/* Legacy tty mutex glue */ 7/*
8 * The 'big tty mutex'
9 *
10 * This mutex is taken and released by tty_lock() and tty_unlock(),
11 * replacing the older big kernel lock.
12 * It can no longer be taken recursively, and does not get
13 * released implicitly while sleeping.
14 *
15 * Don't use in new code.
16 */
17static DEFINE_MUTEX(big_tty_mutex);
8 18
9/* 19/*
10 * Getting the big tty mutex. 20 * Getting the big tty mutex.
11 */ 21 */
12 22void __lockfunc tty_lock(void)
13void __lockfunc tty_lock(struct tty_struct *tty)
14{ 23{
15 if (tty->magic != TTY_MAGIC) { 24 mutex_lock(&big_tty_mutex);
16 printk(KERN_ERR "L Bad %p\n", tty);
17 WARN_ON(1);
18 return;
19 }
20 tty_kref_get(tty);
21 mutex_lock(&tty->legacy_mutex);
22} 25}
23EXPORT_SYMBOL(tty_lock); 26EXPORT_SYMBOL(tty_lock);
24 27
25void __lockfunc tty_unlock(struct tty_struct *tty) 28void __lockfunc tty_unlock(void)
26{ 29{
27 if (tty->magic != TTY_MAGIC) { 30 mutex_unlock(&big_tty_mutex);
28 printk(KERN_ERR "U Bad %p\n", tty);
29 WARN_ON(1);
30 return;
31 }
32 mutex_unlock(&tty->legacy_mutex);
33 tty_kref_put(tty);
34} 31}
35EXPORT_SYMBOL(tty_unlock); 32EXPORT_SYMBOL(tty_unlock);
36
37/*
38 * Getting the big tty mutex for a pair of ttys with lock ordering
39 * On a non pty/tty pair tty2 can be NULL which is just fine.
40 */
41void __lockfunc tty_lock_pair(struct tty_struct *tty,
42 struct tty_struct *tty2)
43{
44 if (tty < tty2) {
45 tty_lock(tty);
46 tty_lock(tty2);
47 } else {
48 if (tty2 && tty2 != tty)
49 tty_lock(tty2);
50 tty_lock(tty);
51 }
52}
53EXPORT_SYMBOL(tty_lock_pair);
54
55void __lockfunc tty_unlock_pair(struct tty_struct *tty,
56 struct tty_struct *tty2)
57{
58 tty_unlock(tty);
59 if (tty2 && tty2 != tty)
60 tty_unlock(tty2);
61}
62EXPORT_SYMBOL(tty_unlock_pair);
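For context, after this hunk tty_lock() and tty_unlock() guard tty operations with the single big_tty_mutex defined above instead of a per-tty legacy_mutex, and the pair variants disappear. A minimal caller-side sketch of the restored convention, assuming only the post-revert tty_lock()/tty_unlock() prototypes and <linux/tty.h>; the helper name is illustrative, not from the patch:

	/* Sketch only: a hypothetical helper showing the global-BTM calling
	 * convention restored above. Only tty_lock()/tty_unlock() come from
	 * the patch; everything else is illustrative. */
	static void example_update_tty(struct tty_struct *tty)
	{
		tty_lock();		/* takes big_tty_mutex; no tty argument any more */
		/* ... touch state protected by the BTM ... */
		tty_unlock();		/* releases big_tty_mutex */
	}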
diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
index d9cca95a5452..bf6e238146ae 100644
--- a/drivers/tty/tty_port.c
+++ b/drivers/tty/tty_port.c
@@ -230,7 +230,7 @@ int tty_port_block_til_ready(struct tty_port *port,
230 230
231 /* block if port is in the process of being closed */ 231 /* block if port is in the process of being closed */
232 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) { 232 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING) {
233 wait_event_interruptible_tty(tty, port->close_wait, 233 wait_event_interruptible_tty(port->close_wait,
234 !(port->flags & ASYNC_CLOSING)); 234 !(port->flags & ASYNC_CLOSING));
235 if (port->flags & ASYNC_HUP_NOTIFY) 235 if (port->flags & ASYNC_HUP_NOTIFY)
236 return -EAGAIN; 236 return -EAGAIN;
@@ -296,9 +296,9 @@ int tty_port_block_til_ready(struct tty_port *port,
296 retval = -ERESTARTSYS; 296 retval = -ERESTARTSYS;
297 break; 297 break;
298 } 298 }
299 tty_unlock(tty); 299 tty_unlock();
300 schedule(); 300 schedule();
301 tty_lock(tty); 301 tty_lock();
302 } 302 }
303 finish_wait(&port->open_wait, &wait); 303 finish_wait(&port->open_wait, &wait);
304 304
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f2a120eea9d4..36a2a0b7b82c 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -567,6 +567,14 @@ static int acm_port_activate(struct tty_port *port, struct tty_struct *tty)
567 567
568 usb_autopm_put_interface(acm->control); 568 usb_autopm_put_interface(acm->control);
569 569
570 /*
571 * Unthrottle device in case the TTY was closed while throttled.
572 */
573 spin_lock_irq(&acm->read_lock);
574 acm->throttled = 0;
575 acm->throttle_req = 0;
576 spin_unlock_irq(&acm->read_lock);
577
570 if (acm_submit_read_urbs(acm, GFP_KERNEL)) 578 if (acm_submit_read_urbs(acm, GFP_KERNEL))
571 goto error_submit_read_urbs; 579 goto error_submit_read_urbs;
572 580
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index ea8b304f0e85..8fd398dffced 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -55,6 +55,15 @@ static const struct usb_device_id wdm_ids[] = {
55 .bInterfaceSubClass = 1, 55 .bInterfaceSubClass = 1,
56 .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */ 56 .bInterfaceProtocol = 9, /* NOTE: CDC ECM control interface! */
57 }, 57 },
58 {
59 /* Vodafone/Huawei K5005 (12d1:14c8) and similar modems */
60 .match_flags = USB_DEVICE_ID_MATCH_VENDOR |
61 USB_DEVICE_ID_MATCH_INT_INFO,
62 .idVendor = HUAWEI_VENDOR_ID,
63 .bInterfaceClass = USB_CLASS_VENDOR_SPEC,
64 .bInterfaceSubClass = 1,
65 .bInterfaceProtocol = 57, /* NOTE: CDC ECM control interface! */
66 },
58 { } 67 { }
59}; 68};
60 69
diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c
index 57ed9e400c06..622b4a48e732 100644
--- a/drivers/usb/core/hcd-pci.c
+++ b/drivers/usb/core/hcd-pci.c
@@ -493,15 +493,6 @@ static int hcd_pci_suspend_noirq(struct device *dev)
493 493
494 pci_save_state(pci_dev); 494 pci_save_state(pci_dev);
495 495
496 /*
497 * Some systems crash if an EHCI controller is in D3 during
498 * a sleep transition. We have to leave such controllers in D0.
499 */
500 if (hcd->broken_pci_sleep) {
501 dev_dbg(dev, "Staying in PCI D0\n");
502 return retval;
503 }
504
505 /* If the root hub is dead rather than suspended, disallow remote 496 /* If the root hub is dead rather than suspended, disallow remote
506 * wakeup. usb_hc_died() should ensure that both hosts are marked as 497 * wakeup. usb_hc_died() should ensure that both hosts are marked as
507 * dying, so we only need to check the primary roothub. 498 * dying, so we only need to check the primary roothub.
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 04fb834c3fa1..25a7422ee657 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -3379,7 +3379,7 @@ int usb_disable_lpm(struct usb_device *udev)
3379 return 0; 3379 return 0;
3380 3380
3381 udev->lpm_disable_count++; 3381 udev->lpm_disable_count++;
3382 if ((udev->u1_params.timeout == 0 && udev->u1_params.timeout == 0)) 3382 if ((udev->u1_params.timeout == 0 && udev->u2_params.timeout == 0))
3383 return 0; 3383 return 0;
3384 3384
3385 /* If LPM is enabled, attempt to disable it. */ 3385 /* If LPM is enabled, attempt to disable it. */
diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
index b548cf1dbc62..bdd1c6749d88 100644
--- a/drivers/usb/core/message.c
+++ b/drivers/usb/core/message.c
@@ -1838,7 +1838,6 @@ free_interfaces:
1838 intfc = cp->intf_cache[i]; 1838 intfc = cp->intf_cache[i];
1839 intf->altsetting = intfc->altsetting; 1839 intf->altsetting = intfc->altsetting;
1840 intf->num_altsetting = intfc->num_altsetting; 1840 intf->num_altsetting = intfc->num_altsetting;
1841 intf->intf_assoc = find_iad(dev, cp, i);
1842 kref_get(&intfc->ref); 1841 kref_get(&intfc->ref);
1843 1842
1844 alt = usb_altnum_to_altsetting(intf, 0); 1843 alt = usb_altnum_to_altsetting(intf, 0);
@@ -1851,6 +1850,8 @@ free_interfaces:
1851 if (!alt) 1850 if (!alt)
1852 alt = &intf->altsetting[0]; 1851 alt = &intf->altsetting[0];
1853 1852
1853 intf->intf_assoc =
1854 find_iad(dev, cp, alt->desc.bInterfaceNumber);
1854 intf->cur_altsetting = alt; 1855 intf->cur_altsetting = alt;
1855 usb_enable_interface(dev, intf, true); 1856 usb_enable_interface(dev, intf, true);
1856 intf->dev.parent = &dev->dev; 1857 intf->dev.parent = &dev->dev;
diff --git a/drivers/usb/gadget/atmel_usba_udc.c b/drivers/usb/gadget/atmel_usba_udc.c
index e23bf7984aaf..9a9bced813ed 100644
--- a/drivers/usb/gadget/atmel_usba_udc.c
+++ b/drivers/usb/gadget/atmel_usba_udc.c
@@ -599,12 +599,6 @@ usba_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
599 599
600 spin_lock_irqsave(&ep->udc->lock, flags); 600 spin_lock_irqsave(&ep->udc->lock, flags);
601 601
602 if (ep->ep.desc) {
603 spin_unlock_irqrestore(&ep->udc->lock, flags);
604 DBG(DBG_ERR, "ep%d already enabled\n", ep->index);
605 return -EBUSY;
606 }
607
608 ep->ep.desc = desc; 602 ep->ep.desc = desc;
609 ep->ep.maxpacket = maxpacket; 603 ep->ep.maxpacket = maxpacket;
610 604
diff --git a/drivers/usb/gadget/fsl_qe_udc.c b/drivers/usb/gadget/fsl_qe_udc.c
index 51881f3bd07a..b09452d6f33a 100644
--- a/drivers/usb/gadget/fsl_qe_udc.c
+++ b/drivers/usb/gadget/fsl_qe_udc.c
@@ -1596,7 +1596,7 @@ static int qe_ep_enable(struct usb_ep *_ep,
1596 ep = container_of(_ep, struct qe_ep, ep); 1596 ep = container_of(_ep, struct qe_ep, ep);
1597 1597
1598 /* catch various bogus parameters */ 1598 /* catch various bogus parameters */
1599 if (!_ep || !desc || ep->ep.desc || _ep->name == ep_name[0] || 1599 if (!_ep || !desc || _ep->name == ep_name[0] ||
1600 (desc->bDescriptorType != USB_DT_ENDPOINT)) 1600 (desc->bDescriptorType != USB_DT_ENDPOINT))
1601 return -EINVAL; 1601 return -EINVAL;
1602 1602
diff --git a/drivers/usb/gadget/fsl_udc_core.c b/drivers/usb/gadget/fsl_udc_core.c
index 28316858208b..bc6f9bb9994a 100644
--- a/drivers/usb/gadget/fsl_udc_core.c
+++ b/drivers/usb/gadget/fsl_udc_core.c
@@ -567,7 +567,7 @@ static int fsl_ep_enable(struct usb_ep *_ep,
567 ep = container_of(_ep, struct fsl_ep, ep); 567 ep = container_of(_ep, struct fsl_ep, ep);
568 568
569 /* catch various bogus parameters */ 569 /* catch various bogus parameters */
570 if (!_ep || !desc || ep->ep.desc 570 if (!_ep || !desc
571 || (desc->bDescriptorType != USB_DT_ENDPOINT)) 571 || (desc->bDescriptorType != USB_DT_ENDPOINT))
572 return -EINVAL; 572 return -EINVAL;
573 573
@@ -2575,7 +2575,7 @@ static int __init fsl_udc_probe(struct platform_device *pdev)
2575 /* for ep0: the desc defined here; 2575 /* for ep0: the desc defined here;
2576 * for other eps, gadget layer called ep_enable with defined desc 2576 * for other eps, gadget layer called ep_enable with defined desc
2577 */ 2577 */
2578 udc_controller->eps[0].desc = &fsl_ep0_desc; 2578 udc_controller->eps[0].ep.desc = &fsl_ep0_desc;
2579 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD; 2579 udc_controller->eps[0].ep.maxpacket = USB_MAX_CTRL_PAYLOAD;
2580 2580
2581 /* setup the udc->eps[] for non-control endpoints and link 2581 /* setup the udc->eps[] for non-control endpoints and link
diff --git a/drivers/usb/gadget/fsl_usb2_udc.h b/drivers/usb/gadget/fsl_usb2_udc.h
index 5cd7b7e7ddb4..f61a967f7082 100644
--- a/drivers/usb/gadget/fsl_usb2_udc.h
+++ b/drivers/usb/gadget/fsl_usb2_udc.h
@@ -568,10 +568,10 @@ static void dump_msg(const char *label, const u8 * buf, unsigned int length)
568/* 568/*
569 * ### internal used help routines. 569 * ### internal used help routines.
570 */ 570 */
571#define ep_index(EP) ((EP)->desc->bEndpointAddress&0xF) 571#define ep_index(EP) ((EP)->ep.desc->bEndpointAddress&0xF)
572#define ep_maxpacket(EP) ((EP)->ep.maxpacket) 572#define ep_maxpacket(EP) ((EP)->ep.maxpacket)
573#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \ 573#define ep_is_in(EP) ( (ep_index(EP) == 0) ? (EP->udc->ep0_dir == \
574 USB_DIR_IN ):((EP)->desc->bEndpointAddress \ 574 USB_DIR_IN) : ((EP)->ep.desc->bEndpointAddress \
575 & USB_DIR_IN)==USB_DIR_IN) 575 & USB_DIR_IN)==USB_DIR_IN)
576#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \ 576#define get_ep_by_pipe(udc, pipe) ((pipe == 1)? &udc->eps[0]: \
577 &udc->eps[pipe]) 577 &udc->eps[pipe])
diff --git a/drivers/usb/gadget/goku_udc.c b/drivers/usb/gadget/goku_udc.c
index b241e6c6a7f2..3d28fb976c78 100644
--- a/drivers/usb/gadget/goku_udc.c
+++ b/drivers/usb/gadget/goku_udc.c
@@ -102,7 +102,7 @@ goku_ep_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc)
102 unsigned long flags; 102 unsigned long flags;
103 103
104 ep = container_of(_ep, struct goku_ep, ep); 104 ep = container_of(_ep, struct goku_ep, ep);
105 if (!_ep || !desc || ep->ep.desc 105 if (!_ep || !desc
106 || desc->bDescriptorType != USB_DT_ENDPOINT) 106 || desc->bDescriptorType != USB_DT_ENDPOINT)
107 return -EINVAL; 107 return -EINVAL;
108 dev = ep->dev; 108 dev = ep->dev;
diff --git a/drivers/usb/gadget/mv_udc_core.c b/drivers/usb/gadget/mv_udc_core.c
index dbcd1329495e..117a4bba1b8c 100644
--- a/drivers/usb/gadget/mv_udc_core.c
+++ b/drivers/usb/gadget/mv_udc_core.c
@@ -464,7 +464,7 @@ static int mv_ep_enable(struct usb_ep *_ep,
464 ep = container_of(_ep, struct mv_ep, ep); 464 ep = container_of(_ep, struct mv_ep, ep);
465 udc = ep->udc; 465 udc = ep->udc;
466 466
467 if (!_ep || !desc || ep->ep.desc 467 if (!_ep || !desc
468 || desc->bDescriptorType != USB_DT_ENDPOINT) 468 || desc->bDescriptorType != USB_DT_ENDPOINT)
469 return -EINVAL; 469 return -EINVAL;
470 470
diff --git a/drivers/usb/gadget/omap_udc.c b/drivers/usb/gadget/omap_udc.c
index 7ba32469c5bd..a460e8c204f4 100644
--- a/drivers/usb/gadget/omap_udc.c
+++ b/drivers/usb/gadget/omap_udc.c
@@ -153,7 +153,7 @@ static int omap_ep_enable(struct usb_ep *_ep,
153 u16 maxp; 153 u16 maxp;
154 154
155 /* catch various bogus parameters */ 155 /* catch various bogus parameters */
156 if (!_ep || !desc || ep->ep.desc 156 if (!_ep || !desc
157 || desc->bDescriptorType != USB_DT_ENDPOINT 157 || desc->bDescriptorType != USB_DT_ENDPOINT
158 || ep->bEndpointAddress != desc->bEndpointAddress 158 || ep->bEndpointAddress != desc->bEndpointAddress
159 || ep->maxpacket < usb_endpoint_maxp(desc)) { 159 || ep->maxpacket < usb_endpoint_maxp(desc)) {
diff --git a/drivers/usb/gadget/pxa25x_udc.c b/drivers/usb/gadget/pxa25x_udc.c
index d7c8cb3bf759..f7ff9e8e746a 100644
--- a/drivers/usb/gadget/pxa25x_udc.c
+++ b/drivers/usb/gadget/pxa25x_udc.c
@@ -218,7 +218,7 @@ static int pxa25x_ep_enable (struct usb_ep *_ep,
218 struct pxa25x_udc *dev; 218 struct pxa25x_udc *dev;
219 219
220 ep = container_of (_ep, struct pxa25x_ep, ep); 220 ep = container_of (_ep, struct pxa25x_ep, ep);
221 if (!_ep || !desc || ep->ep.desc || _ep->name == ep0name 221 if (!_ep || !desc || _ep->name == ep0name
222 || desc->bDescriptorType != USB_DT_ENDPOINT 222 || desc->bDescriptorType != USB_DT_ENDPOINT
223 || ep->bEndpointAddress != desc->bEndpointAddress 223 || ep->bEndpointAddress != desc->bEndpointAddress
224 || ep->fifo_size < usb_endpoint_maxp (desc)) { 224 || ep->fifo_size < usb_endpoint_maxp (desc)) {
diff --git a/drivers/usb/gadget/s3c-hsudc.c b/drivers/usb/gadget/s3c-hsudc.c
index 36c6836eeb0f..236b271871a0 100644
--- a/drivers/usb/gadget/s3c-hsudc.c
+++ b/drivers/usb/gadget/s3c-hsudc.c
@@ -760,7 +760,7 @@ static int s3c_hsudc_ep_enable(struct usb_ep *_ep,
760 u32 ecr = 0; 760 u32 ecr = 0;
761 761
762 hsep = our_ep(_ep); 762 hsep = our_ep(_ep);
763 if (!_ep || !desc || hsep->ep.desc || _ep->name == ep0name 763 if (!_ep || !desc || _ep->name == ep0name
764 || desc->bDescriptorType != USB_DT_ENDPOINT 764 || desc->bDescriptorType != USB_DT_ENDPOINT
765 || hsep->bEndpointAddress != desc->bEndpointAddress 765 || hsep->bEndpointAddress != desc->bEndpointAddress
766 || ep_maxpacket(hsep) < usb_endpoint_maxp(desc)) 766 || ep_maxpacket(hsep) < usb_endpoint_maxp(desc))
diff --git a/drivers/usb/gadget/s3c2410_udc.c b/drivers/usb/gadget/s3c2410_udc.c
index 3de71d37d75e..f2e51f50e528 100644
--- a/drivers/usb/gadget/s3c2410_udc.c
+++ b/drivers/usb/gadget/s3c2410_udc.c
@@ -1062,7 +1062,7 @@ static int s3c2410_udc_ep_enable(struct usb_ep *_ep,
1062 1062
1063 ep = to_s3c2410_ep(_ep); 1063 ep = to_s3c2410_ep(_ep);
1064 1064
1065 if (!_ep || !desc || ep->ep.desc 1065 if (!_ep || !desc
1066 || _ep->name == ep0name 1066 || _ep->name == ep0name
1067 || desc->bDescriptorType != USB_DT_ENDPOINT) 1067 || desc->bDescriptorType != USB_DT_ENDPOINT)
1068 return -EINVAL; 1068 return -EINVAL;
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index b100f5f9f4b6..800be38c78b4 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -671,7 +671,9 @@ static int ehci_init(struct usb_hcd *hcd)
671 hw = ehci->async->hw; 671 hw = ehci->async->hw;
672 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma); 672 hw->hw_next = QH_NEXT(ehci, ehci->async->qh_dma);
673 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD); 673 hw->hw_info1 = cpu_to_hc32(ehci, QH_HEAD);
674#if defined(CONFIG_PPC_PS3)
674 hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */ 675 hw->hw_info1 |= cpu_to_hc32(ehci, (1 << 7)); /* I = 1 */
676#endif
675 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT); 677 hw->hw_token = cpu_to_hc32(ehci, QTD_STS_HALT);
676 hw->hw_qtd_next = EHCI_LIST_END(ehci); 678 hw->hw_qtd_next = EHCI_LIST_END(ehci);
677 ehci->async->qh_state = QH_STATE_LINKED; 679 ehci->async->qh_state = QH_STATE_LINKED;
diff --git a/drivers/usb/host/ehci-omap.c b/drivers/usb/host/ehci-omap.c
index a44294d13494..17cfb8a1131c 100644
--- a/drivers/usb/host/ehci-omap.c
+++ b/drivers/usb/host/ehci-omap.c
@@ -43,6 +43,7 @@
43#include <linux/regulator/consumer.h> 43#include <linux/regulator/consumer.h>
44#include <linux/pm_runtime.h> 44#include <linux/pm_runtime.h>
45#include <linux/gpio.h> 45#include <linux/gpio.h>
46#include <linux/clk.h>
46 47
47/* EHCI Register Set */ 48/* EHCI Register Set */
48#define EHCI_INSNREG04 (0xA0) 49#define EHCI_INSNREG04 (0xA0)
@@ -55,6 +56,15 @@
55#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8 56#define EHCI_INSNREG05_ULPI_EXTREGADD_SHIFT 8
56#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0 57#define EHCI_INSNREG05_ULPI_WRDATA_SHIFT 0
57 58
59/* Errata i693 */
60static struct clk *utmi_p1_fck;
61static struct clk *utmi_p2_fck;
62static struct clk *xclk60mhsp1_ck;
63static struct clk *xclk60mhsp2_ck;
64static struct clk *usbhost_p1_fck;
65static struct clk *usbhost_p2_fck;
66static struct clk *init_60m_fclk;
67
58/*-------------------------------------------------------------------------*/ 68/*-------------------------------------------------------------------------*/
59 69
60static const struct hc_driver ehci_omap_hc_driver; 70static const struct hc_driver ehci_omap_hc_driver;
@@ -70,6 +80,41 @@ static inline u32 ehci_read(void __iomem *base, u32 reg)
70 return __raw_readl(base + reg); 80 return __raw_readl(base + reg);
71} 81}
72 82
83/* Erratum i693 workaround sequence */
84static void omap_ehci_erratum_i693(struct ehci_hcd *ehci)
85{
86 int ret = 0;
87
88 /* Switch to the internal 60 MHz clock */
89 ret = clk_set_parent(utmi_p1_fck, init_60m_fclk);
90 if (ret != 0)
91 ehci_err(ehci, "init_60m_fclk set parent"
92 "failed error:%d\n", ret);
93
94 ret = clk_set_parent(utmi_p2_fck, init_60m_fclk);
95 if (ret != 0)
96 ehci_err(ehci, "init_60m_fclk set parent"
97 "failed error:%d\n", ret);
98
99 clk_enable(usbhost_p1_fck);
100 clk_enable(usbhost_p2_fck);
101
102 /* Wait 1ms and switch back to the external clock */
103 mdelay(1);
104 ret = clk_set_parent(utmi_p1_fck, xclk60mhsp1_ck);
105 if (ret != 0)
106 ehci_err(ehci, "xclk60mhsp1_ck set parent"
107 "failed error:%d\n", ret);
108
109 ret = clk_set_parent(utmi_p2_fck, xclk60mhsp2_ck);
110 if (ret != 0)
111 ehci_err(ehci, "xclk60mhsp2_ck set parent"
112 "failed error:%d\n", ret);
113
114 clk_disable(usbhost_p1_fck);
115 clk_disable(usbhost_p2_fck);
116}
117
73static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port) 118static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
74{ 119{
75 struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev); 120 struct usb_hcd *hcd = dev_get_drvdata(&pdev->dev);
@@ -100,6 +145,50 @@ static void omap_ehci_soft_phy_reset(struct platform_device *pdev, u8 port)
100 } 145 }
101} 146}
102 147
148static int omap_ehci_hub_control(
149 struct usb_hcd *hcd,
150 u16 typeReq,
151 u16 wValue,
152 u16 wIndex,
153 char *buf,
154 u16 wLength
155)
156{
157 struct ehci_hcd *ehci = hcd_to_ehci(hcd);
158 u32 __iomem *status_reg = &ehci->regs->port_status[
159 (wIndex & 0xff) - 1];
160 u32 temp;
161 unsigned long flags;
162 int retval = 0;
163
164 spin_lock_irqsave(&ehci->lock, flags);
165
166 if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
167 temp = ehci_readl(ehci, status_reg);
168 if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
169 retval = -EPIPE;
170 goto done;
171 }
172
173 temp &= ~PORT_WKCONN_E;
174 temp |= PORT_WKDISC_E | PORT_WKOC_E;
175 ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
176
177 omap_ehci_erratum_i693(ehci);
178
179 set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
180 goto done;
181 }
182
183 spin_unlock_irqrestore(&ehci->lock, flags);
184
185 /* Handle the hub control events here */
186 return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
187done:
188 spin_unlock_irqrestore(&ehci->lock, flags);
189 return retval;
190}
191
103static void disable_put_regulator( 192static void disable_put_regulator(
104 struct ehci_hcd_omap_platform_data *pdata) 193 struct ehci_hcd_omap_platform_data *pdata)
105{ 194{
@@ -264,8 +353,76 @@ static int ehci_hcd_omap_probe(struct platform_device *pdev)
264 /* root ports should always stay powered */ 353 /* root ports should always stay powered */
265 ehci_port_power(omap_ehci, 1); 354 ehci_port_power(omap_ehci, 1);
266 355
356 /* get clocks */
357 utmi_p1_fck = clk_get(dev, "utmi_p1_gfclk");
358 if (IS_ERR(utmi_p1_fck)) {
359 ret = PTR_ERR(utmi_p1_fck);
360 dev_err(dev, "utmi_p1_gfclk failed error:%d\n", ret);
361 goto err_add_hcd;
362 }
363
364 xclk60mhsp1_ck = clk_get(dev, "xclk60mhsp1_ck");
365 if (IS_ERR(xclk60mhsp1_ck)) {
366 ret = PTR_ERR(xclk60mhsp1_ck);
367 dev_err(dev, "xclk60mhsp1_ck failed error:%d\n", ret);
368 goto err_utmi_p1_fck;
369 }
370
371 utmi_p2_fck = clk_get(dev, "utmi_p2_gfclk");
372 if (IS_ERR(utmi_p2_fck)) {
373 ret = PTR_ERR(utmi_p2_fck);
374 dev_err(dev, "utmi_p2_gfclk failed error:%d\n", ret);
375 goto err_xclk60mhsp1_ck;
376 }
377
378 xclk60mhsp2_ck = clk_get(dev, "xclk60mhsp2_ck");
379 if (IS_ERR(xclk60mhsp2_ck)) {
380 ret = PTR_ERR(xclk60mhsp2_ck);
381 dev_err(dev, "xclk60mhsp2_ck failed error:%d\n", ret);
382 goto err_utmi_p2_fck;
383 }
384
385 usbhost_p1_fck = clk_get(dev, "usb_host_hs_utmi_p1_clk");
386 if (IS_ERR(usbhost_p1_fck)) {
387 ret = PTR_ERR(usbhost_p1_fck);
388 dev_err(dev, "usbhost_p1_fck failed error:%d\n", ret);
389 goto err_xclk60mhsp2_ck;
390 }
391
392 usbhost_p2_fck = clk_get(dev, "usb_host_hs_utmi_p2_clk");
393 if (IS_ERR(usbhost_p2_fck)) {
394 ret = PTR_ERR(usbhost_p2_fck);
395 dev_err(dev, "usbhost_p2_fck failed error:%d\n", ret);
396 goto err_usbhost_p1_fck;
397 }
398
399 init_60m_fclk = clk_get(dev, "init_60m_fclk");
400 if (IS_ERR(init_60m_fclk)) {
401 ret = PTR_ERR(init_60m_fclk);
402 dev_err(dev, "init_60m_fclk failed error:%d\n", ret);
403 goto err_usbhost_p2_fck;
404 }
405
267 return 0; 406 return 0;
268 407
408err_usbhost_p2_fck:
409 clk_put(usbhost_p2_fck);
410
411err_usbhost_p1_fck:
412 clk_put(usbhost_p1_fck);
413
414err_xclk60mhsp2_ck:
415 clk_put(xclk60mhsp2_ck);
416
417err_utmi_p2_fck:
418 clk_put(utmi_p2_fck);
419
420err_xclk60mhsp1_ck:
421 clk_put(xclk60mhsp1_ck);
422
423err_utmi_p1_fck:
424 clk_put(utmi_p1_fck);
425
269err_add_hcd: 426err_add_hcd:
270 disable_put_regulator(pdata); 427 disable_put_regulator(pdata);
271 pm_runtime_put_sync(dev); 428 pm_runtime_put_sync(dev);
@@ -294,6 +451,15 @@ static int ehci_hcd_omap_remove(struct platform_device *pdev)
294 disable_put_regulator(dev->platform_data); 451 disable_put_regulator(dev->platform_data);
295 iounmap(hcd->regs); 452 iounmap(hcd->regs);
296 usb_put_hcd(hcd); 453 usb_put_hcd(hcd);
454
455 clk_put(utmi_p1_fck);
456 clk_put(utmi_p2_fck);
457 clk_put(xclk60mhsp1_ck);
458 clk_put(xclk60mhsp2_ck);
459 clk_put(usbhost_p1_fck);
460 clk_put(usbhost_p2_fck);
461 clk_put(init_60m_fclk);
462
297 pm_runtime_put_sync(dev); 463 pm_runtime_put_sync(dev);
298 pm_runtime_disable(dev); 464 pm_runtime_disable(dev);
299 465
@@ -364,7 +530,7 @@ static const struct hc_driver ehci_omap_hc_driver = {
364 * root hub support 530 * root hub support
365 */ 531 */
366 .hub_status_data = ehci_hub_status_data, 532 .hub_status_data = ehci_hub_status_data,
367 .hub_control = ehci_hub_control, 533 .hub_control = omap_ehci_hub_control,
368 .bus_suspend = ehci_bus_suspend, 534 .bus_suspend = ehci_bus_suspend,
369 .bus_resume = ehci_bus_resume, 535 .bus_resume = ehci_bus_resume,
370 536
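The erratum i693 workaround added above bounces each UTMI functional clock onto the internal 60 MHz source for about 1 ms before restoring the external pad clock. A condensed one-port sketch of that reparenting pattern, assuming <linux/clk.h> and <linux/delay.h>; the function name is illustrative and error reporting is reduced to an early return:

	/* Sketch only: one-port version of the i693 clock bounce shown above. */
	static void i693_bounce_utmi_clock(struct clk *utmi_fck, struct clk *init_60m,
					   struct clk *xclk60mhsp, struct clk *host_fck)
	{
		if (clk_set_parent(utmi_fck, init_60m))
			return;			/* leave the clock tree untouched on failure */
		clk_enable(host_fck);
		mdelay(1);			/* wait ~1 ms, as in the workaround above */
		clk_set_parent(utmi_fck, xclk60mhsp);
		clk_disable(host_fck);
	}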
diff --git a/drivers/usb/host/ehci-pci.c b/drivers/usb/host/ehci-pci.c
index bc94d7bf072d..123481793a47 100644
--- a/drivers/usb/host/ehci-pci.c
+++ b/drivers/usb/host/ehci-pci.c
@@ -144,14 +144,6 @@ static int ehci_pci_setup(struct usb_hcd *hcd)
144 hcd->has_tt = 1; 144 hcd->has_tt = 1;
145 tdi_reset(ehci); 145 tdi_reset(ehci);
146 } 146 }
147 if (pdev->subsystem_vendor == PCI_VENDOR_ID_ASUSTEK) {
148 /* EHCI #1 or #2 on 6 Series/C200 Series chipset */
149 if (pdev->device == 0x1c26 || pdev->device == 0x1c2d) {
150 ehci_info(ehci, "broken D3 during system sleep on ASUS\n");
151 hcd->broken_pci_sleep = 1;
152 device_set_wakeup_capable(&pdev->dev, false);
153 }
154 }
155 break; 147 break;
156 case PCI_VENDOR_ID_TDI: 148 case PCI_VENDOR_ID_TDI:
157 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) { 149 if (pdev->device == PCI_DEVICE_ID_TDI_EHCI) {
diff --git a/drivers/usb/host/ehci-sh.c b/drivers/usb/host/ehci-sh.c
index ca819cdd0c5e..e7cb3925abf8 100644
--- a/drivers/usb/host/ehci-sh.c
+++ b/drivers/usb/host/ehci-sh.c
@@ -126,8 +126,7 @@ static int ehci_hcd_sh_probe(struct platform_device *pdev)
126 goto fail_create_hcd; 126 goto fail_create_hcd;
127 } 127 }
128 128
129 if (pdev->dev.platform_data != NULL) 129 pdata = pdev->dev.platform_data;
130 pdata = pdev->dev.platform_data;
131 130
132 /* initialize hcd */ 131 /* initialize hcd */
133 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev, 132 hcd = usb_create_hcd(&ehci_sh_hc_driver, &pdev->dev,
diff --git a/drivers/usb/host/ehci-xilinx-of.c b/drivers/usb/host/ehci-xilinx-of.c
index 9c2cc4633894..e9713d589e30 100644
--- a/drivers/usb/host/ehci-xilinx-of.c
+++ b/drivers/usb/host/ehci-xilinx-of.c
@@ -270,14 +270,12 @@ static int ehci_hcd_xilinx_of_remove(struct platform_device *op)
270 * 270 *
271 * Properly shutdown the hcd, call driver's shutdown routine. 271 * Properly shutdown the hcd, call driver's shutdown routine.
272 */ 272 */
273static int ehci_hcd_xilinx_of_shutdown(struct platform_device *op) 273static void ehci_hcd_xilinx_of_shutdown(struct platform_device *op)
274{ 274{
275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev); 275 struct usb_hcd *hcd = dev_get_drvdata(&op->dev);
276 276
277 if (hcd->driver->shutdown) 277 if (hcd->driver->shutdown)
278 hcd->driver->shutdown(hcd); 278 hcd->driver->shutdown(hcd);
279
280 return 0;
281} 279}
282 280
283 281
diff --git a/drivers/usb/host/ohci-hub.c b/drivers/usb/host/ohci-hub.c
index 836772dfabd3..2f3619eefefa 100644
--- a/drivers/usb/host/ohci-hub.c
+++ b/drivers/usb/host/ohci-hub.c
@@ -317,7 +317,7 @@ static int ohci_bus_resume (struct usb_hcd *hcd)
317} 317}
318 318
319/* Carry out the final steps of resuming the controller device */ 319/* Carry out the final steps of resuming the controller device */
320static void ohci_finish_controller_resume(struct usb_hcd *hcd) 320static void __maybe_unused ohci_finish_controller_resume(struct usb_hcd *hcd)
321{ 321{
322 struct ohci_hcd *ohci = hcd_to_ohci(hcd); 322 struct ohci_hcd *ohci = hcd_to_ohci(hcd);
323 int port; 323 int port;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ec4338eec826..77689bd64cac 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -793,10 +793,9 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
793 struct xhci_virt_device *virt_dev, 793 struct xhci_virt_device *virt_dev,
794 int slot_id) 794 int slot_id)
795{ 795{
796 struct list_head *tt;
797 struct list_head *tt_list_head; 796 struct list_head *tt_list_head;
798 struct list_head *tt_next; 797 struct xhci_tt_bw_info *tt_info, *next;
799 struct xhci_tt_bw_info *tt_info; 798 bool slot_found = false;
800 799
801 /* If the device never made it past the Set Address stage, 800 /* If the device never made it past the Set Address stage,
802 * it may not have the real_port set correctly. 801 * it may not have the real_port set correctly.
@@ -808,34 +807,16 @@ static void xhci_free_tt_info(struct xhci_hcd *xhci,
808 } 807 }
809 808
810 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts); 809 tt_list_head = &(xhci->rh_bw[virt_dev->real_port - 1].tts);
811 if (list_empty(tt_list_head)) 810 list_for_each_entry_safe(tt_info, next, tt_list_head, tt_list) {
812 return; 811 /* Multi-TT hubs will have more than one entry */
813 812 if (tt_info->slot_id == slot_id) {
814 list_for_each(tt, tt_list_head) { 813 slot_found = true;
815 tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 814 list_del(&tt_info->tt_list);
816 if (tt_info->slot_id == slot_id) 815 kfree(tt_info);
816 } else if (slot_found) {
817 break; 817 break;
818 }
818 } 819 }
819 /* Cautionary measure in case the hub was disconnected before we
820 * stored the TT information.
821 */
822 if (tt_info->slot_id != slot_id)
823 return;
824
825 tt_next = tt->next;
826 tt_info = list_entry(tt, struct xhci_tt_bw_info,
827 tt_list);
828 /* Multi-TT hubs will have more than one entry */
829 do {
830 list_del(tt);
831 kfree(tt_info);
832 tt = tt_next;
833 if (list_empty(tt_list_head))
834 break;
835 tt_next = tt->next;
836 tt_info = list_entry(tt, struct xhci_tt_bw_info,
837 tt_list);
838 } while (tt_info->slot_id == slot_id);
839} 820}
840 821
841int xhci_alloc_tt_info(struct xhci_hcd *xhci, 822int xhci_alloc_tt_info(struct xhci_hcd *xhci,
@@ -1791,17 +1772,9 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1791{ 1772{
1792 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); 1773 struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
1793 struct dev_info *dev_info, *next; 1774 struct dev_info *dev_info, *next;
1794 struct list_head *tt_list_head;
1795 struct list_head *tt;
1796 struct list_head *endpoints;
1797 struct list_head *ep, *q;
1798 struct xhci_tt_bw_info *tt_info;
1799 struct xhci_interval_bw_table *bwt;
1800 struct xhci_virt_ep *virt_ep;
1801
1802 unsigned long flags; 1775 unsigned long flags;
1803 int size; 1776 int size;
1804 int i; 1777 int i, j, num_ports;
1805 1778
1806 /* Free the Event Ring Segment Table and the actual Event Ring */ 1779 /* Free the Event Ring Segment Table and the actual Event Ring */
1807 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries); 1780 size = sizeof(struct xhci_erst_entry)*(xhci->erst.num_entries);
@@ -1860,21 +1833,22 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci)
1860 } 1833 }
1861 spin_unlock_irqrestore(&xhci->lock, flags); 1834 spin_unlock_irqrestore(&xhci->lock, flags);
1862 1835
1863 bwt = &xhci->rh_bw->bw_table; 1836 num_ports = HCS_MAX_PORTS(xhci->hcs_params1);
1864 for (i = 0; i < XHCI_MAX_INTERVAL; i++) { 1837 for (i = 0; i < num_ports; i++) {
1865 endpoints = &bwt->interval_bw[i].endpoints; 1838 struct xhci_interval_bw_table *bwt = &xhci->rh_bw[i].bw_table;
1866 list_for_each_safe(ep, q, endpoints) { 1839 for (j = 0; j < XHCI_MAX_INTERVAL; j++) {
1867 virt_ep = list_entry(ep, struct xhci_virt_ep, bw_endpoint_list); 1840 struct list_head *ep = &bwt->interval_bw[j].endpoints;
1868 list_del(&virt_ep->bw_endpoint_list); 1841 while (!list_empty(ep))
1869 kfree(virt_ep); 1842 list_del_init(ep->next);
1870 } 1843 }
1871 } 1844 }
1872 1845
1873 tt_list_head = &xhci->rh_bw->tts; 1846 for (i = 0; i < num_ports; i++) {
1874 list_for_each_safe(tt, q, tt_list_head) { 1847 struct xhci_tt_bw_info *tt, *n;
1875 tt_info = list_entry(tt, struct xhci_tt_bw_info, tt_list); 1848 list_for_each_entry_safe(tt, n, &xhci->rh_bw[i].tts, tt_list) {
1876 list_del(tt); 1849 list_del(&tt->tt_list);
1877 kfree(tt_info); 1850 kfree(tt);
1851 }
1878 } 1852 }
1879 1853
1880 xhci->num_usb2_ports = 0; 1854 xhci->num_usb2_ports = 0;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index afdc73ee84a6..a979cd0dbe0f 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -795,8 +795,8 @@ int xhci_suspend(struct xhci_hcd *xhci)
795 command = xhci_readl(xhci, &xhci->op_regs->command); 795 command = xhci_readl(xhci, &xhci->op_regs->command);
796 command |= CMD_CSS; 796 command |= CMD_CSS;
797 xhci_writel(xhci, command, &xhci->op_regs->command); 797 xhci_writel(xhci, command, &xhci->op_regs->command);
798 if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) { 798 if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10 * 1000)) {
799 xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n"); 799 xhci_warn(xhci, "WARN: xHC save state timeout\n");
800 spin_unlock_irq(&xhci->lock); 800 spin_unlock_irq(&xhci->lock);
801 return -ETIMEDOUT; 801 return -ETIMEDOUT;
802 } 802 }
@@ -848,8 +848,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
848 command |= CMD_CRS; 848 command |= CMD_CRS;
849 xhci_writel(xhci, command, &xhci->op_regs->command); 849 xhci_writel(xhci, command, &xhci->op_regs->command);
850 if (handshake(xhci, &xhci->op_regs->status, 850 if (handshake(xhci, &xhci->op_regs->status,
851 STS_RESTORE, 0, 10*100)) { 851 STS_RESTORE, 0, 10 * 1000)) {
852 xhci_dbg(xhci, "WARN: xHC CMD_CSS timeout\n"); 852 xhci_warn(xhci, "WARN: xHC restore state timeout\n");
853 spin_unlock_irq(&xhci->lock); 853 spin_unlock_irq(&xhci->lock);
854 return -ETIMEDOUT; 854 return -ETIMEDOUT;
855 } 855 }
@@ -3906,7 +3906,7 @@ static u16 xhci_get_timeout_no_hub_lpm(struct usb_device *udev,
3906 default: 3906 default:
3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n", 3907 dev_warn(&udev->dev, "%s: Can't get timeout for non-U1 or U2 state.\n",
3908 __func__); 3908 __func__);
3909 return -EINVAL; 3909 return USB3_LPM_DISABLED;
3910 } 3910 }
3911 3911
3912 if (sel <= max_sel_pel && pel <= max_sel_pel) 3912 if (sel <= max_sel_pel && pel <= max_sel_pel)
diff --git a/drivers/usb/musb/davinci.c b/drivers/usb/musb/davinci.c
index 768b4b55c816..9d63ba4d10d6 100644
--- a/drivers/usb/musb/davinci.c
+++ b/drivers/usb/musb/davinci.c
@@ -34,6 +34,7 @@
34#include <linux/dma-mapping.h> 34#include <linux/dma-mapping.h>
35 35
36#include <mach/cputype.h> 36#include <mach/cputype.h>
37#include <mach/hardware.h>
37 38
38#include <asm/mach-types.h> 39#include <asm/mach-types.h>
39 40
diff --git a/drivers/usb/musb/davinci.h b/drivers/usb/musb/davinci.h
index 046c84433cad..371baa0ee509 100644
--- a/drivers/usb/musb/davinci.h
+++ b/drivers/usb/musb/davinci.h
@@ -15,7 +15,7 @@
15 */ 15 */
16 16
17/* Integrated highspeed/otg PHY */ 17/* Integrated highspeed/otg PHY */
18#define USBPHY_CTL_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x34) 18#define USBPHY_CTL_PADDR 0x01c40034
19#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */ 19#define USBPHY_DATAPOL BIT(11) /* (dm355) switch D+/D- */
20#define USBPHY_PHYCLKGD BIT(8) 20#define USBPHY_PHYCLKGD BIT(8)
21#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */ 21#define USBPHY_SESNDEN BIT(7) /* v(sess_end) comparator */
@@ -27,7 +27,7 @@
27#define USBPHY_OTGPDWN BIT(1) 27#define USBPHY_OTGPDWN BIT(1)
28#define USBPHY_PHYPDWN BIT(0) 28#define USBPHY_PHYPDWN BIT(0)
29 29
30#define DM355_DEEPSLEEP_PADDR (DAVINCI_SYSTEM_MODULE_BASE + 0x48) 30#define DM355_DEEPSLEEP_PADDR 0x01c40048
31#define DRVVBUS_FORCE BIT(2) 31#define DRVVBUS_FORCE BIT(2)
32#define DRVVBUS_OVERRIDE BIT(1) 32#define DRVVBUS_OVERRIDE BIT(1)
33 33
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f42c29b11f71..95918dacc99a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -1232,6 +1232,7 @@ static int musb_gadget_disable(struct usb_ep *ep)
1232 } 1232 }
1233 1233
1234 musb_ep->desc = NULL; 1234 musb_ep->desc = NULL;
1235 musb_ep->end_point.desc = NULL;
1235 1236
1236 /* abort all pending DMA and requests */ 1237 /* abort all pending DMA and requests */
1237 nuke(musb_ep, -ESHUTDOWN); 1238 nuke(musb_ep, -ESHUTDOWN);
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 1b1926200ba7..73d25cd8cba5 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -82,6 +82,7 @@ static const struct usb_device_id id_table[] = {
82 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ 82 { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */
83 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ 83 { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */
84 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ 84 { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */
85 { USB_DEVICE(0x10C4, 0x80C4) }, /* Cygnal Integrated Products, Inc., Optris infrared thermometer */
85 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ 86 { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */
86 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ 87 { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */
87 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ 88 { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 8c084ea34e26..bc912e5a3beb 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -737,6 +737,7 @@ static struct usb_device_id id_table_combined [] = {
737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, 737 { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) },
738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, 738 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) },
739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, 739 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) },
740 { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) },
740 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, 741 { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) },
741 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, 742 { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) },
742 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, 743 { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index f3c7c78ede33..5661c7e2d415 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -784,6 +784,7 @@
784#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ 784#define RTSYSTEMS_VID 0x2100 /* Vendor ID */
785#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ 785#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */
786#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ 786#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */
787#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */
787 788
788 789
789/* 790/*
diff --git a/drivers/usb/serial/generic.c b/drivers/usb/serial/generic.c
index 105a6d898ca4..9b026bf7afef 100644
--- a/drivers/usb/serial/generic.c
+++ b/drivers/usb/serial/generic.c
@@ -39,13 +39,6 @@ MODULE_PARM_DESC(product, "User specified USB idProduct");
39 39
40static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */ 40static struct usb_device_id generic_device_ids[2]; /* Initially all zeroes. */
41 41
42/* we want to look at all devices, as the vendor/product id can change
43 * depending on the command line argument */
44static const struct usb_device_id generic_serial_ids[] = {
45 {.driver_info = 42},
46 {}
47};
48
49/* All of the device info needed for the Generic Serial Converter */ 42/* All of the device info needed for the Generic Serial Converter */
50struct usb_serial_driver usb_serial_generic_device = { 43struct usb_serial_driver usb_serial_generic_device = {
51 .driver = { 44 .driver = {
@@ -79,7 +72,8 @@ int usb_serial_generic_register(int _debug)
79 USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT; 72 USB_DEVICE_ID_MATCH_VENDOR | USB_DEVICE_ID_MATCH_PRODUCT;
80 73
81 /* register our generic driver with ourselves */ 74 /* register our generic driver with ourselves */
82 retval = usb_serial_register_drivers(serial_drivers, "usbserial_generic", generic_serial_ids); 75 retval = usb_serial_register_drivers(serial_drivers,
76 "usbserial_generic", generic_device_ids);
83#endif 77#endif
84 return retval; 78 return retval;
85} 79}
diff --git a/drivers/usb/serial/mct_u232.c b/drivers/usb/serial/mct_u232.c
index d0ec1aa52719..a71fa0aa0406 100644
--- a/drivers/usb/serial/mct_u232.c
+++ b/drivers/usb/serial/mct_u232.c
@@ -309,13 +309,16 @@ static int mct_u232_set_modem_ctrl(struct usb_serial *serial,
309 MCT_U232_SET_REQUEST_TYPE, 309 MCT_U232_SET_REQUEST_TYPE,
310 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE, 310 0, 0, buf, MCT_U232_SET_MODEM_CTRL_SIZE,
311 WDR_TIMEOUT); 311 WDR_TIMEOUT);
312 if (rc < 0) 312 kfree(buf);
313 dev_err(&serial->dev->dev, 313
314 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
315 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr); 314 dbg("set_modem_ctrl: state=0x%x ==> mcr=0x%x", control_state, mcr);
316 315
317 kfree(buf); 316 if (rc < 0) {
318 return rc; 317 dev_err(&serial->dev->dev,
318 "Set MODEM CTRL 0x%x failed (error = %d)\n", mcr, rc);
319 return rc;
320 }
321 return 0;
319} /* mct_u232_set_modem_ctrl */ 322} /* mct_u232_set_modem_ctrl */
320 323
321static int mct_u232_get_modem_stat(struct usb_serial *serial, 324static int mct_u232_get_modem_stat(struct usb_serial *serial,
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index 29160f8b5101..57eca2448424 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -190,7 +190,7 @@
190 190
191static int device_type; 191static int device_type;
192 192
193static const struct usb_device_id id_table[] __devinitconst = { 193static const struct usb_device_id id_table[] = {
194 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)}, 194 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7840)},
195 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)}, 195 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7820)},
196 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)}, 196 {USB_DEVICE(USB_VENDOR_ID_MOSCHIP, MOSCHIP_DEVICE_ID_7810)},
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 1aae9028cd0b..e668a2460bd4 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -47,6 +47,7 @@
47/* Function prototypes */ 47/* Function prototypes */
48static int option_probe(struct usb_serial *serial, 48static int option_probe(struct usb_serial *serial,
49 const struct usb_device_id *id); 49 const struct usb_device_id *id);
50static void option_release(struct usb_serial *serial);
50static int option_send_setup(struct usb_serial_port *port); 51static int option_send_setup(struct usb_serial_port *port);
51static void option_instat_callback(struct urb *urb); 52static void option_instat_callback(struct urb *urb);
52 53
@@ -150,6 +151,7 @@ static void option_instat_callback(struct urb *urb);
150#define HUAWEI_PRODUCT_E14AC 0x14AC 151#define HUAWEI_PRODUCT_E14AC 0x14AC
151#define HUAWEI_PRODUCT_K3806 0x14AE 152#define HUAWEI_PRODUCT_K3806 0x14AE
152#define HUAWEI_PRODUCT_K4605 0x14C6 153#define HUAWEI_PRODUCT_K4605 0x14C6
154#define HUAWEI_PRODUCT_K5005 0x14C8
153#define HUAWEI_PRODUCT_K3770 0x14C9 155#define HUAWEI_PRODUCT_K3770 0x14C9
154#define HUAWEI_PRODUCT_K3771 0x14CA 156#define HUAWEI_PRODUCT_K3771 0x14CA
155#define HUAWEI_PRODUCT_K4510 0x14CB 157#define HUAWEI_PRODUCT_K4510 0x14CB
@@ -425,7 +427,7 @@ static void option_instat_callback(struct urb *urb);
425#define SAMSUNG_VENDOR_ID 0x04e8 427#define SAMSUNG_VENDOR_ID 0x04e8
426#define SAMSUNG_PRODUCT_GT_B3730 0x6889 428#define SAMSUNG_PRODUCT_GT_B3730 0x6889
427 429
428/* YUGA products www.yuga-info.com*/ 430/* YUGA products www.yuga-info.com gavin.kx@qq.com */
429#define YUGA_VENDOR_ID 0x257A 431#define YUGA_VENDOR_ID 0x257A
430#define YUGA_PRODUCT_CEM600 0x1601 432#define YUGA_PRODUCT_CEM600 0x1601
431#define YUGA_PRODUCT_CEM610 0x1602 433#define YUGA_PRODUCT_CEM610 0x1602
@@ -442,6 +444,8 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CEU516			0x160C
 #define YUGA_PRODUCT_CEU528			0x160D
 #define YUGA_PRODUCT_CEU526			0x160F
+#define YUGA_PRODUCT_CEU881			0x161F
+#define YUGA_PRODUCT_CEU882			0x162F
 
 #define YUGA_PRODUCT_CWM600			0x2601
 #define YUGA_PRODUCT_CWM610			0x2602
@@ -457,23 +461,26 @@ static void option_instat_callback(struct urb *urb);
 #define YUGA_PRODUCT_CWU518			0x260B
 #define YUGA_PRODUCT_CWU516			0x260C
 #define YUGA_PRODUCT_CWU528			0x260D
+#define YUGA_PRODUCT_CWU581			0x260E
 #define YUGA_PRODUCT_CWU526			0x260F
-
-#define YUGA_PRODUCT_CLM600			0x2601
-#define YUGA_PRODUCT_CLM610			0x2602
-#define YUGA_PRODUCT_CLM500			0x2603
-#define YUGA_PRODUCT_CLM510			0x2604
-#define YUGA_PRODUCT_CLM800			0x2605
-#define YUGA_PRODUCT_CLM900			0x2606
-
-#define YUGA_PRODUCT_CLU718			0x2607
-#define YUGA_PRODUCT_CLU716			0x2608
-#define YUGA_PRODUCT_CLU728			0x2609
-#define YUGA_PRODUCT_CLU726			0x260A
-#define YUGA_PRODUCT_CLU518			0x260B
-#define YUGA_PRODUCT_CLU516			0x260C
-#define YUGA_PRODUCT_CLU528			0x260D
-#define YUGA_PRODUCT_CLU526			0x260F
+#define YUGA_PRODUCT_CWU582			0x261F
+#define YUGA_PRODUCT_CWU583			0x262F
+
+#define YUGA_PRODUCT_CLM600			0x3601
+#define YUGA_PRODUCT_CLM610			0x3602
+#define YUGA_PRODUCT_CLM500			0x3603
+#define YUGA_PRODUCT_CLM510			0x3604
+#define YUGA_PRODUCT_CLM800			0x3605
+#define YUGA_PRODUCT_CLM900			0x3606
+
+#define YUGA_PRODUCT_CLU718			0x3607
+#define YUGA_PRODUCT_CLU716			0x3608
+#define YUGA_PRODUCT_CLU728			0x3609
+#define YUGA_PRODUCT_CLU726			0x360A
+#define YUGA_PRODUCT_CLU518			0x360B
+#define YUGA_PRODUCT_CLU516			0x360C
+#define YUGA_PRODUCT_CLU528			0x360D
+#define YUGA_PRODUCT_CLU526			0x360F
 
 /* Viettel products */
 #define VIETTEL_VENDOR_ID			0x2262
@@ -666,6 +673,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
 		.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
+	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
@@ -1209,6 +1221,11 @@ static const struct usb_device_id option_ids[] = {
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU516) },
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU528) },
 	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CLU526) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU881) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEU882) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU581) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU582) },
+	{ USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CWU583) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(VIETTEL_VENDOR_ID, VIETTEL_PRODUCT_VT1000, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE_AND_INTERFACE_INFO(ZD_VENDOR_ID, ZD_PRODUCT_7000, 0xff, 0xff, 0xff) },
 	{ USB_DEVICE(LG_VENDOR_ID, LG_PRODUCT_L02C) },	/* docomo L-02C modem */
@@ -1245,7 +1262,7 @@ static struct usb_serial_driver option_1port_device = {
 	.ioctl             = usb_wwan_ioctl,
 	.attach            = usb_wwan_startup,
 	.disconnect        = usb_wwan_disconnect,
-	.release           = usb_wwan_release,
+	.release           = option_release,
 	.read_int_callback = option_instat_callback,
 #ifdef CONFIG_PM
 	.suspend           = usb_wwan_suspend,
@@ -1259,35 +1276,6 @@ static struct usb_serial_driver * const serial_drivers[] = {
 
 static bool debug;
 
-/* per port private data */
-
-#define N_IN_URB 4
-#define N_OUT_URB 4
-#define IN_BUFLEN 4096
-#define OUT_BUFLEN 4096
-
-struct option_port_private {
-	/* Input endpoints and buffer for this port */
-	struct urb *in_urbs[N_IN_URB];
-	u8 *in_buffer[N_IN_URB];
-	/* Output endpoints and buffer for this port */
-	struct urb *out_urbs[N_OUT_URB];
-	u8 *out_buffer[N_OUT_URB];
-	unsigned long out_busy;		/* Bit vector of URBs in use */
-	int opened;
-	struct usb_anchor delayed;
-
-	/* Settings for the port */
-	int rts_state;	/* Handshaking pins (outputs) */
-	int dtr_state;
-	int cts_state;	/* Handshaking pins (inputs) */
-	int dsr_state;
-	int dcd_state;
-	int ri_state;
-
-	unsigned long tx_start_time[N_OUT_URB];
-};
-
 module_usb_serial_driver(serial_drivers, option_ids);
 
 static bool is_blacklisted(const u8 ifnum, enum option_blacklist_reason reason,
@@ -1356,12 +1344,22 @@ static int option_probe(struct usb_serial *serial,
 	return 0;
 }
 
+static void option_release(struct usb_serial *serial)
+{
+	struct usb_wwan_intf_private *priv = usb_get_serial_data(serial);
+
+	usb_wwan_release(serial);
+
+	kfree(priv);
+}
+
 static void option_instat_callback(struct urb *urb)
 {
 	int err;
 	int status = urb->status;
 	struct usb_serial_port *port = urb->context;
-	struct option_port_private *portdata = usb_get_serial_port_data(port);
+	struct usb_wwan_port_private *portdata =
+					usb_get_serial_port_data(port);
 
 	dbg("%s: urb %p port %p has data %p", __func__, urb, port, portdata);
 
@@ -1421,7 +1419,7 @@ static int option_send_setup(struct usb_serial_port *port)
 	struct usb_serial *serial = port->serial;
 	struct usb_wwan_intf_private *intfdata =
 		(struct usb_wwan_intf_private *) serial->private;
-	struct option_port_private *portdata;
+	struct usb_wwan_port_private *portdata;
 	int ifNum = serial->interface->cur_altsetting->desc.bInterfaceNumber;
 	int val = 0;
 
diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c
index 0d5fe59ebb9e..996015c5f1ac 100644
--- a/drivers/usb/serial/qcserial.c
+++ b/drivers/usb/serial/qcserial.c
@@ -105,7 +105,13 @@ static const struct usb_device_id id_table[] = {
 	{USB_DEVICE(0x1410, 0xa021)},	/* Novatel Gobi 3000 Composite */
 	{USB_DEVICE(0x413c, 0x8193)},	/* Dell Gobi 3000 QDL */
 	{USB_DEVICE(0x413c, 0x8194)},	/* Dell Gobi 3000 Composite */
+	{USB_DEVICE(0x1199, 0x9010)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9012)},	/* Sierra Wireless Gobi 3000 QDL */
 	{USB_DEVICE(0x1199, 0x9013)},	/* Sierra Wireless Gobi 3000 Modem device (MC8355) */
+	{USB_DEVICE(0x1199, 0x9014)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9015)},	/* Sierra Wireless Gobi 3000 Modem device */
+	{USB_DEVICE(0x1199, 0x9018)},	/* Sierra Wireless Gobi 3000 QDL */
+	{USB_DEVICE(0x1199, 0x9019)},	/* Sierra Wireless Gobi 3000 Modem device */
 	{USB_DEVICE(0x12D1, 0x14F0)},	/* Sony Gobi 3000 QDL */
 	{USB_DEVICE(0x12D1, 0x14F1)},	/* Sony Gobi 3000 Composite */
 	{ }				/* Terminating entry */
diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c
index ba54a0a8235c..d423d36acc04 100644
--- a/drivers/usb/serial/sierra.c
+++ b/drivers/usb/serial/sierra.c
@@ -294,6 +294,10 @@ static const struct usb_device_id id_table[] = {
 	{ USB_DEVICE(0x1199, 0x68A3), 	/* Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
+	/* AT&T Direct IP LTE modems */
+	{ USB_DEVICE_AND_INTERFACE_INFO(0x0F3D, 0x68AA, 0xFF, 0xFF, 0xFF),
+	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
+	},
 	{ USB_DEVICE(0x0f3d, 0x68A3), 	/* Airprime/Sierra Wireless Direct IP modems */
 	  .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist
 	},
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index 6a1b609a0d94..27483f91a4a3 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -659,12 +659,14 @@ exit:
 static struct usb_serial_driver *search_serial_device(
 					struct usb_interface *iface)
 {
-	const struct usb_device_id *id;
+	const struct usb_device_id *id = NULL;
 	struct usb_serial_driver *drv;
+	struct usb_driver *driver = to_usb_driver(iface->dev.driver);
 
 	/* Check if the usb id matches a known device */
 	list_for_each_entry(drv, &usb_serial_driver_list, driver_list) {
-		id = get_iface_id(drv, iface);
+		if (drv->usb_driver == driver)
+			id = get_iface_id(drv, iface);
 		if (id)
 			return drv;
 	}
@@ -755,7 +757,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 
 	if (retval) {
 		dbg("sub driver rejected device");
-		kfree(serial);
+		usb_serial_put(serial);
 		module_put(type->driver.owner);
 		return retval;
 	}
@@ -827,7 +829,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 	 */
 	if (num_bulk_in == 0 || num_bulk_out == 0) {
 		dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n");
-		kfree(serial);
+		usb_serial_put(serial);
 		module_put(type->driver.owner);
 		return -ENODEV;
 	}
@@ -841,7 +843,7 @@ static int usb_serial_probe(struct usb_interface *interface,
 		if (num_ports == 0) {
 			dev_err(&interface->dev,
 			    "Generic device with no bulk out, not allowed.\n");
-			kfree(serial);
+			usb_serial_put(serial);
 			module_put(type->driver.owner);
 			return -EIO;
 		}
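
The three usb_serial_probe error paths above stop freeing the struct usb_serial directly and instead drop it with usb_serial_put(), so the object's final-reference cleanup actually runs once it has been initialised. A minimal sketch of that pattern using the generic struct kref is shown here; the names example_serial, example_serial_release and example_serial_put are illustrative and not taken from usb-serial.c, and the assumption that usb_serial is managed the same way is inferred from the put call in the diff:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/slab.h>

struct example_serial {
	struct kref kref;		/* embedded reference count */
	/* ... ports, buffers, ... */
};

/* runs exactly once, when the last reference is dropped */
static void example_serial_release(struct kref *kref)
{
	struct example_serial *serial =
		container_of(kref, struct example_serial, kref);

	/* free everything hanging off the object, then the object itself */
	kfree(serial);
}

static void example_serial_put(struct example_serial *serial)
{
	kref_put(&serial->kref, example_serial_release);
}

/*
 * Error-path rule the diff illustrates: once the object has been
 * kref_init()ed and possibly handed to other code that took references,
 * bailing out with kfree() would free memory someone may still point to.
 * Dropping our own reference with the put helper is always safe.
 */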
diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h
index 1719886bb9be..caf22bf5f822 100644
--- a/drivers/usb/storage/unusual_devs.h
+++ b/drivers/usb/storage/unusual_devs.h
@@ -1107,6 +1107,13 @@ UNUSUAL_DEV( 0x090a, 0x1200, 0x0000, 0x9999,
 		USB_SC_RBC, USB_PR_BULK, NULL,
 		0 ),
 
+/* Feiya QDI U2 DISK, reported by Hans de Goede <hdegoede@redhat.com> */
+UNUSUAL_DEV(  0x090c, 0x1000, 0x0000, 0xffff,
+		"Feiya",
+		"QDI U2 DISK",
+		USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+		US_FL_NO_READ_CAPACITY_16 ),
+
 /* aeb */
 UNUSUAL_DEV(  0x090c, 0x1132, 0x0000, 0xffff,
 		"Feiya",
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index a290be51a1f4..0217f7415ef5 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -2210,7 +2210,7 @@ config FB_XILINX
 
 config FB_COBALT
 	tristate "Cobalt server LCD frame buffer support"
-	depends on FB && MIPS_COBALT
+	depends on FB && (MIPS_COBALT || MIPS_SEAD3)
 
 config FB_SH7760
 	bool "SH7760/SH7763/SH7720/SH7721 LCDC support"
@@ -2382,6 +2382,39 @@ config FB_BROADSHEET
 	  and could also have been called by other names when coupled with
 	  a bridge adapter.
 
+config FB_AUO_K190X
+	tristate "AUO-K190X EPD controller support"
+	depends on FB
+	select FB_SYS_FILLRECT
+	select FB_SYS_COPYAREA
+	select FB_SYS_IMAGEBLIT
+	select FB_SYS_FOPS
+	select FB_DEFERRED_IO
+	help
+	  Provides support for epaper controllers from the K190X series
+	  of AUO. These controllers can be used to drive epaper displays
+	  from Sipix.
+
+	  This option enables the common support, shared by the individual
+	  controller drivers. You will also have to enable the driver
+	  for the controller type used in your device.
+
+config FB_AUO_K1900
+	tristate "AUO-K1900 EPD controller support"
+	depends on FB && FB_AUO_K190X
+	help
+	  This driver implements support for the AUO K1900 epd-controller.
+	  This controller can drive Sipix epaper displays but can only do
+	  serial updates, reducing the number of possible frames per second.
+
+config FB_AUO_K1901
+	tristate "AUO-K1901 EPD controller support"
+	depends on FB && FB_AUO_K190X
+	help
+	  This driver implements support for the AUO K1901 epd-controller.
+	  This controller can drive Sipix epaper displays and supports
+	  concurrent updates, making higher frames per second possible.
+
 config FB_JZ4740
 	tristate "JZ4740 LCD framebuffer support"
 	depends on FB && MACH_JZ4740
diff --git a/drivers/video/Makefile b/drivers/video/Makefile
index 9356add945b3..ee8dafb69e36 100644
--- a/drivers/video/Makefile
+++ b/drivers/video/Makefile
@@ -118,6 +118,9 @@ obj-$(CONFIG_FB_PMAGB_B) += pmagb-b-fb.o
 obj-$(CONFIG_FB_MAXINE)           += maxinefb.o
 obj-$(CONFIG_FB_METRONOME)        += metronomefb.o
 obj-$(CONFIG_FB_BROADSHEET)       += broadsheetfb.o
+obj-$(CONFIG_FB_AUO_K190X)        += auo_k190x.o
+obj-$(CONFIG_FB_AUO_K1900)        += auo_k1900fb.o
+obj-$(CONFIG_FB_AUO_K1901)        += auo_k1901fb.o
 obj-$(CONFIG_FB_S1D13XXX)         += s1d13xxxfb.o
 obj-$(CONFIG_FB_SH7760)           += sh7760fb.o
 obj-$(CONFIG_FB_IMX)              += imxfb.o
diff --git a/drivers/video/auo_k1900fb.c b/drivers/video/auo_k1900fb.c
new file mode 100644
index 000000000000..c36cf961dcb2
--- /dev/null
+++ b/drivers/video/auo_k1900fb.c
@@ -0,0 +1,198 @@
1/*
2 * auok190xfb.c -- FB driver for AUO-K1900 controllers
3 *
4 * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * based on broadsheetfb.c
7 *
8 * Copyright (C) 2008, Jaya Kumar
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
15 *
16 * This driver is written to be used with the AUO-K1900 display controller.
17 *
18 * It is intended to be architecture independent. A board specific driver
19 * must be used to perform all the physical IO interactions.
20 *
21 * The controller supports different update modes:
22 * mode0+1 16 step gray (4bit)
23 * mode2 4 step gray (2bit) - FIXME: add strange refresh
24 * mode3 2 step gray (1bit) - FIXME: add strange refresh
25 * mode4 handwriting mode (strange behaviour)
26 * mode5 automatic selection of update mode
27 */
28
29#include <linux/module.h>
30#include <linux/kernel.h>
31#include <linux/errno.h>
32#include <linux/string.h>
33#include <linux/mm.h>
34#include <linux/slab.h>
35#include <linux/delay.h>
36#include <linux/interrupt.h>
37#include <linux/fb.h>
38#include <linux/init.h>
39#include <linux/platform_device.h>
40#include <linux/list.h>
41#include <linux/firmware.h>
42#include <linux/gpio.h>
43#include <linux/pm_runtime.h>
44
45#include <video/auo_k190xfb.h>
46
47#include "auo_k190x.h"
48
49/*
50 * AUO-K1900 specific commands
51 */
52
53#define AUOK1900_CMD_PARTIALDISP 0x1001
54#define AUOK1900_CMD_ROTATION 0x1006
55#define AUOK1900_CMD_LUT_STOP 0x1009
56
57#define AUOK1900_INIT_TEMP_AVERAGE (1 << 13)
58#define AUOK1900_INIT_ROTATE(_x) ((_x & 0x3) << 10)
59#define AUOK1900_INIT_RESOLUTION(_res) ((_res & 0x7) << 2)
60
61static void auok1900_init(struct auok190xfb_par *par)
62{
63 struct auok190x_board *board = par->board;
64 u16 init_param = 0;
65
66 init_param |= AUOK1900_INIT_TEMP_AVERAGE;
67 init_param |= AUOK1900_INIT_ROTATE(par->rotation);
68 init_param |= AUOK190X_INIT_INVERSE_WHITE;
69 init_param |= AUOK190X_INIT_FORMAT0;
70 init_param |= AUOK1900_INIT_RESOLUTION(par->resolution);
71 init_param |= AUOK190X_INIT_SHIFT_RIGHT;
72
73 auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
74
75 /* let the controller finish */
76 board->wait_for_rdy(par);
77}
78
79static void auok1900_update_region(struct auok190xfb_par *par, int mode,
80 u16 y1, u16 y2)
81{
82 struct device *dev = par->info->device;
83 unsigned char *buf = (unsigned char *)par->info->screen_base;
84 int xres = par->info->var.xres;
85 u16 args[4];
86
87 pm_runtime_get_sync(dev);
88
89 mutex_lock(&(par->io_lock));
90
91 /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
92 y1 &= 0xfffe;
93 y2 &= 0xfffe;
94
95 dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
96 1, y1+1, xres, y2-y1, mode);
97
 98	/* FIXME: handle different partial update modes */
99 args[0] = mode | 1;
100 args[1] = y1 + 1;
101 args[2] = xres;
102 args[3] = y2 - y1;
103 buf += y1 * xres;
104 auok190x_send_cmdargs_pixels(par, AUOK1900_CMD_PARTIALDISP, 4, args,
105 ((y2 - y1) * xres)/2, (u16 *) buf);
106 auok190x_send_command(par, AUOK190X_CMD_DATA_STOP);
107
108 par->update_cnt++;
109
110 mutex_unlock(&(par->io_lock));
111
112 pm_runtime_mark_last_busy(dev);
113 pm_runtime_put_autosuspend(dev);
114}
115
116static void auok1900fb_dpy_update_pages(struct auok190xfb_par *par,
117 u16 y1, u16 y2)
118{
119 int mode;
120
121 if (par->update_mode < 0) {
122 mode = AUOK190X_UPDATE_MODE(1);
123 par->last_mode = -1;
124 } else {
125 mode = AUOK190X_UPDATE_MODE(par->update_mode);
126 par->last_mode = par->update_mode;
127 }
128
129 if (par->flash)
130 mode |= AUOK190X_UPDATE_NONFLASH;
131
132 auok1900_update_region(par, mode, y1, y2);
133}
134
135static void auok1900fb_dpy_update(struct auok190xfb_par *par)
136{
137 int mode;
138
139 if (par->update_mode < 0) {
140 mode = AUOK190X_UPDATE_MODE(0);
141 par->last_mode = -1;
142 } else {
143 mode = AUOK190X_UPDATE_MODE(par->update_mode);
144 par->last_mode = par->update_mode;
145 }
146
147 if (par->flash)
148 mode |= AUOK190X_UPDATE_NONFLASH;
149
150 auok1900_update_region(par, mode, 0, par->info->var.yres);
151 par->update_cnt = 0;
152}
153
154static bool auok1900fb_need_refresh(struct auok190xfb_par *par)
155{
156 return (par->update_cnt > 10);
157}
158
159static int __devinit auok1900fb_probe(struct platform_device *pdev)
160{
161 struct auok190x_init_data init;
162 struct auok190x_board *board;
163
164 /* pick up board specific routines */
165 board = pdev->dev.platform_data;
166 if (!board)
167 return -EINVAL;
168
169 /* fill temporary init struct for common init */
170 init.id = "auo_k1900fb";
171 init.board = board;
172 init.update_partial = auok1900fb_dpy_update_pages;
173 init.update_all = auok1900fb_dpy_update;
174 init.need_refresh = auok1900fb_need_refresh;
175 init.init = auok1900_init;
176
177 return auok190x_common_probe(pdev, &init);
178}
179
180static int __devexit auok1900fb_remove(struct platform_device *pdev)
181{
182 return auok190x_common_remove(pdev);
183}
184
185static struct platform_driver auok1900fb_driver = {
186 .probe = auok1900fb_probe,
187 .remove = __devexit_p(auok1900fb_remove),
188 .driver = {
189 .owner = THIS_MODULE,
190 .name = "auo_k1900fb",
191 .pm = &auok190x_pm,
192 },
193};
194module_platform_driver(auok1900fb_driver);
195
196MODULE_DESCRIPTION("framebuffer driver for the AUO-K1900 EPD controller");
197MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
198MODULE_LICENSE("GPL");
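
auok1900fb_probe() above bails out with -EINVAL unless the platform device carries an auok190x_board in platform_data, and auok190x_common_probe() (in auo_k190x.c further down) additionally requires the init, cleanup, wait_for_rdy, set_ctl, set_hdb, get_hdb and setup_irq callbacks. A rough sketch of the board-side registration follows; the callback signatures, field types and GPIO numbers are inferred from how the fields are used in this patch and are assumptions, not definitions copied from <video/auo_k190xfb.h>:

#include <linux/types.h>
#include <linux/fb.h>
#include <linux/platform_device.h>
#include <video/auo_k190xfb.h>

/* hypothetical board callbacks - a real board drives its I80 bus here */
static int  myboard_init(struct auok190xfb_par *par)            { return 0; }
static void myboard_cleanup(struct auok190xfb_par *par)         { }
static int  myboard_wait_for_rdy(struct auok190xfb_par *par)    { return 0; }
static void myboard_set_ctl(struct auok190xfb_par *par,
			    unsigned char bit, u8 state)         { }
static void myboard_set_hdb(struct auok190xfb_par *par, u16 data) { }
static u16  myboard_get_hdb(struct auok190xfb_par *par)         { return 0; }
static int  myboard_setup_irq(struct fb_info *info)             { return 0; }

static struct auok190x_board myboard_auok1900 = {
	.init		= myboard_init,
	.cleanup	= myboard_cleanup,
	.wait_for_rdy	= myboard_wait_for_rdy,
	.set_ctl	= myboard_set_ctl,
	.set_hdb	= myboard_set_hdb,
	.get_hdb	= myboard_get_hdb,
	.setup_irq	= myboard_setup_irq,
	.gpio_nsleep	= 42,			/* hypothetical GPIO numbers */
	.gpio_nrst	= 43,
	.resolution	= AUOK190X_RESOLUTION_800_600,
	.rotation	= 0,
	.fps		= 10,
	.quirks		= 0,
};

static struct platform_device myboard_epd = {
	.name	= "auo_k1900fb",	/* matches the driver name above */
	.id	= -1,
	.dev	= { .platform_data = &myboard_auok1900 },
};
/* a board file would then call platform_device_register(&myboard_epd); */

A real board driver implements the callbacks on top of its actual I80/GPIO wiring; the stubs here only show the shape of the structure the probe expects.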
diff --git a/drivers/video/auo_k1901fb.c b/drivers/video/auo_k1901fb.c
new file mode 100644
index 000000000000..1c054c18616e
--- /dev/null
+++ b/drivers/video/auo_k1901fb.c
@@ -0,0 +1,251 @@
1/*
2 * auok190xfb.c -- FB driver for AUO-K1901 controllers
3 *
4 * Copyright (C) 2011, 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * based on broadsheetfb.c
7 *
8 * Copyright (C) 2008, Jaya Kumar
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License version 2 as
12 * published by the Free Software Foundation.
13 *
14 * Layout is based on skeletonfb.c by James Simmons and Geert Uytterhoeven.
15 *
16 * This driver is written to be used with the AUO-K1901 display controller.
17 *
18 * It is intended to be architecture independent. A board specific driver
19 * must be used to perform all the physical IO interactions.
20 *
21 * The controller supports different update modes:
22 * mode0+1 16 step gray (4bit)
23 * mode2+3 4 step gray (2bit)
24 * mode4+5 2 step gray (1bit)
25 * - mode4 is described as "without LUT"
26 * mode7 automatic selection of update mode
27 *
 28 * The most interesting difference from the K1900 is the ability to do screen
29 * updates in an asynchronous fashion. Where the K1900 needs to wait for the
30 * current update to complete, the K1901 can process later updates already.
31 */
32
33#include <linux/module.h>
34#include <linux/kernel.h>
35#include <linux/errno.h>
36#include <linux/string.h>
37#include <linux/mm.h>
38#include <linux/slab.h>
39#include <linux/delay.h>
40#include <linux/interrupt.h>
41#include <linux/fb.h>
42#include <linux/init.h>
43#include <linux/platform_device.h>
44#include <linux/list.h>
45#include <linux/firmware.h>
46#include <linux/gpio.h>
47#include <linux/pm_runtime.h>
48
49#include <video/auo_k190xfb.h>
50
51#include "auo_k190x.h"
52
53/*
54 * AUO-K1901 specific commands
55 */
56
57#define AUOK1901_CMD_LUT_INTERFACE 0x0005
58#define AUOK1901_CMD_DMA_START 0x1001
59#define AUOK1901_CMD_CURSOR_START 0x1007
60#define AUOK1901_CMD_CURSOR_STOP AUOK190X_CMD_DATA_STOP
61#define AUOK1901_CMD_DDMA_START 0x1009
62
63#define AUOK1901_INIT_GATE_PULSE_LOW (0 << 14)
64#define AUOK1901_INIT_GATE_PULSE_HIGH (1 << 14)
65#define AUOK1901_INIT_SINGLE_GATE (0 << 13)
66#define AUOK1901_INIT_DOUBLE_GATE (1 << 13)
67
68/* Bits to pixels
69 * Mode 15-12 11-8 7-4 3-0
70 * format2 2 T 1 T
71 * format3 1 T 2 T
72 * format4 T 2 T 1
73 * format5 T 1 T 2
74 *
75 * halftone modes:
76 * format6 2 2 1 1
77 * format7 1 1 2 2
78 */
79#define AUOK1901_INIT_FORMAT2 (1 << 7)
80#define AUOK1901_INIT_FORMAT3 ((1 << 7) | (1 << 6))
81#define AUOK1901_INIT_FORMAT4 (1 << 8)
82#define AUOK1901_INIT_FORMAT5 ((1 << 8) | (1 << 6))
83#define AUOK1901_INIT_FORMAT6 ((1 << 8) | (1 << 7))
84#define AUOK1901_INIT_FORMAT7 ((1 << 8) | (1 << 7) | (1 << 6))
85
86/* res[4] to bit 10
87 * res[3-0] to bits 5-2
88 */
89#define AUOK1901_INIT_RESOLUTION(_res) (((_res & (1 << 4)) << 6) \
90 | ((_res & 0xf) << 2))
91
92/*
93 * portrait / landscape orientation in AUOK1901_CMD_DMA_START
94 */
95#define AUOK1901_DMA_ROTATE90(_rot) ((_rot & 1) << 13)
96
97/*
 98 * equivalent to 1 << 11, needs the ~ to get the same rotation as the K1900
99 */
100#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10)
101
102static void auok1901_init(struct auok190xfb_par *par)
103{
104 struct auok190x_board *board = par->board;
105 u16 init_param = 0;
106
107 init_param |= AUOK190X_INIT_INVERSE_WHITE;
108 init_param |= AUOK190X_INIT_FORMAT0;
109 init_param |= AUOK1901_INIT_RESOLUTION(par->resolution);
110 init_param |= AUOK190X_INIT_SHIFT_LEFT;
111
112 auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
113
114 /* let the controller finish */
115 board->wait_for_rdy(par);
116}
117
118static void auok1901_update_region(struct auok190xfb_par *par, int mode,
119 u16 y1, u16 y2)
120{
121 struct device *dev = par->info->device;
122 unsigned char *buf = (unsigned char *)par->info->screen_base;
123 int xres = par->info->var.xres;
124 u16 args[5];
125
126 pm_runtime_get_sync(dev);
127
128 mutex_lock(&(par->io_lock));
129
130 /* y1 and y2 must be a multiple of 2 so drop the lowest bit */
131 y1 &= 0xfffe;
132 y2 &= 0xfffe;
133
134 dev_dbg(dev, "update (x,y,w,h,mode)=(%d,%d,%d,%d,%d)\n",
135 1, y1+1, xres, y2-y1, mode);
136
137 /* K1901: first transfer the region data */
138 args[0] = AUOK1901_DMA_ROTATE90(par->rotation) | 1;
139 args[1] = y1 + 1;
140 args[2] = xres;
141 args[3] = y2 - y1;
142 buf += y1 * xres;
143 auok190x_send_cmdargs_pixels_nowait(par, AUOK1901_CMD_DMA_START, 4,
144 args, ((y2 - y1) * xres)/2,
145 (u16 *) buf);
146 auok190x_send_command_nowait(par, AUOK190X_CMD_DATA_STOP);
147
148 /* K1901: second tell the controller to update the region with mode */
149 args[0] = mode | AUOK1901_DDMA_ROTATE180(par->rotation);
150 args[1] = 1;
151 args[2] = y1 + 1;
152 args[3] = xres;
153 args[4] = y2 - y1;
154 auok190x_send_cmdargs_nowait(par, AUOK1901_CMD_DDMA_START, 5, args);
155
156 par->update_cnt++;
157
158 mutex_unlock(&(par->io_lock));
159
160 pm_runtime_mark_last_busy(dev);
161 pm_runtime_put_autosuspend(dev);
162}
163
164static void auok1901fb_dpy_update_pages(struct auok190xfb_par *par,
165 u16 y1, u16 y2)
166{
167 int mode;
168
169 if (par->update_mode < 0) {
170 mode = AUOK190X_UPDATE_MODE(1);
171 par->last_mode = -1;
172 } else {
173 mode = AUOK190X_UPDATE_MODE(par->update_mode);
174 par->last_mode = par->update_mode;
175 }
176
177 if (par->flash)
178 mode |= AUOK190X_UPDATE_NONFLASH;
179
180 auok1901_update_region(par, mode, y1, y2);
181}
182
183static void auok1901fb_dpy_update(struct auok190xfb_par *par)
184{
185 int mode;
186
187 /* When doing full updates, wait for the controller to be ready
188 * This will hopefully catch some hangs of the K1901
189 */
190 par->board->wait_for_rdy(par);
191
192 if (par->update_mode < 0) {
193 mode = AUOK190X_UPDATE_MODE(0);
194 par->last_mode = -1;
195 } else {
196 mode = AUOK190X_UPDATE_MODE(par->update_mode);
197 par->last_mode = par->update_mode;
198 }
199
200 if (par->flash)
201 mode |= AUOK190X_UPDATE_NONFLASH;
202
203 auok1901_update_region(par, mode, 0, par->info->var.yres);
204 par->update_cnt = 0;
205}
206
207static bool auok1901fb_need_refresh(struct auok190xfb_par *par)
208{
209 return (par->update_cnt > 10);
210}
211
212static int __devinit auok1901fb_probe(struct platform_device *pdev)
213{
214 struct auok190x_init_data init;
215 struct auok190x_board *board;
216
217 /* pick up board specific routines */
218 board = pdev->dev.platform_data;
219 if (!board)
220 return -EINVAL;
221
222 /* fill temporary init struct for common init */
223 init.id = "auo_k1901fb";
224 init.board = board;
225 init.update_partial = auok1901fb_dpy_update_pages;
226 init.update_all = auok1901fb_dpy_update;
227 init.need_refresh = auok1901fb_need_refresh;
228 init.init = auok1901_init;
229
230 return auok190x_common_probe(pdev, &init);
231}
232
233static int __devexit auok1901fb_remove(struct platform_device *pdev)
234{
235 return auok190x_common_remove(pdev);
236}
237
238static struct platform_driver auok1901fb_driver = {
239 .probe = auok1901fb_probe,
240 .remove = __devexit_p(auok1901fb_remove),
241 .driver = {
242 .owner = THIS_MODULE,
243 .name = "auo_k1901fb",
244 .pm = &auok190x_pm,
245 },
246};
247module_platform_driver(auok1901fb_driver);
248
249MODULE_DESCRIPTION("framebuffer driver for the AUO-K1901 EPD controller");
250MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
251MODULE_LICENSE("GPL");
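
The AUOK1901_DMA_ROTATE90 and AUOK1901_DDMA_ROTATE180 macros above derive the rotation bits of the two update commands from the board's 0-3 rotation value, and the comment notes that the DDMA macro is "equivalent to 1 << 11" with an inversion to match the K1900. A small stand-alone sketch (plain user-space C, not driver code) that simply evaluates both expressions for every rotation makes the bit behaviour explicit:

#include <stdio.h>

/* copies of the two macros from auo_k1901fb.c, evaluated for illustration */
#define AUOK1901_DMA_ROTATE90(_rot)   ((_rot & 1) << 13)
#define AUOK1901_DDMA_ROTATE180(_rot) ((~_rot & 2) << 10)

int main(void)
{
	int rot;

	for (rot = 0; rot < 4; rot++)
		printf("rot=%d  DMA_ROTATE90=0x%04x  DDMA_ROTATE180=0x%04x\n",
		       rot, AUOK1901_DMA_ROTATE90(rot),
		       AUOK1901_DDMA_ROTATE180(rot));

	/*
	 * Output: rot=0 -> 0x0000/0x0800, rot=1 -> 0x2000/0x0800,
	 *         rot=2 -> 0x0000/0x0000, rot=3 -> 0x2000/0x0000.
	 * Bit 13 follows the "90 degree" bit of the rotation, and bit 11
	 * (0x0800) is set for rotations 0/1 and cleared for 2/3 because
	 * of the inversion.
	 */
	return 0;
}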
diff --git a/drivers/video/auo_k190x.c b/drivers/video/auo_k190x.c
new file mode 100644
index 000000000000..77da6a2f43dc
--- /dev/null
+++ b/drivers/video/auo_k190x.c
@@ -0,0 +1,1046 @@
1/*
2 * Common code for AUO-K190X framebuffer drivers
3 *
4 * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/module.h>
12#include <linux/kernel.h>
13#include <linux/gpio.h>
14#include <linux/pm_runtime.h>
15#include <linux/fb.h>
16#include <linux/delay.h>
17#include <linux/uaccess.h>
18#include <linux/vmalloc.h>
19#include <linux/regulator/consumer.h>
20
21#include <video/auo_k190xfb.h>
22
23#include "auo_k190x.h"
24
25struct panel_info {
26 int w;
27 int h;
28};
29
30/* table of panel specific parameters to be indexed into by the board drivers */
31static struct panel_info panel_table[] = {
32 /* standard 6" */
33 [AUOK190X_RESOLUTION_800_600] = {
34 .w = 800,
35 .h = 600,
36 },
37 /* standard 9" */
38 [AUOK190X_RESOLUTION_1024_768] = {
39 .w = 1024,
40 .h = 768,
41 },
42};
43
44/*
45 * private I80 interface to the board driver
46 */
47
48static void auok190x_issue_data(struct auok190xfb_par *par, u16 data)
49{
50 par->board->set_ctl(par, AUOK190X_I80_WR, 0);
51 par->board->set_hdb(par, data);
52 par->board->set_ctl(par, AUOK190X_I80_WR, 1);
53}
54
55static void auok190x_issue_cmd(struct auok190xfb_par *par, u16 data)
56{
57 par->board->set_ctl(par, AUOK190X_I80_DC, 0);
58 auok190x_issue_data(par, data);
59 par->board->set_ctl(par, AUOK190X_I80_DC, 1);
60}
61
62static int auok190x_issue_pixels(struct auok190xfb_par *par, int size,
63 u16 *data)
64{
65 struct device *dev = par->info->device;
66 int i;
67 u16 tmp;
68
69 if (size & 3) {
70 dev_err(dev, "issue_pixels: size %d must be a multiple of 4\n",
71 size);
72 return -EINVAL;
73 }
74
75 for (i = 0; i < (size >> 1); i++) {
76 par->board->set_ctl(par, AUOK190X_I80_WR, 0);
77
78 /* simple reduction of 8bit staticgray to 4bit gray
79 * combines 4 * 4bit pixel values into a 16bit value
80 */
81 tmp = (data[2*i] & 0xF0) >> 4;
82 tmp |= (data[2*i] & 0xF000) >> 8;
83 tmp |= (data[2*i+1] & 0xF0) << 4;
84 tmp |= (data[2*i+1] & 0xF000);
85
86 par->board->set_hdb(par, tmp);
87 par->board->set_ctl(par, AUOK190X_I80_WR, 1);
88 }
89
90 return 0;
91}
92
93static u16 auok190x_read_data(struct auok190xfb_par *par)
94{
95 u16 data;
96
97 par->board->set_ctl(par, AUOK190X_I80_OE, 0);
98 data = par->board->get_hdb(par);
99 par->board->set_ctl(par, AUOK190X_I80_OE, 1);
100
101 return data;
102}
103
104/*
105 * Command interface for the controller drivers
106 */
107
108void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data)
109{
110 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
111 auok190x_issue_cmd(par, data);
112 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
113}
114EXPORT_SYMBOL_GPL(auok190x_send_command_nowait);
115
116void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
117 int argc, u16 *argv)
118{
119 int i;
120
121 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
122 auok190x_issue_cmd(par, cmd);
123
124 for (i = 0; i < argc; i++)
125 auok190x_issue_data(par, argv[i]);
126 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
127}
128EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_nowait);
129
130int auok190x_send_command(struct auok190xfb_par *par, u16 data)
131{
132 int ret;
133
134 ret = par->board->wait_for_rdy(par);
135 if (ret)
136 return ret;
137
138 auok190x_send_command_nowait(par, data);
139 return 0;
140}
141EXPORT_SYMBOL_GPL(auok190x_send_command);
142
143int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
144 int argc, u16 *argv)
145{
146 int ret;
147
148 ret = par->board->wait_for_rdy(par);
149 if (ret)
150 return ret;
151
152 auok190x_send_cmdargs_nowait(par, cmd, argc, argv);
153 return 0;
154}
155EXPORT_SYMBOL_GPL(auok190x_send_cmdargs);
156
157int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
158 int argc, u16 *argv)
159{
160 int i, ret;
161
162 ret = par->board->wait_for_rdy(par);
163 if (ret)
164 return ret;
165
166 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
167 auok190x_issue_cmd(par, cmd);
168
169 for (i = 0; i < argc; i++)
170 argv[i] = auok190x_read_data(par);
171 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
172
173 return 0;
174}
175EXPORT_SYMBOL_GPL(auok190x_read_cmdargs);
176
177void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par, u16 cmd,
178 int argc, u16 *argv, int size, u16 *data)
179{
180 int i;
181
182 par->board->set_ctl(par, AUOK190X_I80_CS, 0);
183
184 auok190x_issue_cmd(par, cmd);
185
186 for (i = 0; i < argc; i++)
187 auok190x_issue_data(par, argv[i]);
188
189 auok190x_issue_pixels(par, size, data);
190
191 par->board->set_ctl(par, AUOK190X_I80_CS, 1);
192}
193EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels_nowait);
194
195int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
196 int argc, u16 *argv, int size, u16 *data)
197{
198 int ret;
199
200 ret = par->board->wait_for_rdy(par);
201 if (ret)
202 return ret;
203
204 auok190x_send_cmdargs_pixels_nowait(par, cmd, argc, argv, size, data);
205
206 return 0;
207}
208EXPORT_SYMBOL_GPL(auok190x_send_cmdargs_pixels);
209
210/*
211 * fbdefio callbacks - common on both controllers.
212 */
213
214static void auok190xfb_dpy_first_io(struct fb_info *info)
215{
216 /* tell runtime-pm that we wish to use the device in a short time */
217 pm_runtime_get(info->device);
218}
219
220/* this is called back from the deferred io workqueue */
221static void auok190xfb_dpy_deferred_io(struct fb_info *info,
222 struct list_head *pagelist)
223{
224 struct fb_deferred_io *fbdefio = info->fbdefio;
225 struct auok190xfb_par *par = info->par;
226 u16 yres = info->var.yres;
227 u16 xres = info->var.xres;
228 u16 y1 = 0, h = 0;
229 int prev_index = -1;
230 struct page *cur;
231 int h_inc;
232 int threshold;
233
234 if (!list_empty(pagelist))
235 /* the device resume should've been requested through first_io,
236 * if the resume did not finish until now, wait for it.
237 */
238 pm_runtime_barrier(info->device);
239 else
240 /* We reached this via the fsync or some other way.
241 * In either case the first_io function did not run,
242 * so we runtime_resume the device here synchronously.
243 */
244 pm_runtime_get_sync(info->device);
245
246 /* Do a full screen update every n updates to prevent
247 * excessive darkening of the Sipix display.
248 * If we do this, there is no need to walk the pages.
249 */
250 if (par->need_refresh(par)) {
251 par->update_all(par);
252 goto out;
253 }
254
255 /* height increment is fixed per page */
256 h_inc = DIV_ROUND_UP(PAGE_SIZE , xres);
257
258 /* calculate number of pages from pixel height */
259 threshold = par->consecutive_threshold / h_inc;
260 if (threshold < 1)
261 threshold = 1;
262
263 /* walk the written page list and swizzle the data */
264 list_for_each_entry(cur, &fbdefio->pagelist, lru) {
265 if (prev_index < 0) {
266 /* just starting so assign first page */
267 y1 = (cur->index << PAGE_SHIFT) / xres;
268 h = h_inc;
269 } else if ((cur->index - prev_index) <= threshold) {
270 /* page is within our threshold for single updates */
271 h += h_inc * (cur->index - prev_index);
272 } else {
273 /* page not consecutive, issue previous update first */
274 par->update_partial(par, y1, y1 + h);
275
276 /* start over with our non consecutive page */
277 y1 = (cur->index << PAGE_SHIFT) / xres;
278 h = h_inc;
279 }
280 prev_index = cur->index;
281 }
282
283 /* if we still have any pages to update we do so now */
284 if (h >= yres)
 285		/* it's a full screen update, just do it */
286 par->update_all(par);
287 else
288 par->update_partial(par, y1, min((u16) (y1 + h), yres));
289
290out:
291 pm_runtime_mark_last_busy(info->device);
292 pm_runtime_put_autosuspend(info->device);
293}
294
295/*
296 * framebuffer operations
297 */
298
299/*
300 * this is the slow path from userspace. they can seek and write to
301 * the fb. it's inefficient to do anything less than a full screen draw
302 */
303static ssize_t auok190xfb_write(struct fb_info *info, const char __user *buf,
304 size_t count, loff_t *ppos)
305{
306 struct auok190xfb_par *par = info->par;
307 unsigned long p = *ppos;
308 void *dst;
309 int err = 0;
310 unsigned long total_size;
311
312 if (info->state != FBINFO_STATE_RUNNING)
313 return -EPERM;
314
315 total_size = info->fix.smem_len;
316
317 if (p > total_size)
318 return -EFBIG;
319
320 if (count > total_size) {
321 err = -EFBIG;
322 count = total_size;
323 }
324
325 if (count + p > total_size) {
326 if (!err)
327 err = -ENOSPC;
328
329 count = total_size - p;
330 }
331
332 dst = (void *)(info->screen_base + p);
333
334 if (copy_from_user(dst, buf, count))
335 err = -EFAULT;
336
337 if (!err)
338 *ppos += count;
339
340 par->update_all(par);
341
342 return (err) ? err : count;
343}
344
345static void auok190xfb_fillrect(struct fb_info *info,
346 const struct fb_fillrect *rect)
347{
348 struct auok190xfb_par *par = info->par;
349
350 sys_fillrect(info, rect);
351
352 par->update_all(par);
353}
354
355static void auok190xfb_copyarea(struct fb_info *info,
356 const struct fb_copyarea *area)
357{
358 struct auok190xfb_par *par = info->par;
359
360 sys_copyarea(info, area);
361
362 par->update_all(par);
363}
364
365static void auok190xfb_imageblit(struct fb_info *info,
366 const struct fb_image *image)
367{
368 struct auok190xfb_par *par = info->par;
369
370 sys_imageblit(info, image);
371
372 par->update_all(par);
373}
374
375static int auok190xfb_check_var(struct fb_var_screeninfo *var,
376 struct fb_info *info)
377{
378 if (info->var.xres != var->xres || info->var.yres != var->yres ||
379 info->var.xres_virtual != var->xres_virtual ||
380 info->var.yres_virtual != var->yres_virtual) {
381 pr_info("%s: Resolution not supported: X%u x Y%u\n",
382 __func__, var->xres, var->yres);
383 return -EINVAL;
384 }
385
386 /*
387 * Memory limit
388 */
389
390 if ((info->fix.line_length * var->yres_virtual) > info->fix.smem_len) {
391 pr_info("%s: Memory Limit requested yres_virtual = %u\n",
392 __func__, var->yres_virtual);
393 return -ENOMEM;
394 }
395
396 return 0;
397}
398
399static struct fb_ops auok190xfb_ops = {
400 .owner = THIS_MODULE,
401 .fb_read = fb_sys_read,
402 .fb_write = auok190xfb_write,
403 .fb_fillrect = auok190xfb_fillrect,
404 .fb_copyarea = auok190xfb_copyarea,
405 .fb_imageblit = auok190xfb_imageblit,
406 .fb_check_var = auok190xfb_check_var,
407};
408
409/*
410 * Controller-functions common to both K1900 and K1901
411 */
412
413static int auok190x_read_temperature(struct auok190xfb_par *par)
414{
415 struct device *dev = par->info->device;
416 u16 data[4];
417 int temp;
418
419 pm_runtime_get_sync(dev);
420
421 mutex_lock(&(par->io_lock));
422
423 auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
424
425 mutex_unlock(&(par->io_lock));
426
427 pm_runtime_mark_last_busy(dev);
428 pm_runtime_put_autosuspend(dev);
429
 430	/* sanitize and split off the half-degrees for now */
431 temp = ((data[0] & AUOK190X_VERSION_TEMP_MASK) >> 1);
432
433 /* handle positive and negative temperatures */
434 if (temp >= 201)
435 return (255 - temp + 1) * (-1);
436 else
437 return temp;
438}
439
440static void auok190x_identify(struct auok190xfb_par *par)
441{
442 struct device *dev = par->info->device;
443 u16 data[4];
444
445 pm_runtime_get_sync(dev);
446
447 mutex_lock(&(par->io_lock));
448
449 auok190x_read_cmdargs(par, AUOK190X_CMD_READ_VERSION, 4, data);
450
451 mutex_unlock(&(par->io_lock));
452
453 par->epd_type = data[1] & AUOK190X_VERSION_TEMP_MASK;
454
455 par->panel_size_int = AUOK190X_VERSION_SIZE_INT(data[2]);
456 par->panel_size_float = AUOK190X_VERSION_SIZE_FLOAT(data[2]);
457 par->panel_model = AUOK190X_VERSION_MODEL(data[2]);
458
459 par->tcon_version = AUOK190X_VERSION_TCON(data[3]);
460 par->lut_version = AUOK190X_VERSION_LUT(data[3]);
461
462 dev_dbg(dev, "panel %d.%din, model 0x%x, EPD 0x%x TCON-rev 0x%x, LUT-rev 0x%x",
463 par->panel_size_int, par->panel_size_float, par->panel_model,
464 par->epd_type, par->tcon_version, par->lut_version);
465
466 pm_runtime_mark_last_busy(dev);
467 pm_runtime_put_autosuspend(dev);
468}
469
470/*
471 * Sysfs functions
472 */
473
474static ssize_t update_mode_show(struct device *dev,
475 struct device_attribute *attr, char *buf)
476{
477 struct fb_info *info = dev_get_drvdata(dev);
478 struct auok190xfb_par *par = info->par;
479
480 return sprintf(buf, "%d\n", par->update_mode);
481}
482
483static ssize_t update_mode_store(struct device *dev,
484 struct device_attribute *attr,
485 const char *buf, size_t count)
486{
487 struct fb_info *info = dev_get_drvdata(dev);
488 struct auok190xfb_par *par = info->par;
489 int mode, ret;
490
491 ret = kstrtoint(buf, 10, &mode);
492 if (ret)
493 return ret;
494
495 par->update_mode = mode;
496
497 /* if we enter a better mode, do a full update */
498 if (par->last_mode > 1 && mode < par->last_mode)
499 par->update_all(par);
500
501 return count;
502}
503
504static ssize_t flash_show(struct device *dev, struct device_attribute *attr,
505 char *buf)
506{
507 struct fb_info *info = dev_get_drvdata(dev);
508 struct auok190xfb_par *par = info->par;
509
510 return sprintf(buf, "%d\n", par->flash);
511}
512
513static ssize_t flash_store(struct device *dev, struct device_attribute *attr,
514 const char *buf, size_t count)
515{
516 struct fb_info *info = dev_get_drvdata(dev);
517 struct auok190xfb_par *par = info->par;
518 int flash, ret;
519
520 ret = kstrtoint(buf, 10, &flash);
521 if (ret)
522 return ret;
523
524 if (flash > 0)
525 par->flash = 1;
526 else
527 par->flash = 0;
528
529 return count;
530}
531
532static ssize_t temp_show(struct device *dev, struct device_attribute *attr,
533 char *buf)
534{
535 struct fb_info *info = dev_get_drvdata(dev);
536 struct auok190xfb_par *par = info->par;
537 int temp;
538
539 temp = auok190x_read_temperature(par);
540 return sprintf(buf, "%d\n", temp);
541}
542
543static DEVICE_ATTR(update_mode, 0644, update_mode_show, update_mode_store);
544static DEVICE_ATTR(flash, 0644, flash_show, flash_store);
545static DEVICE_ATTR(temp, 0644, temp_show, NULL);
546
547static struct attribute *auok190x_attributes[] = {
548 &dev_attr_update_mode.attr,
549 &dev_attr_flash.attr,
550 &dev_attr_temp.attr,
551 NULL
552};
553
554static const struct attribute_group auok190x_attr_group = {
555 .attrs = auok190x_attributes,
556};
557
558static int auok190x_power(struct auok190xfb_par *par, bool on)
559{
560 struct auok190x_board *board = par->board;
561 int ret;
562
563 if (on) {
564 /* We should maintain POWER up for at least 80ms before set
565 * RST_N and SLP_N to high (TCON spec 20100803_v35 p59)
566 */
567 ret = regulator_enable(par->regulator);
568 if (ret)
569 return ret;
570
571 msleep(200);
572 gpio_set_value(board->gpio_nrst, 1);
573 gpio_set_value(board->gpio_nsleep, 1);
574 msleep(200);
575 } else {
576 regulator_disable(par->regulator);
577 gpio_set_value(board->gpio_nrst, 0);
578 gpio_set_value(board->gpio_nsleep, 0);
579 }
580
581 return 0;
582}
583
584/*
585 * Recovery - powercycle the controller
586 */
587
588static void auok190x_recover(struct auok190xfb_par *par)
589{
590 auok190x_power(par, 0);
591 msleep(100);
592 auok190x_power(par, 1);
593
594 par->init(par);
595
596 /* wait for init to complete */
597 par->board->wait_for_rdy(par);
598}
599
600/*
601 * Power-management
602 */
603
604#ifdef CONFIG_PM
605static int auok190x_runtime_suspend(struct device *dev)
606{
607 struct platform_device *pdev = to_platform_device(dev);
608 struct fb_info *info = platform_get_drvdata(pdev);
609 struct auok190xfb_par *par = info->par;
610 struct auok190x_board *board = par->board;
611 u16 standby_param;
612
613 /* take and keep the lock until we are resumed, as the controller
614 * will never reach the non-busy state when in standby mode
615 */
616 mutex_lock(&(par->io_lock));
617
618 if (par->standby) {
619 dev_warn(dev, "already in standby, runtime-pm pairing mismatch\n");
620 mutex_unlock(&(par->io_lock));
621 return 0;
622 }
623
624 /* according to runtime_pm.txt runtime_suspend only means, that the
625 * device will not process data and will not communicate with the CPU
626 * As we hold the lock, this stays true even without standby
627 */
628 if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
629 dev_dbg(dev, "runtime suspend without standby\n");
630 goto finish;
631 } else if (board->quirks & AUOK190X_QUIRK_STANDBYPARAM) {
632 /* for some TCON versions STANDBY expects a parameter (0) but
 634	 * it seems the affected tcon versions have yet to be determined.
634 */
635 dev_dbg(dev, "runtime suspend with additional empty param\n");
636 standby_param = 0;
637 auok190x_send_cmdargs(par, AUOK190X_CMD_STANDBY, 1,
638 &standby_param);
639 } else {
640 dev_dbg(dev, "runtime suspend without param\n");
641 auok190x_send_command(par, AUOK190X_CMD_STANDBY);
642 }
643
644 msleep(64);
645
646finish:
647 par->standby = 1;
648
649 return 0;
650}
651
652static int auok190x_runtime_resume(struct device *dev)
653{
654 struct platform_device *pdev = to_platform_device(dev);
655 struct fb_info *info = platform_get_drvdata(pdev);
656 struct auok190xfb_par *par = info->par;
657 struct auok190x_board *board = par->board;
658
659 if (!par->standby) {
660 dev_warn(dev, "not in standby, runtime-pm pairing mismatch\n");
661 return 0;
662 }
663
664 if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
665 dev_dbg(dev, "runtime resume without standby\n");
666 } else {
667 /* when in standby, controller is always busy
668 * and only accepts the wakeup command
669 */
670 dev_dbg(dev, "runtime resume from standby\n");
671 auok190x_send_command_nowait(par, AUOK190X_CMD_WAKEUP);
672
673 msleep(160);
674
675 /* wait for the controller to be ready and release the lock */
676 board->wait_for_rdy(par);
677 }
678
679 par->standby = 0;
680
681 mutex_unlock(&(par->io_lock));
682
683 return 0;
684}
685
686static int auok190x_suspend(struct device *dev)
687{
688 struct platform_device *pdev = to_platform_device(dev);
689 struct fb_info *info = platform_get_drvdata(pdev);
690 struct auok190xfb_par *par = info->par;
691 struct auok190x_board *board = par->board;
692 int ret;
693
694 dev_dbg(dev, "suspend\n");
695 if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
696 /* suspend via powering off the ic */
697 dev_dbg(dev, "suspend with broken standby\n");
698
699 auok190x_power(par, 0);
700 } else {
701 dev_dbg(dev, "suspend using sleep\n");
702
703 /* the sleep state can only be entered from the standby state.
704 * pm_runtime_get_noresume gets called before the suspend call.
 705	 * So the device's usage count is >0 but it is not necessarily
706 * active.
707 */
708 if (!pm_runtime_status_suspended(dev)) {
709 ret = auok190x_runtime_suspend(dev);
710 if (ret < 0) {
711 dev_err(dev, "auok190x_runtime_suspend failed with %d\n",
712 ret);
713 return ret;
714 }
715 par->manual_standby = 1;
716 }
717
718 gpio_direction_output(board->gpio_nsleep, 0);
719 }
720
721 msleep(100);
722
723 return 0;
724}
725
726static int auok190x_resume(struct device *dev)
727{
728 struct platform_device *pdev = to_platform_device(dev);
729 struct fb_info *info = platform_get_drvdata(pdev);
730 struct auok190xfb_par *par = info->par;
731 struct auok190x_board *board = par->board;
732
733 dev_dbg(dev, "resume\n");
734 if (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN) {
735 dev_dbg(dev, "resume with broken standby\n");
736
737 auok190x_power(par, 1);
738
739 par->init(par);
740 } else {
741 dev_dbg(dev, "resume from sleep\n");
742
743 /* device should be in runtime suspend when we were suspended
744 * and pm_runtime_put_sync gets called after this function.
745 * So there is no need to touch the standby mode here at all.
746 */
747 gpio_direction_output(board->gpio_nsleep, 1);
748 msleep(100);
749
750 /* an additional init call seems to be necessary after sleep */
751 auok190x_runtime_resume(dev);
752 par->init(par);
753
 754		/* if we were runtime-suspended before, suspend again */
755 if (!par->manual_standby)
756 auok190x_runtime_suspend(dev);
757 else
758 par->manual_standby = 0;
759 }
760
761 return 0;
762}
763#endif
764
765const struct dev_pm_ops auok190x_pm = {
766 SET_RUNTIME_PM_OPS(auok190x_runtime_suspend, auok190x_runtime_resume,
767 NULL)
768 SET_SYSTEM_SLEEP_PM_OPS(auok190x_suspend, auok190x_resume)
769};
770EXPORT_SYMBOL_GPL(auok190x_pm);
771
772/*
773 * Common probe and remove code
774 */
775
776int __devinit auok190x_common_probe(struct platform_device *pdev,
777 struct auok190x_init_data *init)
778{
779 struct auok190x_board *board = init->board;
780 struct auok190xfb_par *par;
781 struct fb_info *info;
782 struct panel_info *panel;
783 int videomemorysize, ret;
784 unsigned char *videomemory;
785
786 /* check board contents */
787 if (!board->init || !board->cleanup || !board->wait_for_rdy
788 || !board->set_ctl || !board->set_hdb || !board->get_hdb
789 || !board->setup_irq)
790 return -EINVAL;
791
792 info = framebuffer_alloc(sizeof(struct auok190xfb_par), &pdev->dev);
793 if (!info)
794 return -ENOMEM;
795
796 par = info->par;
797 par->info = info;
798 par->board = board;
799 par->recover = auok190x_recover;
800 par->update_partial = init->update_partial;
801 par->update_all = init->update_all;
802 par->need_refresh = init->need_refresh;
803 par->init = init->init;
804
805 /* init update modes */
806 par->update_cnt = 0;
807 par->update_mode = -1;
808 par->last_mode = -1;
809 par->flash = 0;
810
811 par->regulator = regulator_get(info->device, "vdd");
812 if (IS_ERR(par->regulator)) {
813 ret = PTR_ERR(par->regulator);
814 dev_err(info->device, "Failed to get regulator: %d\n", ret);
815 goto err_reg;
816 }
817
818 ret = board->init(par);
819 if (ret) {
820 dev_err(info->device, "board init failed, %d\n", ret);
821 goto err_board;
822 }
823
824 ret = gpio_request(board->gpio_nsleep, "AUOK190x sleep");
825 if (ret) {
826 dev_err(info->device, "could not request sleep gpio, %d\n",
827 ret);
828 goto err_gpio1;
829 }
830
831 ret = gpio_direction_output(board->gpio_nsleep, 0);
832 if (ret) {
833 dev_err(info->device, "could not set sleep gpio, %d\n", ret);
834 goto err_gpio2;
835 }
836
837 ret = gpio_request(board->gpio_nrst, "AUOK190x reset");
838 if (ret) {
839 dev_err(info->device, "could not request reset gpio, %d\n",
840 ret);
841 goto err_gpio2;
842 }
843
844 ret = gpio_direction_output(board->gpio_nrst, 0);
845 if (ret) {
846 dev_err(info->device, "could not set reset gpio, %d\n", ret);
847 goto err_gpio3;
848 }
849
850 ret = auok190x_power(par, 1);
851 if (ret) {
852 dev_err(info->device, "could not power on the device, %d\n",
853 ret);
854 goto err_gpio3;
855 }
856
857 mutex_init(&par->io_lock);
858
859 init_waitqueue_head(&par->waitq);
860
861 ret = par->board->setup_irq(par->info);
862 if (ret) {
863 dev_err(info->device, "could not setup ready-irq, %d\n", ret);
864 goto err_irq;
865 }
866
867 /* wait for init to complete */
868 par->board->wait_for_rdy(par);
869
870 /*
871 * From here on the controller can talk to us
872 */
873
874 /* initialise fix, var, resolution and rotation */
875
876 strlcpy(info->fix.id, init->id, 16);
877 info->fix.type = FB_TYPE_PACKED_PIXELS;
878 info->fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
879 info->fix.xpanstep = 0;
880 info->fix.ypanstep = 0;
881 info->fix.ywrapstep = 0;
882 info->fix.accel = FB_ACCEL_NONE;
883
884 info->var.bits_per_pixel = 8;
885 info->var.grayscale = 1;
886 info->var.red.length = 8;
887 info->var.green.length = 8;
888 info->var.blue.length = 8;
889
890 panel = &panel_table[board->resolution];
891
892 /* if 90 degree rotation, switch width and height */
893 if (board->rotation & 1) {
894 info->var.xres = panel->h;
895 info->var.yres = panel->w;
896 info->var.xres_virtual = panel->h;
897 info->var.yres_virtual = panel->w;
898 info->fix.line_length = panel->h;
899 } else {
900 info->var.xres = panel->w;
901 info->var.yres = panel->h;
902 info->var.xres_virtual = panel->w;
903 info->var.yres_virtual = panel->h;
904 info->fix.line_length = panel->w;
905 }
906
907 par->resolution = board->resolution;
908 par->rotation = board->rotation;
909
910 /* videomemory handling */
911
912 videomemorysize = roundup((panel->w * panel->h), PAGE_SIZE);
913 videomemory = vmalloc(videomemorysize);
914 if (!videomemory) {
915 ret = -ENOMEM;
916 goto err_irq;
917 }
918
919 memset(videomemory, 0, videomemorysize);
920 info->screen_base = (char *)videomemory;
921 info->fix.smem_len = videomemorysize;
922
923 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_VIRTFB;
924 info->fbops = &auok190xfb_ops;
925
926 /* deferred io init */
927
928 info->fbdefio = devm_kzalloc(info->device,
929 sizeof(struct fb_deferred_io),
930 GFP_KERNEL);
931 if (!info->fbdefio) {
932 dev_err(info->device, "Failed to allocate memory\n");
933 ret = -ENOMEM;
934 goto err_defio;
935 }
936
937 dev_dbg(info->device, "targetting %d frames per second\n", board->fps);
938 info->fbdefio->delay = HZ / board->fps;
939 info->fbdefio->first_io = auok190xfb_dpy_first_io,
940 info->fbdefio->deferred_io = auok190xfb_dpy_deferred_io,
941 fb_deferred_io_init(info);
942
943 /* color map */
944
945 ret = fb_alloc_cmap(&info->cmap, 256, 0);
946 if (ret < 0) {
947 dev_err(info->device, "Failed to allocate colormap\n");
948 goto err_cmap;
949 }
950
951 /* controller init */
952
953 par->consecutive_threshold = 100;
954 par->init(par);
955 auok190x_identify(par);
956
957 platform_set_drvdata(pdev, info);
958
959 ret = register_framebuffer(info);
960 if (ret < 0)
961 goto err_regfb;
962
963 ret = sysfs_create_group(&info->device->kobj, &auok190x_attr_group);
964 if (ret)
965 goto err_sysfs;
966
967 dev_info(info->device, "fb%d: %dx%d using %dK of video memory\n",
968 info->node, info->var.xres, info->var.yres,
969 videomemorysize >> 10);
970
971 /* increase autosuspend_delay when we use alternative methods
972 * for runtime_pm
973 */
974 par->autosuspend_delay = (board->quirks & AUOK190X_QUIRK_STANDBYBROKEN)
975 ? 1000 : 200;
976
977 pm_runtime_set_active(info->device);
978 pm_runtime_enable(info->device);
979 pm_runtime_set_autosuspend_delay(info->device, par->autosuspend_delay);
980 pm_runtime_use_autosuspend(info->device);
981
982 return 0;
983
984err_sysfs:
985 unregister_framebuffer(info);
986err_regfb:
987 fb_dealloc_cmap(&info->cmap);
988err_cmap:
989 fb_deferred_io_cleanup(info);
990 kfree(info->fbdefio);
991err_defio:
992 vfree((void *)info->screen_base);
993err_irq:
994 auok190x_power(par, 0);
995err_gpio3:
996 gpio_free(board->gpio_nrst);
997err_gpio2:
998 gpio_free(board->gpio_nsleep);
999err_gpio1:
1000 board->cleanup(par);
1001err_board:
1002 regulator_put(par->regulator);
1003err_reg:
1004 framebuffer_release(info);
1005
1006 return ret;
1007}
1008EXPORT_SYMBOL_GPL(auok190x_common_probe);
1009
1010int __devexit auok190x_common_remove(struct platform_device *pdev)
1011{
1012 struct fb_info *info = platform_get_drvdata(pdev);
1013 struct auok190xfb_par *par = info->par;
1014 struct auok190x_board *board = par->board;
1015
1016 pm_runtime_disable(info->device);
1017
1018 sysfs_remove_group(&info->device->kobj, &auok190x_attr_group);
1019
1020 unregister_framebuffer(info);
1021
1022 fb_dealloc_cmap(&info->cmap);
1023
1024 fb_deferred_io_cleanup(info);
1025 kfree(info->fbdefio);
1026
1027 vfree((void *)info->screen_base);
1028
1029 auok190x_power(par, 0);
1030
1031 gpio_free(board->gpio_nrst);
1032 gpio_free(board->gpio_nsleep);
1033
1034 board->cleanup(par);
1035
1036 regulator_put(par->regulator);
1037
1038 framebuffer_release(info);
1039
1040 return 0;
1041}
1042EXPORT_SYMBOL_GPL(auok190x_common_remove);
1043
1044MODULE_DESCRIPTION("Common code for AUO-K190X controllers");
1045MODULE_AUTHOR("Heiko Stuebner <heiko@sntech.de>");
1046MODULE_LICENSE("GPL");
diff --git a/drivers/video/auo_k190x.h b/drivers/video/auo_k190x.h
new file mode 100644
index 000000000000..e35af1f51b28
--- /dev/null
+++ b/drivers/video/auo_k190x.h
@@ -0,0 +1,129 @@
1/*
2 * Private common definitions for AUO-K190X framebuffer drivers
3 *
4 * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11/*
12 * I80 interface specific defines
13 */
14
15#define AUOK190X_I80_CS 0x01
16#define AUOK190X_I80_DC 0x02
17#define AUOK190X_I80_WR 0x03
18#define AUOK190X_I80_OE 0x04
19
20/*
21 * AUOK190x commands, common to both controllers
22 */
23
24#define AUOK190X_CMD_INIT 0x0000
25#define AUOK190X_CMD_STANDBY 0x0001
26#define AUOK190X_CMD_WAKEUP 0x0002
27#define AUOK190X_CMD_TCON_RESET 0x0003
28#define AUOK190X_CMD_DATA_STOP 0x1002
29#define AUOK190X_CMD_LUT_START 0x1003
30#define AUOK190X_CMD_DISP_REFRESH 0x1004
31#define AUOK190X_CMD_DISP_RESET 0x1005
32#define AUOK190X_CMD_PRE_DISPLAY_START 0x100D
33#define AUOK190X_CMD_PRE_DISPLAY_STOP 0x100F
34#define AUOK190X_CMD_FLASH_W 0x2000
35#define AUOK190X_CMD_FLASH_E 0x2001
36#define AUOK190X_CMD_FLASH_STS 0x2002
37#define AUOK190X_CMD_FRAMERATE 0x3000
38#define AUOK190X_CMD_READ_VERSION 0x4000
39#define AUOK190X_CMD_READ_STATUS 0x4001
40#define AUOK190X_CMD_READ_LUT 0x4003
41#define AUOK190X_CMD_DRIVERTIMING 0x5000
42#define AUOK190X_CMD_LBALANCE 0x5001
43#define AUOK190X_CMD_AGINGMODE 0x6000
44#define AUOK190X_CMD_AGINGEXIT 0x6001
45
46/*
47 * Common settings for AUOK190X_CMD_INIT
48 */
49
50#define AUOK190X_INIT_DATA_FILTER (0 << 12)
51#define AUOK190X_INIT_DATA_BYPASS (1 << 12)
52#define AUOK190X_INIT_INVERSE_WHITE (0 << 9)
53#define AUOK190X_INIT_INVERSE_BLACK (1 << 9)
54#define AUOK190X_INIT_SCAN_DOWN (0 << 1)
55#define AUOK190X_INIT_SCAN_UP (1 << 1)
56#define AUOK190X_INIT_SHIFT_LEFT (0 << 0)
57#define AUOK190X_INIT_SHIFT_RIGHT (1 << 0)
58
59/* Common bits to pixels
60 * Mode 15-12 11-8 7-4 3-0
61 * format0 4 3 2 1
62 * format1 3 4 1 2
63 */
64
65#define AUOK190X_INIT_FORMAT0 0
66#define AUOK190X_INIT_FORMAT1 (1 << 6)
67
68/*
69 * settings for AUOK190X_CMD_RESET
70 */
71
72#define AUOK190X_RESET_TCON (0 << 0)
73#define AUOK190X_RESET_NORMAL (1 << 0)
74#define AUOK190X_RESET_PON (1 << 1)
75
76/*
77 * AUOK190X_CMD_VERSION
78 */
79
80#define AUOK190X_VERSION_TEMP_MASK (0x1ff)
81#define AUOK190X_VERSION_EPD_MASK (0xff)
82#define AUOK190X_VERSION_SIZE_INT(_val) ((_val & 0xfc00) >> 10)
83#define AUOK190X_VERSION_SIZE_FLOAT(_val) ((_val & 0x3c0) >> 6)
84#define AUOK190X_VERSION_MODEL(_val) (_val & 0x3f)
85#define AUOK190X_VERSION_LUT(_val) (_val & 0xff)
86#define AUOK190X_VERSION_TCON(_val) ((_val & 0xff00) >> 8)
87
88/*
89 * update modes for CMD_PARTIALDISP on K1900 and CMD_DDMA on K1901
90 */
91
92#define AUOK190X_UPDATE_MODE(_res) ((_res & 0x7) << 12)
93#define AUOK190X_UPDATE_NONFLASH (1 << 15)
94
95/*
96 * track panel specific parameters for common init
97 */
98
99struct auok190x_init_data {
100 char *id;
101 struct auok190x_board *board;
102
103 void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
104 void (*update_all)(struct auok190xfb_par *par);
105 bool (*need_refresh)(struct auok190xfb_par *par);
106 void (*init)(struct auok190xfb_par *par);
107};
108
109
110extern void auok190x_send_command_nowait(struct auok190xfb_par *par, u16 data);
111extern int auok190x_send_command(struct auok190xfb_par *par, u16 data);
112extern void auok190x_send_cmdargs_nowait(struct auok190xfb_par *par, u16 cmd,
113 int argc, u16 *argv);
114extern int auok190x_send_cmdargs(struct auok190xfb_par *par, u16 cmd,
115 int argc, u16 *argv);
116extern void auok190x_send_cmdargs_pixels_nowait(struct auok190xfb_par *par,
117 u16 cmd, int argc, u16 *argv,
118 int size, u16 *data);
119extern int auok190x_send_cmdargs_pixels(struct auok190xfb_par *par, u16 cmd,
120 int argc, u16 *argv, int size,
121 u16 *data);
122extern int auok190x_read_cmdargs(struct auok190xfb_par *par, u16 cmd,
123 int argc, u16 *argv);
124
125extern int auok190x_common_probe(struct platform_device *pdev,
126 struct auok190x_init_data *init);
127extern int auok190x_common_remove(struct platform_device *pdev);
128
129extern const struct dev_pm_ops auok190x_pm;
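
For reference, a minimal sketch of how a chip-specific driver is expected to sit on top of this private header: it fills a struct auok190x_init_data with its callbacks and hands the platform device to auok190x_common_probe(). The function and device names below are illustrative only, and the assumed CMD_INIT argument layout is a guess; the actual K1900/K1901 drivers are not part of this hunk.

/* hypothetical chip driver built on the auo_k190x common code (sketch only) */
static void examplefb_init(struct auok190xfb_par *par)
{
	u16 init_param = AUOK190X_INIT_DATA_FILTER | AUOK190X_INIT_INVERSE_WHITE |
			 AUOK190X_INIT_FORMAT0 | AUOK190X_INIT_SCAN_DOWN |
			 AUOK190X_INIT_SHIFT_LEFT;

	/* assumed here: AUOK190X_CMD_INIT takes a single argument word */
	auok190x_send_cmdargs(par, AUOK190X_CMD_INIT, 1, &init_param);
}

static int __devinit examplefb_probe(struct platform_device *pdev)
{
	struct auok190x_init_data init = {
		.id	= "examplefb",
		.board	= pdev->dev.platform_data,
		.init	= examplefb_init,
		/* .update_partial, .update_all and .need_refresh omitted here */
	};

	return auok190x_common_probe(pdev, &init);
}
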
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index fa2b03750316..2979292650d6 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -88,7 +88,7 @@ config LCD_PLATFORM
88 88
89config LCD_TOSA 89config LCD_TOSA
90 tristate "Sharp SL-6000 LCD Driver" 90 tristate "Sharp SL-6000 LCD Driver"
91 depends on SPI && MACH_TOSA 91 depends on I2C && SPI && MACH_TOSA
92 help 92 help
 93 	  If you have a Sharp SL-6000 Zaurus say Y to enable a driver	 93 	  If you have a Sharp SL-6000 Zaurus say Y to enable a driver
94 for its LCD. 94 for its LCD.
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 6c9399341bcf..9327cd1b3143 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -263,7 +263,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
263 263
264EXPORT_SYMBOL_GPL(ili9320_probe_spi); 264EXPORT_SYMBOL_GPL(ili9320_probe_spi);
265 265
266int __devexit ili9320_remove(struct ili9320 *ili) 266int ili9320_remove(struct ili9320 *ili)
267{ 267{
268 ili9320_power(ili, FB_BLANK_POWERDOWN); 268 ili9320_power(ili, FB_BLANK_POWERDOWN);
269 269
diff --git a/drivers/video/bfin_adv7393fb.c b/drivers/video/bfin_adv7393fb.c
index 1a268a294478..9bdd4b0c18c8 100644
--- a/drivers/video/bfin_adv7393fb.c
+++ b/drivers/video/bfin_adv7393fb.c
@@ -353,18 +353,16 @@ adv7393_read_proc(char *page, char **start, off_t off,
353 353
354static int 354static int
355adv7393_write_proc(struct file *file, const char __user * buffer, 355adv7393_write_proc(struct file *file, const char __user * buffer,
356 unsigned long count, void *data) 356 size_t count, void *data)
357{ 357{
358 struct adv7393fb_device *fbdev = data; 358 struct adv7393fb_device *fbdev = data;
359 char line[8];
360 unsigned int val; 359 unsigned int val;
361 int ret; 360 int ret;
362 361
363 ret = copy_from_user(line, buffer, count); 362 ret = kstrtouint_from_user(buffer, count, 0, &val);
364 if (ret) 363 if (ret)
365 return -EFAULT; 364 return -EFAULT;
366 365
367 val = simple_strtoul(line, NULL, 0);
368 adv7393_write(fbdev->client, val >> 8, val & 0xff); 366 adv7393_write(fbdev->client, val >> 8, val & 0xff);
369 367
370 return count; 368 return count;
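
The write_proc conversion above replaces the old copy_from_user() plus simple_strtoul() pair with kstrtouint_from_user(), which copies and parses in one step. The same idiom in isolation (the function name and file write context are illustrative, not part of this patch):

static ssize_t example_write(struct file *file, const char __user *buf,
			     size_t count, loff_t *ppos)
{
	unsigned int val;
	int ret;

	/* copies at most count bytes from userspace and parses with base 0 */
	ret = kstrtouint_from_user(buf, count, 0, &val);
	if (ret)
		return ret;

	/* ... act on val ... */
	return count;
}

Returning ret directly keeps -EINVAL for malformed input, whereas the hunk above preserves the old behaviour of collapsing every failure to -EFAULT.
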
@@ -414,14 +412,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
414 if (ret) { 412 if (ret) {
415 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n"); 413 dev_err(&client->dev, "PPI0_FS3 GPIO request failed\n");
416 ret = -EBUSY; 414 ret = -EBUSY;
417 goto out_8; 415 goto free_fbdev;
418 } 416 }
419 } 417 }
420 418
421 if (peripheral_request_list(ppi_pins, DRIVER_NAME)) { 419 if (peripheral_request_list(ppi_pins, DRIVER_NAME)) {
422 dev_err(&client->dev, "requesting PPI peripheral failed\n"); 420 dev_err(&client->dev, "requesting PPI peripheral failed\n");
423 ret = -EFAULT; 421 ret = -EFAULT;
424 goto out_8; 422 goto free_gpio;
425 } 423 }
426 424
427 fbdev->fb_mem = 425 fbdev->fb_mem =
@@ -432,7 +430,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
432 dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n", 430 dev_err(&client->dev, "couldn't allocate dma buffer (%d bytes)\n",
433 (u32) fbdev->fb_len); 431 (u32) fbdev->fb_len);
434 ret = -ENOMEM; 432 ret = -ENOMEM;
435 goto out_7; 433 goto free_ppi_pins;
436 } 434 }
437 435
438 fbdev->info.screen_base = (void *)fbdev->fb_mem; 436 fbdev->info.screen_base = (void *)fbdev->fb_mem;
@@ -464,27 +462,27 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
464 if (!fbdev->info.pseudo_palette) { 462 if (!fbdev->info.pseudo_palette) {
465 dev_err(&client->dev, "failed to allocate pseudo_palette\n"); 463 dev_err(&client->dev, "failed to allocate pseudo_palette\n");
466 ret = -ENOMEM; 464 ret = -ENOMEM;
467 goto out_6; 465 goto free_fb_mem;
468 } 466 }
469 467
470 if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) { 468 if (fb_alloc_cmap(&fbdev->info.cmap, BFIN_LCD_NBR_PALETTE_ENTRIES, 0) < 0) {
471 dev_err(&client->dev, "failed to allocate colormap (%d entries)\n", 469 dev_err(&client->dev, "failed to allocate colormap (%d entries)\n",
472 BFIN_LCD_NBR_PALETTE_ENTRIES); 470 BFIN_LCD_NBR_PALETTE_ENTRIES);
473 ret = -EFAULT; 471 ret = -EFAULT;
474 goto out_5; 472 goto free_palette;
475 } 473 }
476 474
477 if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) { 475 if (request_dma(CH_PPI, "BF5xx_PPI_DMA") < 0) {
478 dev_err(&client->dev, "unable to request PPI DMA\n"); 476 dev_err(&client->dev, "unable to request PPI DMA\n");
479 ret = -EFAULT; 477 ret = -EFAULT;
480 goto out_4; 478 goto free_cmap;
481 } 479 }
482 480
483 if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0, 481 if (request_irq(IRQ_PPI_ERROR, ppi_irq_error, 0,
484 "PPI ERROR", fbdev) < 0) { 482 "PPI ERROR", fbdev) < 0) {
485 dev_err(&client->dev, "unable to request PPI ERROR IRQ\n"); 483 dev_err(&client->dev, "unable to request PPI ERROR IRQ\n");
486 ret = -EFAULT; 484 ret = -EFAULT;
487 goto out_3; 485 goto free_ch_ppi;
488 } 486 }
489 487
490 fbdev->open = 0; 488 fbdev->open = 0;
@@ -494,14 +492,14 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
494 492
495 if (ret) { 493 if (ret) {
496 dev_err(&client->dev, "i2c attach: init error\n"); 494 dev_err(&client->dev, "i2c attach: init error\n");
497 goto out_1; 495 goto free_irq_ppi;
498 } 496 }
499 497
500 498
501 if (register_framebuffer(&fbdev->info) < 0) { 499 if (register_framebuffer(&fbdev->info) < 0) {
502 dev_err(&client->dev, "unable to register framebuffer\n"); 500 dev_err(&client->dev, "unable to register framebuffer\n");
503 ret = -EFAULT; 501 ret = -EFAULT;
504 goto out_1; 502 goto free_irq_ppi;
505 } 503 }
506 504
507 dev_info(&client->dev, "fb%d: %s frame buffer device\n", 505 dev_info(&client->dev, "fb%d: %s frame buffer device\n",
@@ -512,7 +510,7 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
512 if (!entry) { 510 if (!entry) {
513 dev_err(&client->dev, "unable to create /proc entry\n"); 511 dev_err(&client->dev, "unable to create /proc entry\n");
514 ret = -EFAULT; 512 ret = -EFAULT;
515 goto out_0; 513 goto free_fb;
516 } 514 }
517 515
518 entry->read_proc = adv7393_read_proc; 516 entry->read_proc = adv7393_read_proc;
@@ -521,22 +519,25 @@ static int __devinit bfin_adv7393_fb_probe(struct i2c_client *client,
521 519
522 return 0; 520 return 0;
523 521
524 out_0: 522free_fb:
525 unregister_framebuffer(&fbdev->info); 523 unregister_framebuffer(&fbdev->info);
526 out_1: 524free_irq_ppi:
527 free_irq(IRQ_PPI_ERROR, fbdev); 525 free_irq(IRQ_PPI_ERROR, fbdev);
528 out_3: 526free_ch_ppi:
529 free_dma(CH_PPI); 527 free_dma(CH_PPI);
530 out_4: 528free_cmap:
531 dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
532 fbdev->dma_handle);
533 out_5:
534 fb_dealloc_cmap(&fbdev->info.cmap); 529 fb_dealloc_cmap(&fbdev->info.cmap);
535 out_6: 530free_palette:
536 kfree(fbdev->info.pseudo_palette); 531 kfree(fbdev->info.pseudo_palette);
537 out_7: 532free_fb_mem:
533 dma_free_coherent(NULL, fbdev->fb_len, fbdev->fb_mem,
534 fbdev->dma_handle);
535free_ppi_pins:
538 peripheral_free_list(ppi_pins); 536 peripheral_free_list(ppi_pins);
539 out_8: 537free_gpio:
538 if (ANOMALY_05000400)
539 gpio_free(P_IDENT(P_PPI0_FS3));
540free_fbdev:
540 kfree(fbdev); 541 kfree(fbdev);
541 542
542 return ret; 543 return ret;
diff --git a/drivers/video/broadsheetfb.c b/drivers/video/broadsheetfb.c
index 377dde3d5bfc..c95b417d0d41 100644
--- a/drivers/video/broadsheetfb.c
+++ b/drivers/video/broadsheetfb.c
@@ -1211,7 +1211,7 @@ static int __devexit broadsheetfb_remove(struct platform_device *dev)
1211 1211
1212static struct platform_driver broadsheetfb_driver = { 1212static struct platform_driver broadsheetfb_driver = {
1213 .probe = broadsheetfb_probe, 1213 .probe = broadsheetfb_probe,
1214 .remove = broadsheetfb_remove, 1214 .remove = __devexit_p(broadsheetfb_remove),
1215 .driver = { 1215 .driver = {
1216 .owner = THIS_MODULE, 1216 .owner = THIS_MODULE,
1217 .name = "broadsheetfb", 1217 .name = "broadsheetfb",
diff --git a/drivers/video/cobalt_lcdfb.c b/drivers/video/cobalt_lcdfb.c
index f56699d8122a..eae46f6457e2 100644
--- a/drivers/video/cobalt_lcdfb.c
+++ b/drivers/video/cobalt_lcdfb.c
@@ -1,7 +1,8 @@
1/* 1/*
2 * Cobalt server LCD frame buffer driver. 2 * Cobalt/SEAD3 LCD frame buffer driver.
3 * 3 *
4 * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> 4 * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
5 * Copyright (C) 2012 MIPS Technologies, Inc.
5 * 6 *
6 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by 8 * it under the terms of the GNU General Public License as published by
@@ -62,6 +63,7 @@
62#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK) 63#define LCD_CUR_POS(x) ((x) & LCD_CUR_POS_MASK)
63#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE) 64#define LCD_TEXT_POS(x) ((x) | LCD_TEXT_MODE)
64 65
66#ifdef CONFIG_MIPS_COBALT
65static inline void lcd_write_control(struct fb_info *info, u8 control) 67static inline void lcd_write_control(struct fb_info *info, u8 control)
66{ 68{
67 writel((u32)control << 24, info->screen_base); 69 writel((u32)control << 24, info->screen_base);
@@ -81,6 +83,47 @@ static inline u8 lcd_read_data(struct fb_info *info)
81{ 83{
82 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24; 84 return readl(info->screen_base + LCD_DATA_REG_OFFSET) >> 24;
83} 85}
86#else
87
88#define LCD_CTL 0x00
89#define LCD_DATA 0x08
90#define CPLD_STATUS 0x10
91#define CPLD_DATA 0x18
92
93static inline void cpld_wait(struct fb_info *info)
94{
95 do {
96 } while (readl(info->screen_base + CPLD_STATUS) & 1);
97}
98
99static inline void lcd_write_control(struct fb_info *info, u8 control)
100{
101 cpld_wait(info);
102 writel(control, info->screen_base + LCD_CTL);
103}
104
105static inline u8 lcd_read_control(struct fb_info *info)
106{
107 cpld_wait(info);
108 readl(info->screen_base + LCD_CTL);
109 cpld_wait(info);
110 return readl(info->screen_base + CPLD_DATA) & 0xff;
111}
112
113static inline void lcd_write_data(struct fb_info *info, u8 data)
114{
115 cpld_wait(info);
116 writel(data, info->screen_base + LCD_DATA);
117}
118
119static inline u8 lcd_read_data(struct fb_info *info)
120{
121 cpld_wait(info);
122 readl(info->screen_base + LCD_DATA);
123 cpld_wait(info);
124 return readl(info->screen_base + CPLD_DATA) & 0xff;
125}
126#endif
84 127
85static int lcd_busy_wait(struct fb_info *info) 128static int lcd_busy_wait(struct fb_info *info)
86{ 129{
diff --git a/drivers/video/console/Kconfig b/drivers/video/console/Kconfig
index c2d11fef114b..e2c96d01d8f5 100644
--- a/drivers/video/console/Kconfig
+++ b/drivers/video/console/Kconfig
@@ -224,5 +224,19 @@ config FONT_10x18
224 big letters. It fits between the sun 12x22 and the normal 8x16 font. 224 big letters. It fits between the sun 12x22 and the normal 8x16 font.
225 If other fonts are too big or too small for you, say Y, otherwise say N. 225 If other fonts are too big or too small for you, say Y, otherwise say N.
226 226
227config FONT_AUTOSELECT
228 def_bool y
229 depends on FRAMEBUFFER_CONSOLE || SGI_NEWPORT_CONSOLE || STI_CONSOLE || USB_SISUSBVGA_CON
230 depends on !FONT_8x8
231 depends on !FONT_6x11
232 depends on !FONT_7x14
233 depends on !FONT_PEARL_8x8
234 depends on !FONT_ACORN_8x8
235 depends on !FONT_MINI_4x6
236 depends on !FONT_SUN8x16
237 depends on !FONT_SUN12x22
238 depends on !FONT_10x18
239 select FONT_8x16
240
227endmenu 241endmenu
228 242
diff --git a/drivers/video/ep93xx-fb.c b/drivers/video/ep93xx-fb.c
index f8babbeee275..345d96230978 100644
--- a/drivers/video/ep93xx-fb.c
+++ b/drivers/video/ep93xx-fb.c
@@ -507,16 +507,16 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
507 507
508 err = fb_alloc_cmap(&info->cmap, 256, 0); 508 err = fb_alloc_cmap(&info->cmap, 256, 0);
509 if (err) 509 if (err)
510 goto failed; 510 goto failed_cmap;
511 511
512 err = ep93xxfb_alloc_videomem(info); 512 err = ep93xxfb_alloc_videomem(info);
513 if (err) 513 if (err)
514 goto failed; 514 goto failed_videomem;
515 515
516 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 516 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
517 if (!res) { 517 if (!res) {
518 err = -ENXIO; 518 err = -ENXIO;
519 goto failed; 519 goto failed_resource;
520 } 520 }
521 521
522 /* 522 /*
@@ -532,7 +532,7 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
532 fbi->mmio_base = ioremap(res->start, resource_size(res)); 532 fbi->mmio_base = ioremap(res->start, resource_size(res));
533 if (!fbi->mmio_base) { 533 if (!fbi->mmio_base) {
534 err = -ENXIO; 534 err = -ENXIO;
535 goto failed; 535 goto failed_resource;
536 } 536 }
537 537
538 strcpy(info->fix.id, pdev->name); 538 strcpy(info->fix.id, pdev->name);
@@ -553,24 +553,24 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
553 if (err == 0) { 553 if (err == 0) {
554 dev_err(info->dev, "No suitable video mode found\n"); 554 dev_err(info->dev, "No suitable video mode found\n");
555 err = -EINVAL; 555 err = -EINVAL;
556 goto failed; 556 goto failed_mode;
557 } 557 }
558 558
559 if (mach_info->setup) { 559 if (mach_info->setup) {
560 err = mach_info->setup(pdev); 560 err = mach_info->setup(pdev);
561 if (err) 561 if (err)
562 return err; 562 goto failed_mode;
563 } 563 }
564 564
565 err = ep93xxfb_check_var(&info->var, info); 565 err = ep93xxfb_check_var(&info->var, info);
566 if (err) 566 if (err)
567 goto failed; 567 goto failed_check;
568 568
569 fbi->clk = clk_get(info->dev, NULL); 569 fbi->clk = clk_get(info->dev, NULL);
570 if (IS_ERR(fbi->clk)) { 570 if (IS_ERR(fbi->clk)) {
571 err = PTR_ERR(fbi->clk); 571 err = PTR_ERR(fbi->clk);
572 fbi->clk = NULL; 572 fbi->clk = NULL;
573 goto failed; 573 goto failed_check;
574 } 574 }
575 575
576 ep93xxfb_set_par(info); 576 ep93xxfb_set_par(info);
@@ -585,15 +585,17 @@ static int __devinit ep93xxfb_probe(struct platform_device *pdev)
585 return 0; 585 return 0;
586 586
587failed: 587failed:
588 if (fbi->clk) 588 clk_put(fbi->clk);
589 clk_put(fbi->clk); 589failed_check:
590 if (fbi->mmio_base)
591 iounmap(fbi->mmio_base);
592 ep93xxfb_dealloc_videomem(info);
593 if (&info->cmap)
594 fb_dealloc_cmap(&info->cmap);
595 if (fbi->mach_info->teardown) 590 if (fbi->mach_info->teardown)
596 fbi->mach_info->teardown(pdev); 591 fbi->mach_info->teardown(pdev);
592failed_mode:
593 iounmap(fbi->mmio_base);
594failed_resource:
595 ep93xxfb_dealloc_videomem(info);
596failed_videomem:
597 fb_dealloc_cmap(&info->cmap);
598failed_cmap:
597 kfree(info); 599 kfree(info);
598 platform_set_drvdata(pdev, NULL); 600 platform_set_drvdata(pdev, NULL);
599 601
diff --git a/drivers/video/exynos/exynos_dp_core.c b/drivers/video/exynos/exynos_dp_core.c
index 2a4481cf260c..a36b2d28280e 100644
--- a/drivers/video/exynos/exynos_dp_core.c
+++ b/drivers/video/exynos/exynos_dp_core.c
@@ -21,14 +21,14 @@
21 21
22#include <video/exynos_dp.h> 22#include <video/exynos_dp.h>
23 23
24#include <plat/cpu.h>
25
26#include "exynos_dp_core.h" 24#include "exynos_dp_core.h"
27 25
28static int exynos_dp_init_dp(struct exynos_dp_device *dp) 26static int exynos_dp_init_dp(struct exynos_dp_device *dp)
29{ 27{
30 exynos_dp_reset(dp); 28 exynos_dp_reset(dp);
31 29
30 exynos_dp_swreset(dp);
31
32 /* SW defined function Normal operation */ 32 /* SW defined function Normal operation */
33 exynos_dp_enable_sw_function(dp); 33 exynos_dp_enable_sw_function(dp);
34 34
@@ -478,7 +478,7 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
478 int lane_count; 478 int lane_count;
479 u8 buf[5]; 479 u8 buf[5];
480 480
481 u8 *adjust_request; 481 u8 adjust_request[2];
482 u8 voltage_swing; 482 u8 voltage_swing;
483 u8 pre_emphasis; 483 u8 pre_emphasis;
484 u8 training_lane; 484 u8 training_lane;
@@ -493,8 +493,8 @@ static int exynos_dp_process_clock_recovery(struct exynos_dp_device *dp)
493 /* set training pattern 2 for EQ */ 493 /* set training pattern 2 for EQ */
494 exynos_dp_set_training_pattern(dp, TRAINING_PTN2); 494 exynos_dp_set_training_pattern(dp, TRAINING_PTN2);
495 495
496 adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 496 adjust_request[0] = link_status[4];
497 - DPCD_ADDR_LANE0_1_STATUS); 497 adjust_request[1] = link_status[5];
498 498
499 exynos_dp_get_adjust_train(dp, adjust_request); 499 exynos_dp_get_adjust_train(dp, adjust_request);
500 500
@@ -566,7 +566,7 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
566 u8 buf[5]; 566 u8 buf[5];
567 u32 reg; 567 u32 reg;
568 568
569 u8 *adjust_request; 569 u8 adjust_request[2];
570 570
571 udelay(400); 571 udelay(400);
572 572
@@ -575,8 +575,8 @@ static int exynos_dp_process_equalizer_training(struct exynos_dp_device *dp)
575 lane_count = dp->link_train.lane_count; 575 lane_count = dp->link_train.lane_count;
576 576
577 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) { 577 if (exynos_dp_clock_recovery_ok(link_status, lane_count) == 0) {
578 adjust_request = link_status + (DPCD_ADDR_ADJUST_REQUEST_LANE0_1 578 adjust_request[0] = link_status[4];
579 - DPCD_ADDR_LANE0_1_STATUS); 579 adjust_request[1] = link_status[5];
580 580
581 if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) { 581 if (exynos_dp_channel_eq_ok(link_status, lane_count) == 0) {
582			/* training pattern Set to Normal */	582			/* training pattern Set to Normal */
@@ -770,7 +770,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
770 return -ETIMEDOUT; 770 return -ETIMEDOUT;
771 } 771 }
772 772
773 mdelay(100); 773 udelay(1);
774 } 774 }
775 775
776 /* Set to use the register calculated M/N video */ 776 /* Set to use the register calculated M/N video */
@@ -804,7 +804,7 @@ static int exynos_dp_config_video(struct exynos_dp_device *dp,
804 return -ETIMEDOUT; 804 return -ETIMEDOUT;
805 } 805 }
806 806
807 mdelay(100); 807 mdelay(1);
808 } 808 }
809 809
810 if (retval != 0) 810 if (retval != 0)
@@ -860,7 +860,8 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
860 return -EINVAL; 860 return -EINVAL;
861 } 861 }
862 862
863 dp = kzalloc(sizeof(struct exynos_dp_device), GFP_KERNEL); 863 dp = devm_kzalloc(&pdev->dev, sizeof(struct exynos_dp_device),
864 GFP_KERNEL);
864 if (!dp) { 865 if (!dp) {
865 dev_err(&pdev->dev, "no memory for device data\n"); 866 dev_err(&pdev->dev, "no memory for device data\n");
866 return -ENOMEM; 867 return -ENOMEM;
@@ -871,8 +872,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
871 dp->clock = clk_get(&pdev->dev, "dp"); 872 dp->clock = clk_get(&pdev->dev, "dp");
872 if (IS_ERR(dp->clock)) { 873 if (IS_ERR(dp->clock)) {
873 dev_err(&pdev->dev, "failed to get clock\n"); 874 dev_err(&pdev->dev, "failed to get clock\n");
874 ret = PTR_ERR(dp->clock); 875 return PTR_ERR(dp->clock);
875 goto err_dp;
876 } 876 }
877 877
878 clk_enable(dp->clock); 878 clk_enable(dp->clock);
@@ -884,35 +884,25 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
884 goto err_clock; 884 goto err_clock;
885 } 885 }
886 886
887 res = request_mem_region(res->start, resource_size(res), 887 dp->reg_base = devm_request_and_ioremap(&pdev->dev, res);
888 dev_name(&pdev->dev));
889 if (!res) {
890 dev_err(&pdev->dev, "failed to request registers region\n");
891 ret = -EINVAL;
892 goto err_clock;
893 }
894
895 dp->res = res;
896
897 dp->reg_base = ioremap(res->start, resource_size(res));
898 if (!dp->reg_base) { 888 if (!dp->reg_base) {
899 dev_err(&pdev->dev, "failed to ioremap\n"); 889 dev_err(&pdev->dev, "failed to ioremap\n");
900 ret = -ENOMEM; 890 ret = -ENOMEM;
901 goto err_req_region; 891 goto err_clock;
902 } 892 }
903 893
904 dp->irq = platform_get_irq(pdev, 0); 894 dp->irq = platform_get_irq(pdev, 0);
905 if (!dp->irq) { 895 if (!dp->irq) {
906 dev_err(&pdev->dev, "failed to get irq\n"); 896 dev_err(&pdev->dev, "failed to get irq\n");
907 ret = -ENODEV; 897 ret = -ENODEV;
908 goto err_ioremap; 898 goto err_clock;
909 } 899 }
910 900
911 ret = request_irq(dp->irq, exynos_dp_irq_handler, 0, 901 ret = devm_request_irq(&pdev->dev, dp->irq, exynos_dp_irq_handler, 0,
912 "exynos-dp", dp); 902 "exynos-dp", dp);
913 if (ret) { 903 if (ret) {
914 dev_err(&pdev->dev, "failed to request irq\n"); 904 dev_err(&pdev->dev, "failed to request irq\n");
915 goto err_ioremap; 905 goto err_clock;
916 } 906 }
917 907
918 dp->video_info = pdata->video_info; 908 dp->video_info = pdata->video_info;
@@ -924,7 +914,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
924 ret = exynos_dp_detect_hpd(dp); 914 ret = exynos_dp_detect_hpd(dp);
925 if (ret) { 915 if (ret) {
926 dev_err(&pdev->dev, "unable to detect hpd\n"); 916 dev_err(&pdev->dev, "unable to detect hpd\n");
927 goto err_irq; 917 goto err_clock;
928 } 918 }
929 919
930 exynos_dp_handle_edid(dp); 920 exynos_dp_handle_edid(dp);
@@ -933,7 +923,7 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
933 dp->video_info->link_rate); 923 dp->video_info->link_rate);
934 if (ret) { 924 if (ret) {
935 dev_err(&pdev->dev, "unable to do link train\n"); 925 dev_err(&pdev->dev, "unable to do link train\n");
936 goto err_irq; 926 goto err_clock;
937 } 927 }
938 928
939 exynos_dp_enable_scramble(dp, 1); 929 exynos_dp_enable_scramble(dp, 1);
@@ -947,23 +937,15 @@ static int __devinit exynos_dp_probe(struct platform_device *pdev)
947 ret = exynos_dp_config_video(dp, dp->video_info); 937 ret = exynos_dp_config_video(dp, dp->video_info);
948 if (ret) { 938 if (ret) {
949 dev_err(&pdev->dev, "unable to config video\n"); 939 dev_err(&pdev->dev, "unable to config video\n");
950 goto err_irq; 940 goto err_clock;
951 } 941 }
952 942
953 platform_set_drvdata(pdev, dp); 943 platform_set_drvdata(pdev, dp);
954 944
955 return 0; 945 return 0;
956 946
957err_irq:
958 free_irq(dp->irq, dp);
959err_ioremap:
960 iounmap(dp->reg_base);
961err_req_region:
962 release_mem_region(res->start, resource_size(res));
963err_clock: 947err_clock:
964 clk_put(dp->clock); 948 clk_put(dp->clock);
965err_dp:
966 kfree(dp);
967 949
968 return ret; 950 return ret;
969} 951}
@@ -976,16 +958,9 @@ static int __devexit exynos_dp_remove(struct platform_device *pdev)
976 if (pdata && pdata->phy_exit) 958 if (pdata && pdata->phy_exit)
977 pdata->phy_exit(); 959 pdata->phy_exit();
978 960
979 free_irq(dp->irq, dp);
980 iounmap(dp->reg_base);
981
982 clk_disable(dp->clock); 961 clk_disable(dp->clock);
983 clk_put(dp->clock); 962 clk_put(dp->clock);
984 963
985 release_mem_region(dp->res->start, resource_size(dp->res));
986
987 kfree(dp);
988
989 return 0; 964 return 0;
990} 965}
991 966
diff --git a/drivers/video/exynos/exynos_dp_core.h b/drivers/video/exynos/exynos_dp_core.h
index 90ceaca0fa24..1e0f998e0c9f 100644
--- a/drivers/video/exynos/exynos_dp_core.h
+++ b/drivers/video/exynos/exynos_dp_core.h
@@ -26,7 +26,6 @@ struct link_train {
26 26
27struct exynos_dp_device { 27struct exynos_dp_device {
28 struct device *dev; 28 struct device *dev;
29 struct resource *res;
30 struct clk *clock; 29 struct clk *clock;
31 unsigned int irq; 30 unsigned int irq;
32 void __iomem *reg_base; 31 void __iomem *reg_base;
@@ -39,8 +38,10 @@ struct exynos_dp_device {
39void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable); 38void exynos_dp_enable_video_mute(struct exynos_dp_device *dp, bool enable);
40void exynos_dp_stop_video(struct exynos_dp_device *dp); 39void exynos_dp_stop_video(struct exynos_dp_device *dp);
41void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable); 40void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable);
41void exynos_dp_init_analog_param(struct exynos_dp_device *dp);
42void exynos_dp_init_interrupt(struct exynos_dp_device *dp); 42void exynos_dp_init_interrupt(struct exynos_dp_device *dp);
43void exynos_dp_reset(struct exynos_dp_device *dp); 43void exynos_dp_reset(struct exynos_dp_device *dp);
44void exynos_dp_swreset(struct exynos_dp_device *dp);
44void exynos_dp_config_interrupt(struct exynos_dp_device *dp); 45void exynos_dp_config_interrupt(struct exynos_dp_device *dp);
45u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp); 46u32 exynos_dp_get_pll_lock_status(struct exynos_dp_device *dp);
46void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable); 47void exynos_dp_set_pll_power_down(struct exynos_dp_device *dp, bool enable);
diff --git a/drivers/video/exynos/exynos_dp_reg.c b/drivers/video/exynos/exynos_dp_reg.c
index 6548afa0e3d2..6ce76d56c3a1 100644
--- a/drivers/video/exynos/exynos_dp_reg.c
+++ b/drivers/video/exynos/exynos_dp_reg.c
@@ -16,8 +16,6 @@
16 16
17#include <video/exynos_dp.h> 17#include <video/exynos_dp.h>
18 18
19#include <plat/cpu.h>
20
21#include "exynos_dp_core.h" 19#include "exynos_dp_core.h"
22#include "exynos_dp_reg.h" 20#include "exynos_dp_reg.h"
23 21
@@ -65,6 +63,28 @@ void exynos_dp_lane_swap(struct exynos_dp_device *dp, bool enable)
65 writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP); 63 writel(reg, dp->reg_base + EXYNOS_DP_LANE_MAP);
66} 64}
67 65
66void exynos_dp_init_analog_param(struct exynos_dp_device *dp)
67{
68 u32 reg;
69
70 reg = TX_TERMINAL_CTRL_50_OHM;
71 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_1);
72
73 reg = SEL_24M | TX_DVDD_BIT_1_0625V;
74 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_2);
75
76 reg = DRIVE_DVDD_BIT_1_0625V | VCO_BIT_600_MICRO;
77 writel(reg, dp->reg_base + EXYNOS_DP_ANALOG_CTL_3);
78
79 reg = PD_RING_OSC | AUX_TERMINAL_CTRL_50_OHM |
80 TX_CUR1_2X | TX_CUR_8_MA;
81 writel(reg, dp->reg_base + EXYNOS_DP_PLL_FILTER_CTL_1);
82
83 reg = CH3_AMP_400_MV | CH2_AMP_400_MV |
84 CH1_AMP_400_MV | CH0_AMP_400_MV;
85 writel(reg, dp->reg_base + EXYNOS_DP_TX_AMP_TUNING_CTL);
86}
87
68void exynos_dp_init_interrupt(struct exynos_dp_device *dp) 88void exynos_dp_init_interrupt(struct exynos_dp_device *dp)
69{ 89{
70 /* Set interrupt pin assertion polarity as high */ 90 /* Set interrupt pin assertion polarity as high */
@@ -89,8 +109,6 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
89{ 109{
90 u32 reg; 110 u32 reg;
91 111
92 writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
93
94 exynos_dp_stop_video(dp); 112 exynos_dp_stop_video(dp);
95 exynos_dp_enable_video_mute(dp, 0); 113 exynos_dp_enable_video_mute(dp, 0);
96 114
@@ -131,9 +149,15 @@ void exynos_dp_reset(struct exynos_dp_device *dp)
131 149
132 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL); 150 writel(0x00000101, dp->reg_base + EXYNOS_DP_SOC_GENERAL_CTL);
133 151
152 exynos_dp_init_analog_param(dp);
134 exynos_dp_init_interrupt(dp); 153 exynos_dp_init_interrupt(dp);
135} 154}
136 155
156void exynos_dp_swreset(struct exynos_dp_device *dp)
157{
158 writel(RESET_DP_TX, dp->reg_base + EXYNOS_DP_TX_SW_RESET);
159}
160
137void exynos_dp_config_interrupt(struct exynos_dp_device *dp) 161void exynos_dp_config_interrupt(struct exynos_dp_device *dp)
138{ 162{
139 u32 reg; 163 u32 reg;
@@ -271,6 +295,7 @@ void exynos_dp_set_analog_power_down(struct exynos_dp_device *dp,
271void exynos_dp_init_analog_func(struct exynos_dp_device *dp) 295void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
272{ 296{
273 u32 reg; 297 u32 reg;
298 int timeout_loop = 0;
274 299
275 exynos_dp_set_analog_power_down(dp, POWER_ALL, 0); 300 exynos_dp_set_analog_power_down(dp, POWER_ALL, 0);
276 301
@@ -282,9 +307,19 @@ void exynos_dp_init_analog_func(struct exynos_dp_device *dp)
282 writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL); 307 writel(reg, dp->reg_base + EXYNOS_DP_DEBUG_CTL);
283 308
284 /* Power up PLL */ 309 /* Power up PLL */
285 if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) 310 if (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
286 exynos_dp_set_pll_power_down(dp, 0); 311 exynos_dp_set_pll_power_down(dp, 0);
287 312
313 while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
314 timeout_loop++;
315 if (DP_TIMEOUT_LOOP_COUNT < timeout_loop) {
316 dev_err(dp->dev, "failed to get pll lock status\n");
317 return;
318 }
319 usleep_range(10, 20);
320 }
321 }
322
288 /* Enable Serdes FIFO function and Link symbol clock domain module */ 323 /* Enable Serdes FIFO function and Link symbol clock domain module */
289 reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2); 324 reg = readl(dp->reg_base + EXYNOS_DP_FUNC_EN_2);
290 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N 325 reg &= ~(SERDES_FIFO_FUNC_EN_N | LS_CLK_DOMAIN_FUNC_EN_N
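
The PLL power-up change in exynos_dp_init_analog_func() above follows the usual bounded-poll idiom; isolated as a sketch (the helper name is made up, the symbols are the driver's own):

static int example_wait_for_pll_lock(struct exynos_dp_device *dp)
{
	int timeout_loop = 0;

	while (exynos_dp_get_pll_lock_status(dp) == PLL_UNLOCKED) {
		if (++timeout_loop > DP_TIMEOUT_LOOP_COUNT)
			return -ETIMEDOUT;	/* give up instead of spinning forever */
		usleep_range(10, 20);
	}

	return 0;
}
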
diff --git a/drivers/video/exynos/exynos_dp_reg.h b/drivers/video/exynos/exynos_dp_reg.h
index 42f608e2a43e..125b27cd57ae 100644
--- a/drivers/video/exynos/exynos_dp_reg.h
+++ b/drivers/video/exynos/exynos_dp_reg.h
@@ -24,6 +24,12 @@
24 24
25#define EXYNOS_DP_LANE_MAP 0x35C 25#define EXYNOS_DP_LANE_MAP 0x35C
26 26
27#define EXYNOS_DP_ANALOG_CTL_1 0x370
28#define EXYNOS_DP_ANALOG_CTL_2 0x374
29#define EXYNOS_DP_ANALOG_CTL_3 0x378
30#define EXYNOS_DP_PLL_FILTER_CTL_1 0x37C
31#define EXYNOS_DP_TX_AMP_TUNING_CTL 0x380
32
27#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390 33#define EXYNOS_DP_AUX_HW_RETRY_CTL 0x390
28 34
29#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4 35#define EXYNOS_DP_COMMON_INT_STA_1 0x3C4
@@ -166,6 +172,29 @@
166#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0) 172#define LANE0_MAP_LOGIC_LANE_2 (0x2 << 0)
167#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0) 173#define LANE0_MAP_LOGIC_LANE_3 (0x3 << 0)
168 174
175/* EXYNOS_DP_ANALOG_CTL_1 */
176#define TX_TERMINAL_CTRL_50_OHM (0x1 << 4)
177
178/* EXYNOS_DP_ANALOG_CTL_2 */
179#define SEL_24M (0x1 << 3)
180#define TX_DVDD_BIT_1_0625V (0x4 << 0)
181
182/* EXYNOS_DP_ANALOG_CTL_3 */
183#define DRIVE_DVDD_BIT_1_0625V (0x4 << 5)
184#define VCO_BIT_600_MICRO (0x5 << 0)
185
186/* EXYNOS_DP_PLL_FILTER_CTL_1 */
187#define PD_RING_OSC (0x1 << 6)
188#define AUX_TERMINAL_CTRL_50_OHM (0x2 << 4)
189#define TX_CUR1_2X (0x1 << 2)
190#define TX_CUR_8_MA (0x2 << 0)
191
192/* EXYNOS_DP_TX_AMP_TUNING_CTL */
193#define CH3_AMP_400_MV (0x0 << 24)
194#define CH2_AMP_400_MV (0x0 << 16)
195#define CH1_AMP_400_MV (0x0 << 8)
196#define CH0_AMP_400_MV (0x0 << 0)
197
169/* EXYNOS_DP_AUX_HW_RETRY_CTL */ 198/* EXYNOS_DP_AUX_HW_RETRY_CTL */
170#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8) 199#define AUX_BIT_PERIOD_EXPECTED_DELAY(x) (((x) & 0x7) << 8)
171#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3) 200#define AUX_HW_RETRY_INTERVAL_MASK (0x3 << 3)
diff --git a/drivers/video/exynos/exynos_mipi_dsi.c b/drivers/video/exynos/exynos_mipi_dsi.c
index 557091dc0e97..6c1f5c314a42 100644
--- a/drivers/video/exynos/exynos_mipi_dsi.c
+++ b/drivers/video/exynos/exynos_mipi_dsi.c
@@ -58,7 +58,7 @@ static struct mipi_dsim_platform_data *to_dsim_plat(struct platform_device
58} 58}
59 59
60static struct regulator_bulk_data supplies[] = { 60static struct regulator_bulk_data supplies[] = {
61 { .supply = "vdd10", }, 61 { .supply = "vdd11", },
62 { .supply = "vdd18", }, 62 { .supply = "vdd18", },
63}; 63};
64 64
@@ -102,6 +102,8 @@ static void exynos_mipi_update_cfg(struct mipi_dsim_device *dsim)
102 /* set display timing. */ 102 /* set display timing. */
103 exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config); 103 exynos_mipi_dsi_set_display_mode(dsim, dsim->dsim_config);
104 104
105 exynos_mipi_dsi_init_interrupt(dsim);
106
105 /* 107 /*
106 * data from Display controller(FIMD) is transferred in video mode 108 * data from Display controller(FIMD) is transferred in video mode
107	 * but in case of command mode, all settings are updated to registers.	109	 * but in case of command mode, all settings are updated to registers.
@@ -413,27 +415,30 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
413 goto err_platform_get_irq; 415 goto err_platform_get_irq;
414 } 416 }
415 417
418 init_completion(&dsim_wr_comp);
419 init_completion(&dsim_rd_comp);
420 platform_set_drvdata(pdev, dsim);
421
416 ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler, 422 ret = request_irq(dsim->irq, exynos_mipi_dsi_interrupt_handler,
417 IRQF_SHARED, pdev->name, dsim); 423 IRQF_SHARED, dev_name(&pdev->dev), dsim);
418 if (ret != 0) { 424 if (ret != 0) {
419 dev_err(&pdev->dev, "failed to request dsim irq\n"); 425 dev_err(&pdev->dev, "failed to request dsim irq\n");
420 ret = -EINVAL; 426 ret = -EINVAL;
421 goto err_bind; 427 goto err_bind;
422 } 428 }
423 429
424 init_completion(&dsim_wr_comp); 430 /* enable interrupts */
425 init_completion(&dsim_rd_comp);
426
427 /* enable interrupt */
428 exynos_mipi_dsi_init_interrupt(dsim); 431 exynos_mipi_dsi_init_interrupt(dsim);
429 432
430 /* initialize mipi-dsi client(lcd panel). */ 433 /* initialize mipi-dsi client(lcd panel). */
431 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe) 434 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->probe)
432 dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev); 435 dsim_ddi->dsim_lcd_drv->probe(dsim_ddi->dsim_lcd_dev);
433 436
434 /* in case that mipi got enabled at bootloader. */ 437 /* in case mipi-dsi has been enabled by bootloader */
435 if (dsim_pd->enabled) 438 if (dsim_pd->enabled) {
436 goto out; 439 exynos_mipi_regulator_enable(dsim);
440 goto done;
441 }
437 442
438 /* lcd panel power on. */ 443 /* lcd panel power on. */
439 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on) 444 if (dsim_ddi->dsim_lcd_drv && dsim_ddi->dsim_lcd_drv->power_on)
@@ -453,12 +458,11 @@ static int exynos_mipi_dsi_probe(struct platform_device *pdev)
453 458
454 dsim->suspended = false; 459 dsim->suspended = false;
455 460
456out: 461done:
457 platform_set_drvdata(pdev, dsim); 462 platform_set_drvdata(pdev, dsim);
458 463
459	dev_dbg(&pdev->dev, "mipi-dsi driver(%s mode) has been probed.\n",	464	dev_dbg(&pdev->dev, "%s() completed successfully (%s mode)\n", __func__,
460 (dsim_config->e_interface == DSIM_COMMAND) ? 465 dsim_config->e_interface == DSIM_COMMAND ? "CPU" : "RGB");
461 "CPU" : "RGB");
462 466
463 return 0; 467 return 0;
464 468
@@ -515,10 +519,10 @@ static int __devexit exynos_mipi_dsi_remove(struct platform_device *pdev)
515 return 0; 519 return 0;
516} 520}
517 521
518#ifdef CONFIG_PM 522#ifdef CONFIG_PM_SLEEP
519static int exynos_mipi_dsi_suspend(struct platform_device *pdev, 523static int exynos_mipi_dsi_suspend(struct device *dev)
520 pm_message_t state)
521{ 524{
525 struct platform_device *pdev = to_platform_device(dev);
522 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); 526 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
523 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; 527 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
524 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; 528 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -544,8 +548,9 @@ static int exynos_mipi_dsi_suspend(struct platform_device *pdev,
544 return 0; 548 return 0;
545} 549}
546 550
547static int exynos_mipi_dsi_resume(struct platform_device *pdev) 551static int exynos_mipi_dsi_resume(struct device *dev)
548{ 552{
553 struct platform_device *pdev = to_platform_device(dev);
549 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev); 554 struct mipi_dsim_device *dsim = platform_get_drvdata(pdev);
550 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv; 555 struct mipi_dsim_lcd_driver *client_drv = dsim->dsim_lcd_drv;
551 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev; 556 struct mipi_dsim_lcd_device *client_dev = dsim->dsim_lcd_dev;
@@ -577,19 +582,19 @@ static int exynos_mipi_dsi_resume(struct platform_device *pdev)
577 582
578 return 0; 583 return 0;
579} 584}
580#else
581#define exynos_mipi_dsi_suspend NULL
582#define exynos_mipi_dsi_resume NULL
583#endif 585#endif
584 586
587static const struct dev_pm_ops exynos_mipi_dsi_pm_ops = {
588 SET_SYSTEM_SLEEP_PM_OPS(exynos_mipi_dsi_suspend, exynos_mipi_dsi_resume)
589};
590
585static struct platform_driver exynos_mipi_dsi_driver = { 591static struct platform_driver exynos_mipi_dsi_driver = {
586 .probe = exynos_mipi_dsi_probe, 592 .probe = exynos_mipi_dsi_probe,
587 .remove = __devexit_p(exynos_mipi_dsi_remove), 593 .remove = __devexit_p(exynos_mipi_dsi_remove),
588 .suspend = exynos_mipi_dsi_suspend,
589 .resume = exynos_mipi_dsi_resume,
590 .driver = { 594 .driver = {
591 .name = "exynos-mipi-dsim", 595 .name = "exynos-mipi-dsim",
592 .owner = THIS_MODULE, 596 .owner = THIS_MODULE,
597 .pm = &exynos_mipi_dsi_pm_ops,
593 }, 598 },
594}; 599};
595 600
diff --git a/drivers/video/exynos/exynos_mipi_dsi_common.c b/drivers/video/exynos/exynos_mipi_dsi_common.c
index 14909c1d3832..47b533a183be 100644
--- a/drivers/video/exynos/exynos_mipi_dsi_common.c
+++ b/drivers/video/exynos/exynos_mipi_dsi_common.c
@@ -76,33 +76,25 @@ static unsigned int dpll_table[15] = {
76 76
77irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id) 77irqreturn_t exynos_mipi_dsi_interrupt_handler(int irq, void *dev_id)
78{ 78{
79 unsigned int intsrc = 0; 79 struct mipi_dsim_device *dsim = dev_id;
80 unsigned int intmsk = 0; 80 unsigned int intsrc, intmsk;
81 struct mipi_dsim_device *dsim = NULL; 81
82 82 if (dsim == NULL) {
83 dsim = dev_id; 83 dev_err(dsim->dev, "%s: wrong parameter\n", __func__);
 84	if (!dsim) {	 84		pr_err("%s: wrong parameter\n", __func__);
85 dev_dbg(dsim->dev, KERN_ERR "%s:error: wrong parameter\n",
86 __func__);
87 return IRQ_HANDLED;
88 } 85 }
89 86
90 intsrc = exynos_mipi_dsi_read_interrupt(dsim); 87 intsrc = exynos_mipi_dsi_read_interrupt(dsim);
91 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim); 88 intmsk = exynos_mipi_dsi_read_interrupt_mask(dsim);
89 intmsk = ~intmsk & intsrc;
92 90
93 intmsk = ~(intmsk) & intsrc; 91 if (intsrc & INTMSK_RX_DONE) {
94
95 switch (intmsk) {
96 case INTMSK_RX_DONE:
97 complete(&dsim_rd_comp); 92 complete(&dsim_rd_comp);
98 dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n"); 93 dev_dbg(dsim->dev, "MIPI INTMSK_RX_DONE\n");
99 break; 94 }
100 case INTMSK_FIFO_EMPTY: 95 if (intsrc & INTMSK_FIFO_EMPTY) {
101 complete(&dsim_wr_comp); 96 complete(&dsim_wr_comp);
102 dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n"); 97 dev_dbg(dsim->dev, "MIPI INTMSK_FIFO_EMPTY\n");
103 break;
104 default:
105 break;
106 } 98 }
107 99
108 exynos_mipi_dsi_clear_interrupt(dsim, intmsk); 100 exynos_mipi_dsi_clear_interrupt(dsim, intmsk);
@@ -738,11 +730,11 @@ int exynos_mipi_dsi_set_display_mode(struct mipi_dsim_device *dsim,
738 if (dsim_config->auto_vertical_cnt == 0) { 730 if (dsim_config->auto_vertical_cnt == 0) {
739 exynos_mipi_dsi_set_main_disp_vporch(dsim, 731 exynos_mipi_dsi_set_main_disp_vporch(dsim,
740 dsim_config->cmd_allow, 732 dsim_config->cmd_allow,
741 timing->upper_margin, 733 timing->lower_margin,
742 timing->lower_margin); 734 timing->upper_margin);
743 exynos_mipi_dsi_set_main_disp_hporch(dsim, 735 exynos_mipi_dsi_set_main_disp_hporch(dsim,
744 timing->left_margin, 736 timing->right_margin,
745 timing->right_margin); 737 timing->left_margin);
746 exynos_mipi_dsi_set_main_disp_sync_area(dsim, 738 exynos_mipi_dsi_set_main_disp_sync_area(dsim,
747 timing->vsync_len, 739 timing->vsync_len,
748 timing->hsync_len); 740 timing->hsync_len);
diff --git a/drivers/video/exynos/s6e8ax0.c b/drivers/video/exynos/s6e8ax0.c
index 4aa9ac6218bf..05d080b63bc0 100644
--- a/drivers/video/exynos/s6e8ax0.c
+++ b/drivers/video/exynos/s6e8ax0.c
@@ -293,9 +293,20 @@ static void s6e8ax0_panel_cond(struct s6e8ax0 *lcd)
293 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0, 293 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
294 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8 294 0xc8, 0x08, 0x48, 0xc1, 0x00, 0xc1, 0xff, 0xff, 0xc8
295 }; 295 };
296 static const unsigned char data_to_send_panel_reverse[] = {
297 0xf8, 0x19, 0x35, 0x00, 0x00, 0x00, 0x93, 0x00, 0x3c, 0x7d,
298 0x08, 0x27, 0x7d, 0x3f, 0x00, 0x00, 0x00, 0x20, 0x04, 0x08,
299 0x6e, 0x00, 0x00, 0x00, 0x02, 0x08, 0x08, 0x23, 0x23, 0xc0,
300 0xc1, 0x01, 0x41, 0xc1, 0x00, 0xc1, 0xf6, 0xf6, 0xc1
301 };
296 302
297 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE, 303 if (lcd->dsim_dev->panel_reverse)
298 data_to_send, ARRAY_SIZE(data_to_send)); 304 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
305 data_to_send_panel_reverse,
306 ARRAY_SIZE(data_to_send_panel_reverse));
307 else
308 ops->cmd_write(lcd_to_master(lcd), MIPI_DSI_DCS_LONG_WRITE,
309 data_to_send, ARRAY_SIZE(data_to_send));
299} 310}
300 311
301static void s6e8ax0_display_cond(struct s6e8ax0 *lcd) 312static void s6e8ax0_display_cond(struct s6e8ax0 *lcd)
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index c27e153d8882..1ddeb11659d4 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -23,7 +23,7 @@
23#include <linux/rmap.h> 23#include <linux/rmap.h>
24#include <linux/pagemap.h> 24#include <linux/pagemap.h>
25 25
26struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs) 26static struct page *fb_deferred_io_page(struct fb_info *info, unsigned long offs)
27{ 27{
28 void *screen_base = (void __force *) info->screen_base; 28 void *screen_base = (void __force *) info->screen_base;
29 struct page *page; 29 struct page *page;
@@ -107,6 +107,10 @@ static int fb_deferred_io_mkwrite(struct vm_area_struct *vma,
107 /* protect against the workqueue changing the page list */ 107 /* protect against the workqueue changing the page list */
108 mutex_lock(&fbdefio->lock); 108 mutex_lock(&fbdefio->lock);
109 109
110 /* first write in this cycle, notify the driver */
111 if (fbdefio->first_io && list_empty(&fbdefio->pagelist))
112 fbdefio->first_io(info);
113
110 /* 114 /*
111 * We want the page to remain locked from ->page_mkwrite until 115 * We want the page to remain locked from ->page_mkwrite until
112 * the PTE is marked dirty to avoid page_mkclean() being called 116 * the PTE is marked dirty to avoid page_mkclean() being called
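
The hunk above adds an optional first_io callback to deferred I/O: it fires on the first page_mkwrite of a collection cycle, before the page is added to the still-empty pagelist, with fbdefio->lock held. A sketch of how a driver wires it up (names are illustrative; only the fb_deferred_io fields come from this patch):

static void examplefb_dpy_first_io(struct fb_info *info)
{
	/* e.g. wake the controller before the first write of a cycle */
	pm_runtime_get(info->device);
}

static void examplefb_dpy_deferred_io(struct fb_info *info,
				      struct list_head *pagelist)
{
	/* flush the collected dirty pages to the panel */
}

static struct fb_deferred_io examplefb_defio = {
	.delay		= HZ / 10,
	.first_io	= examplefb_dpy_first_io,
	.deferred_io	= examplefb_dpy_deferred_io,
};
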
diff --git a/drivers/video/fbsysfs.c b/drivers/video/fbsysfs.c
index 67afa9c2289d..a55e3669d135 100644
--- a/drivers/video/fbsysfs.c
+++ b/drivers/video/fbsysfs.c
@@ -80,6 +80,8 @@ EXPORT_SYMBOL(framebuffer_alloc);
80 */ 80 */
81void framebuffer_release(struct fb_info *info) 81void framebuffer_release(struct fb_info *info)
82{ 82{
83 if (!info)
84 return;
83 kfree(info->apertures); 85 kfree(info->apertures);
84 kfree(info); 86 kfree(info);
85} 87}
diff --git a/drivers/video/fsl-diu-fb.c b/drivers/video/fsl-diu-fb.c
index 6af3f16754f0..458c00664ade 100644
--- a/drivers/video/fsl-diu-fb.c
+++ b/drivers/video/fsl-diu-fb.c
@@ -834,7 +834,6 @@ static void update_lcdc(struct fb_info *info)
834 diu_ops.set_pixel_clock(var->pixclock); 834 diu_ops.set_pixel_clock(var->pixclock);
835 835
836 out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */ 836 out_be32(&hw->syn_pol, 0); /* SYNC SIGNALS POLARITY */
837 out_be32(&hw->thresholds, 0x00037800); /* The Thresholds */
838 out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */ 837 out_be32(&hw->int_status, 0); /* INTERRUPT STATUS */
839 out_be32(&hw->plut, 0x01F5F666); 838 out_be32(&hw->plut, 0x01F5F666);
840 839
diff --git a/drivers/video/intelfb/intelfbdrv.c b/drivers/video/intelfb/intelfbdrv.c
index 02fd2263610c..bdcbfbae2777 100644
--- a/drivers/video/intelfb/intelfbdrv.c
+++ b/drivers/video/intelfb/intelfbdrv.c
@@ -680,6 +680,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
680 + dinfo->fb.size); 680 + dinfo->fb.size);
681 if (!dinfo->aperture.virtual) { 681 if (!dinfo->aperture.virtual) {
682 ERR_MSG("Cannot remap FB region.\n"); 682 ERR_MSG("Cannot remap FB region.\n");
683 agp_backend_release(bridge);
683 cleanup(dinfo); 684 cleanup(dinfo);
684 return -ENODEV; 685 return -ENODEV;
685 } 686 }
@@ -689,6 +690,7 @@ static int __devinit intelfb_pci_register(struct pci_dev *pdev,
689 INTEL_REG_SIZE); 690 INTEL_REG_SIZE);
690 if (!dinfo->mmio_base) { 691 if (!dinfo->mmio_base) {
691 ERR_MSG("Cannot remap MMIO region.\n"); 692 ERR_MSG("Cannot remap MMIO region.\n");
693 agp_backend_release(bridge);
692 cleanup(dinfo); 694 cleanup(dinfo);
693 return -ENODEV; 695 return -ENODEV;
694 } 696 }
diff --git a/drivers/video/mb862xx/mb862xx-i2c.c b/drivers/video/mb862xx/mb862xx-i2c.c
index 273769bb8deb..c87e17afb3e2 100644
--- a/drivers/video/mb862xx/mb862xx-i2c.c
+++ b/drivers/video/mb862xx/mb862xx-i2c.c
@@ -68,7 +68,7 @@ static int mb862xx_i2c_read_byte(struct i2c_adapter *adap, u8 *byte, int last)
68 return 1; 68 return 1;
69} 69}
70 70
71void mb862xx_i2c_stop(struct i2c_adapter *adap) 71static void mb862xx_i2c_stop(struct i2c_adapter *adap)
72{ 72{
73 struct mb862xxfb_par *par = adap->algo_data; 73 struct mb862xxfb_par *par = adap->algo_data;
74 74
diff --git a/drivers/video/mb862xx/mb862xxfbdrv.c b/drivers/video/mb862xx/mb862xxfbdrv.c
index 11a7a333701d..00ce1f34b496 100644
--- a/drivers/video/mb862xx/mb862xxfbdrv.c
+++ b/drivers/video/mb862xx/mb862xxfbdrv.c
@@ -579,7 +579,7 @@ static ssize_t mb862xxfb_show_dispregs(struct device *dev,
579 579
580static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL); 580static DEVICE_ATTR(dispregs, 0444, mb862xxfb_show_dispregs, NULL);
581 581
582irqreturn_t mb862xx_intr(int irq, void *dev_id) 582static irqreturn_t mb862xx_intr(int irq, void *dev_id)
583{ 583{
584 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id; 584 struct mb862xxfb_par *par = (struct mb862xxfb_par *) dev_id;
585 unsigned long reg_ist, mask; 585 unsigned long reg_ist, mask;
diff --git a/drivers/video/mbx/mbxfb.c b/drivers/video/mbx/mbxfb.c
index 55bf6196b7a0..85e4f44bfa61 100644
--- a/drivers/video/mbx/mbxfb.c
+++ b/drivers/video/mbx/mbxfb.c
@@ -950,7 +950,7 @@ static int __devinit mbxfb_probe(struct platform_device *dev)
950 950
951 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, 951 mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr,
952 res_size(mfbi->fb_req)); 952 res_size(mfbi->fb_req));
953 if (!mfbi->reg_virt_addr) { 953 if (!mfbi->fb_virt_addr) {
954 dev_err(&dev->dev, "failed to ioremap frame buffer\n"); 954 dev_err(&dev->dev, "failed to ioremap frame buffer\n");
955 ret = -EINVAL; 955 ret = -EINVAL;
956 goto err4; 956 goto err4;
@@ -1045,7 +1045,7 @@ static int __devexit mbxfb_remove(struct platform_device *dev)
1045 1045
1046static struct platform_driver mbxfb_driver = { 1046static struct platform_driver mbxfb_driver = {
1047 .probe = mbxfb_probe, 1047 .probe = mbxfb_probe,
1048 .remove = mbxfb_remove, 1048 .remove = __devexit_p(mbxfb_remove),
1049 .suspend = mbxfb_suspend, 1049 .suspend = mbxfb_suspend,
1050 .resume = mbxfb_resume, 1050 .resume = mbxfb_resume,
1051 .driver = { 1051 .driver = {
diff --git a/drivers/video/mxsfb.c b/drivers/video/mxsfb.c
index 6c6bc578d0fc..abbe691047bd 100644
--- a/drivers/video/mxsfb.c
+++ b/drivers/video/mxsfb.c
@@ -889,6 +889,18 @@ static int __devexit mxsfb_remove(struct platform_device *pdev)
889 return 0; 889 return 0;
890} 890}
891 891
892static void mxsfb_shutdown(struct platform_device *pdev)
893{
894 struct fb_info *fb_info = platform_get_drvdata(pdev);
895 struct mxsfb_info *host = to_imxfb_host(fb_info);
896
897 /*
898 * Force stop the LCD controller as keeping it running during reboot
899 * might interfere with the BootROM's boot mode pads sampling.
900 */
901 writel(CTRL_RUN, host->base + LCDC_CTRL + REG_CLR);
902}
903
892static struct platform_device_id mxsfb_devtype[] = { 904static struct platform_device_id mxsfb_devtype[] = {
893 { 905 {
894 .name = "imx23-fb", 906 .name = "imx23-fb",
@@ -905,6 +917,7 @@ MODULE_DEVICE_TABLE(platform, mxsfb_devtype);
905static struct platform_driver mxsfb_driver = { 917static struct platform_driver mxsfb_driver = {
906 .probe = mxsfb_probe, 918 .probe = mxsfb_probe,
907 .remove = __devexit_p(mxsfb_remove), 919 .remove = __devexit_p(mxsfb_remove),
920 .shutdown = mxsfb_shutdown,
908 .id_table = mxsfb_devtype, 921 .id_table = mxsfb_devtype,
909 .driver = { 922 .driver = {
910 .name = DRIVER_NAME, 923 .name = DRIVER_NAME,
diff --git a/drivers/video/omap/Kconfig b/drivers/video/omap/Kconfig
index 1e7536d9a8fc..b48f95f0dfe2 100644
--- a/drivers/video/omap/Kconfig
+++ b/drivers/video/omap/Kconfig
@@ -39,14 +39,6 @@ config FB_OMAP_LCD_MIPID
39 the Mobile Industry Processor Interface DBI-C/DCS 39 the Mobile Industry Processor Interface DBI-C/DCS
40 specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3) 40 specification. (Supported LCDs: Philips LPH8923, Sharp LS041Y3)
41 41
42config FB_OMAP_BOOTLOADER_INIT
43 bool "Check bootloader initialization"
44 depends on FB_OMAP
45 help
46 Say Y here if you want to enable checking if the bootloader has
47 already initialized the display controller. In this case the
48 driver will skip the initialization.
49
50config FB_OMAP_CONSISTENT_DMA_SIZE 42config FB_OMAP_CONSISTENT_DMA_SIZE
51 int "Consistent DMA memory size (MB)" 43 int "Consistent DMA memory size (MB)"
52 depends on FB_OMAP 44 depends on FB_OMAP
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index 74e7cf078505..ad741c3d1ae1 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -739,12 +739,6 @@ static void acx_panel_set_timings(struct omap_dss_device *dssdev,
739 } 739 }
740} 740}
741 741
742static void acx_panel_get_timings(struct omap_dss_device *dssdev,
743 struct omap_video_timings *timings)
744{
745 *timings = dssdev->panel.timings;
746}
747
748static int acx_panel_check_timings(struct omap_dss_device *dssdev, 742static int acx_panel_check_timings(struct omap_dss_device *dssdev,
749 struct omap_video_timings *timings) 743 struct omap_video_timings *timings)
750{ 744{
@@ -762,7 +756,6 @@ static struct omap_dss_driver acx_panel_driver = {
762 .resume = acx_panel_resume, 756 .resume = acx_panel_resume,
763 757
764 .set_timings = acx_panel_set_timings, 758 .set_timings = acx_panel_set_timings,
765 .get_timings = acx_panel_get_timings,
766 .check_timings = acx_panel_check_timings, 759 .check_timings = acx_panel_check_timings,
767 760
768 .get_recommended_bpp = acx_get_recommended_bpp, 761 .get_recommended_bpp = acx_get_recommended_bpp,
diff --git a/drivers/video/omap2/displays/panel-generic-dpi.c b/drivers/video/omap2/displays/panel-generic-dpi.c
index 30fe4dfeb227..e42f9dc22123 100644
--- a/drivers/video/omap2/displays/panel-generic-dpi.c
+++ b/drivers/video/omap2/displays/panel-generic-dpi.c
@@ -386,6 +386,106 @@ static struct panel_config generic_dpi_panels[] = {
386 386
387 .name = "innolux_at080tn52", 387 .name = "innolux_at080tn52",
388 }, 388 },
389
390 /* Mitsubishi AA084SB01 */
391 {
392 {
393 .x_res = 800,
394 .y_res = 600,
395 .pixel_clock = 40000,
396
397 .hsw = 1,
398 .hfp = 254,
399 .hbp = 1,
400
401 .vsw = 1,
402 .vfp = 26,
403 .vbp = 1,
404 },
405 .config = OMAP_DSS_LCD_TFT,
406 .name = "mitsubishi_aa084sb01",
407 },
408 /* EDT ET0500G0DH6 */
409 {
410 {
411 .x_res = 800,
412 .y_res = 480,
413 .pixel_clock = 33260,
414
415 .hsw = 128,
416 .hfp = 216,
417 .hbp = 40,
418
419 .vsw = 2,
420 .vfp = 35,
421 .vbp = 10,
422 },
423 .config = OMAP_DSS_LCD_TFT,
424 .name = "edt_et0500g0dh6",
425 },
426
427 /* Prime-View PD050VL1 */
428 {
429 {
430 .x_res = 640,
431 .y_res = 480,
432
433 .pixel_clock = 25000,
434
435 .hsw = 96,
436 .hfp = 18,
437 .hbp = 46,
438
439 .vsw = 2,
440 .vfp = 10,
441 .vbp = 33,
442 },
443 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
444 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
445 .name = "primeview_pd050vl1",
446 },
447
448 /* Prime-View PM070WL4 */
449 {
450 {
451 .x_res = 800,
452 .y_res = 480,
453
454 .pixel_clock = 32000,
455
456 .hsw = 128,
457 .hfp = 42,
458 .hbp = 86,
459
460 .vsw = 2,
461 .vfp = 10,
462 .vbp = 33,
463 },
464 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
465 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
466 .name = "primeview_pm070wl4",
467 },
468
469 /* Prime-View PD104SLF */
470 {
471 {
472 .x_res = 800,
473 .y_res = 600,
474
475 .pixel_clock = 40000,
476
477 .hsw = 128,
478 .hfp = 42,
479 .hbp = 86,
480
481 .vsw = 4,
482 .vfp = 1,
483 .vbp = 23,
484 },
485 .config = OMAP_DSS_LCD_TFT | OMAP_DSS_LCD_IVS |
486 OMAP_DSS_LCD_IHS | OMAP_DSS_LCD_IPC,
487 .name = "primeview_pd104slf",
488 },
389}; 489};
390 490
391struct panel_drv_data { 491struct panel_drv_data {
@@ -549,12 +649,6 @@ static void generic_dpi_panel_set_timings(struct omap_dss_device *dssdev,
549 dpi_set_timings(dssdev, timings); 649 dpi_set_timings(dssdev, timings);
550} 650}
551 651
552static void generic_dpi_panel_get_timings(struct omap_dss_device *dssdev,
553 struct omap_video_timings *timings)
554{
555 *timings = dssdev->panel.timings;
556}
557
558static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev, 652static int generic_dpi_panel_check_timings(struct omap_dss_device *dssdev,
559 struct omap_video_timings *timings) 653 struct omap_video_timings *timings)
560{ 654{
@@ -571,7 +665,6 @@ static struct omap_dss_driver dpi_driver = {
571 .resume = generic_dpi_panel_resume, 665 .resume = generic_dpi_panel_resume,
572 666
573 .set_timings = generic_dpi_panel_set_timings, 667 .set_timings = generic_dpi_panel_set_timings,
574 .get_timings = generic_dpi_panel_get_timings,
575 .check_timings = generic_dpi_panel_check_timings, 668 .check_timings = generic_dpi_panel_check_timings,
576 669
577 .driver = { 670 .driver = {
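Each of the new panel_config entries above pairs an omap_video_timings block (x_res/y_res, pixel_clock in kHz, plus the horizontal and vertical sync, front-porch and back-porch widths) with a config bitmask and a name. As a quick sanity check on such an entry, the refresh rate it implies falls out of the line and frame totals; below is a minimal userspace sketch, where the struct is a local stand-in for the kernel's omap_video_timings and the values are those of the new edt_et0500g0dh6 entry.

#include <stdio.h>

/* local stand-in for the timing fields used above:
 * sizes in pixels/lines, pixel_clock in kHz */
struct timings {
	unsigned x_res, y_res, pixel_clock;
	unsigned hsw, hfp, hbp;
	unsigned vsw, vfp, vbp;
};

int main(void)
{
	struct timings t = {
		.x_res = 800, .y_res = 480, .pixel_clock = 33260,
		.hsw = 128, .hfp = 216, .hbp = 40,
		.vsw = 2, .vfp = 35, .vbp = 10,
	};
	unsigned htotal = t.x_res + t.hsw + t.hfp + t.hbp;	/* 1184 */
	unsigned vtotal = t.y_res + t.vsw + t.vfp + t.vbp;	/* 527 */
	double refresh = (double)t.pixel_clock * 1000.0 / (htotal * vtotal);

	printf("%ux%u @ %.1f Hz\n", t.x_res, t.y_res, refresh);
	return 0;
}

For this entry the result is roughly 53 Hz; the same arithmetic applies to the other added panels.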
diff --git a/drivers/video/omap2/displays/panel-n8x0.c b/drivers/video/omap2/displays/panel-n8x0.c
index dc9408dc93d1..4a34cdc1371b 100644
--- a/drivers/video/omap2/displays/panel-n8x0.c
+++ b/drivers/video/omap2/displays/panel-n8x0.c
@@ -610,12 +610,6 @@ static int n8x0_panel_resume(struct omap_dss_device *dssdev)
610 return 0; 610 return 0;
611} 611}
612 612
613static void n8x0_panel_get_timings(struct omap_dss_device *dssdev,
614 struct omap_video_timings *timings)
615{
616 *timings = dssdev->panel.timings;
617}
618
619static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev, 613static void n8x0_panel_get_resolution(struct omap_dss_device *dssdev,
620 u16 *xres, u16 *yres) 614 u16 *xres, u16 *yres)
621{ 615{
@@ -678,8 +672,6 @@ static struct omap_dss_driver n8x0_panel_driver = {
678 .get_resolution = n8x0_panel_get_resolution, 672 .get_resolution = n8x0_panel_get_resolution,
679 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 673 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
680 674
681 .get_timings = n8x0_panel_get_timings,
682
683 .driver = { 675 .driver = {
684 .name = "n8x0_panel", 676 .name = "n8x0_panel",
685 .owner = THIS_MODULE, 677 .owner = THIS_MODULE,
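The get_timings hooks removed from this and the other panel drivers in this series were identical copies of "*timings = dssdev->panel.timings"; the core.c hunk further down installs omapdss_default_get_timings whenever a driver registers with the hook left NULL, which makes the per-driver copies redundant. A small standalone sketch of that fill-in-the-default-op pattern (the structs and names here are illustrative stand-ins, not the omapdss API):

#include <stdio.h>

struct timings { int x_res, y_res; };

struct device {
	struct timings panel_timings;
};

struct driver {
	const char *name;
	/* optional hook: a panel driver may leave this NULL */
	void (*get_timings)(struct device *dev, struct timings *t);
};

/* default used when a driver does not provide its own hook */
static void default_get_timings(struct device *dev, struct timings *t)
{
	*t = dev->panel_timings;
}

static void register_driver(struct driver *drv)
{
	if (drv->get_timings == NULL)
		drv->get_timings = default_get_timings;
}

int main(void)
{
	struct device dev = { .panel_timings = { 800, 480 } };
	struct driver drv = { .name = "n8x0_panel" };	/* no get_timings */
	struct timings t;

	register_driver(&drv);
	drv.get_timings(&dev, &t);
	printf("%s: %dx%d\n", drv.name, t.x_res, t.y_res);
	return 0;
}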
diff --git a/drivers/video/omap2/displays/panel-taal.c b/drivers/video/omap2/displays/panel-taal.c
index b2dd88b48420..901576eb5a84 100644
--- a/drivers/video/omap2/displays/panel-taal.c
+++ b/drivers/video/omap2/displays/panel-taal.c
@@ -30,7 +30,6 @@
30#include <linux/gpio.h> 30#include <linux/gpio.h>
31#include <linux/workqueue.h> 31#include <linux/workqueue.h>
32#include <linux/slab.h> 32#include <linux/slab.h>
33#include <linux/regulator/consumer.h>
34#include <linux/mutex.h> 33#include <linux/mutex.h>
35 34
36#include <video/omapdss.h> 35#include <video/omapdss.h>
@@ -55,73 +54,6 @@ static int _taal_enable_te(struct omap_dss_device *dssdev, bool enable);
55 54
56static int taal_panel_reset(struct omap_dss_device *dssdev); 55static int taal_panel_reset(struct omap_dss_device *dssdev);
57 56
58struct panel_regulator {
59 struct regulator *regulator;
60 const char *name;
61 int min_uV;
62 int max_uV;
63};
64
65static void free_regulators(struct panel_regulator *regulators, int n)
66{
67 int i;
68
69 for (i = 0; i < n; i++) {
70 /* disable/put in reverse order */
71 regulator_disable(regulators[n - i - 1].regulator);
72 regulator_put(regulators[n - i - 1].regulator);
73 }
74}
75
76static int init_regulators(struct omap_dss_device *dssdev,
77 struct panel_regulator *regulators, int n)
78{
79 int r, i, v;
80
81 for (i = 0; i < n; i++) {
82 struct regulator *reg;
83
84 reg = regulator_get(&dssdev->dev, regulators[i].name);
85 if (IS_ERR(reg)) {
86 dev_err(&dssdev->dev, "failed to get regulator %s\n",
87 regulators[i].name);
88 r = PTR_ERR(reg);
89 goto err;
90 }
91
92 /* FIXME: better handling of fixed vs. variable regulators */
93 v = regulator_get_voltage(reg);
94 if (v < regulators[i].min_uV || v > regulators[i].max_uV) {
95 r = regulator_set_voltage(reg, regulators[i].min_uV,
96 regulators[i].max_uV);
97 if (r) {
98 dev_err(&dssdev->dev,
99 "failed to set regulator %s voltage\n",
100 regulators[i].name);
101 regulator_put(reg);
102 goto err;
103 }
104 }
105
106 r = regulator_enable(reg);
107 if (r) {
108 dev_err(&dssdev->dev, "failed to enable regulator %s\n",
109 regulators[i].name);
110 regulator_put(reg);
111 goto err;
112 }
113
114 regulators[i].regulator = reg;
115 }
116
117 return 0;
118
119err:
120 free_regulators(regulators, i);
121
122 return r;
123}
124
125/** 57/**
126 * struct panel_config - panel configuration 58 * struct panel_config - panel configuration
127 * @name: panel name 59 * @name: panel name
@@ -150,8 +82,6 @@ struct panel_config {
150 unsigned int low; 82 unsigned int low;
151 } reset_sequence; 83 } reset_sequence;
152 84
153 struct panel_regulator *regulators;
154 int num_regulators;
155}; 85};
156 86
157enum { 87enum {
@@ -577,12 +507,6 @@ static const struct backlight_ops taal_bl_ops = {
577 .update_status = taal_bl_update_status, 507 .update_status = taal_bl_update_status,
578}; 508};
579 509
580static void taal_get_timings(struct omap_dss_device *dssdev,
581 struct omap_video_timings *timings)
582{
583 *timings = dssdev->panel.timings;
584}
585
586static void taal_get_resolution(struct omap_dss_device *dssdev, 510static void taal_get_resolution(struct omap_dss_device *dssdev,
587 u16 *xres, u16 *yres) 511 u16 *xres, u16 *yres)
588{ 512{
@@ -602,7 +526,7 @@ static ssize_t taal_num_errors_show(struct device *dev,
602{ 526{
603 struct omap_dss_device *dssdev = to_dss_device(dev); 527 struct omap_dss_device *dssdev = to_dss_device(dev);
604 struct taal_data *td = dev_get_drvdata(&dssdev->dev); 528 struct taal_data *td = dev_get_drvdata(&dssdev->dev);
605 u8 errors; 529 u8 errors = 0;
606 int r; 530 int r;
607 531
608 mutex_lock(&td->lock); 532 mutex_lock(&td->lock);
@@ -977,11 +901,6 @@ static int taal_probe(struct omap_dss_device *dssdev)
977 901
978 atomic_set(&td->do_update, 0); 902 atomic_set(&td->do_update, 0);
979 903
980 r = init_regulators(dssdev, panel_config->regulators,
981 panel_config->num_regulators);
982 if (r)
983 goto err_reg;
984
985 td->workqueue = create_singlethread_workqueue("taal_esd"); 904 td->workqueue = create_singlethread_workqueue("taal_esd");
986 if (td->workqueue == NULL) { 905 if (td->workqueue == NULL) {
987 dev_err(&dssdev->dev, "can't create ESD workqueue\n"); 906 dev_err(&dssdev->dev, "can't create ESD workqueue\n");
@@ -1087,8 +1006,6 @@ err_bl:
1087err_rst_gpio: 1006err_rst_gpio:
1088 destroy_workqueue(td->workqueue); 1007 destroy_workqueue(td->workqueue);
1089err_wq: 1008err_wq:
1090 free_regulators(panel_config->regulators, panel_config->num_regulators);
1091err_reg:
1092 kfree(td); 1009 kfree(td);
1093err: 1010err:
1094 return r; 1011 return r;
@@ -1125,9 +1042,6 @@ static void __exit taal_remove(struct omap_dss_device *dssdev)
1125 /* reset, to be sure that the panel is in a valid state */ 1042 /* reset, to be sure that the panel is in a valid state */
1126 taal_hw_reset(dssdev); 1043 taal_hw_reset(dssdev);
1127 1044
1128 free_regulators(td->panel_config->regulators,
1129 td->panel_config->num_regulators);
1130
1131 if (gpio_is_valid(panel_data->reset_gpio)) 1045 if (gpio_is_valid(panel_data->reset_gpio))
1132 gpio_free(panel_data->reset_gpio); 1046 gpio_free(panel_data->reset_gpio);
1133 1047
@@ -1909,8 +1823,6 @@ static struct omap_dss_driver taal_driver = {
1909 .run_test = taal_run_test, 1823 .run_test = taal_run_test,
1910 .memory_read = taal_memory_read, 1824 .memory_read = taal_memory_read,
1911 1825
1912 .get_timings = taal_get_timings,
1913
1914 .driver = { 1826 .driver = {
1915 .name = "taal", 1827 .name = "taal",
1916 .owner = THIS_MODULE, 1828 .owner = THIS_MODULE,
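The dropped init_regulators()/free_regulators() helpers followed the usual acquire-in-order, release-in-reverse convention: note the regulators[n - i - 1] indexing in free_regulators() and the free_regulators(regulators, i) call on the error path, which unwinds only the entries that were successfully set up. A stripped-down userspace sketch of that unwinding shape, with a fake acquire/release pair standing in for regulator_get()+regulator_enable() and regulator_disable()+regulator_put():

#include <stdio.h>

#define N 3

static int acquire(int i)
{
	printf("acquire %d\n", i);
	return i == 2 ? -1 : 0;		/* pretend the third one fails */
}

static void release(int i)
{
	printf("release %d\n", i);
}

static int acquire_all(void)
{
	int i, r;

	for (i = 0; i < N; i++) {
		r = acquire(i);
		if (r)
			goto err;
	}
	return 0;
err:
	while (--i >= 0)		/* undo only what succeeded, newest first */
		release(i);
	return r;
}

int main(void)
{
	return acquire_all() ? 1 : 0;
}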
diff --git a/drivers/video/omap2/displays/panel-tfp410.c b/drivers/video/omap2/displays/panel-tfp410.c
index 52637fa8fda8..bff306e041ca 100644
--- a/drivers/video/omap2/displays/panel-tfp410.c
+++ b/drivers/video/omap2/displays/panel-tfp410.c
@@ -47,13 +47,9 @@ struct panel_drv_data {
47 struct mutex lock; 47 struct mutex lock;
48 48
49 int pd_gpio; 49 int pd_gpio;
50};
51 50
52static inline struct tfp410_platform_data 51 struct i2c_adapter *i2c_adapter;
53*get_pdata(const struct omap_dss_device *dssdev) 52};
54{
55 return dssdev->data;
56}
57 53
58static int tfp410_power_on(struct omap_dss_device *dssdev) 54static int tfp410_power_on(struct omap_dss_device *dssdev)
59{ 55{
@@ -68,7 +64,7 @@ static int tfp410_power_on(struct omap_dss_device *dssdev)
68 goto err0; 64 goto err0;
69 65
70 if (gpio_is_valid(ddata->pd_gpio)) 66 if (gpio_is_valid(ddata->pd_gpio))
71 gpio_set_value(ddata->pd_gpio, 1); 67 gpio_set_value_cansleep(ddata->pd_gpio, 1);
72 68
73 return 0; 69 return 0;
74err0: 70err0:
@@ -83,18 +79,18 @@ static void tfp410_power_off(struct omap_dss_device *dssdev)
83 return; 79 return;
84 80
85 if (gpio_is_valid(ddata->pd_gpio)) 81 if (gpio_is_valid(ddata->pd_gpio))
86 gpio_set_value(ddata->pd_gpio, 0); 82 gpio_set_value_cansleep(ddata->pd_gpio, 0);
87 83
88 omapdss_dpi_display_disable(dssdev); 84 omapdss_dpi_display_disable(dssdev);
89} 85}
90 86
91static int tfp410_probe(struct omap_dss_device *dssdev) 87static int tfp410_probe(struct omap_dss_device *dssdev)
92{ 88{
93 struct tfp410_platform_data *pdata = get_pdata(dssdev);
94 struct panel_drv_data *ddata; 89 struct panel_drv_data *ddata;
95 int r; 90 int r;
91 int i2c_bus_num;
96 92
97 ddata = kzalloc(sizeof(*ddata), GFP_KERNEL); 93 ddata = devm_kzalloc(&dssdev->dev, sizeof(*ddata), GFP_KERNEL);
98 if (!ddata) 94 if (!ddata)
99 return -ENOMEM; 95 return -ENOMEM;
100 96
@@ -104,10 +100,15 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
104 ddata->dssdev = dssdev; 100 ddata->dssdev = dssdev;
105 mutex_init(&ddata->lock); 101 mutex_init(&ddata->lock);
106 102
107 if (pdata) 103 if (dssdev->data) {
104 struct tfp410_platform_data *pdata = dssdev->data;
105
108 ddata->pd_gpio = pdata->power_down_gpio; 106 ddata->pd_gpio = pdata->power_down_gpio;
109 else 107 i2c_bus_num = pdata->i2c_bus_num;
108 } else {
110 ddata->pd_gpio = -1; 109 ddata->pd_gpio = -1;
110 i2c_bus_num = -1;
111 }
111 112
112 if (gpio_is_valid(ddata->pd_gpio)) { 113 if (gpio_is_valid(ddata->pd_gpio)) {
113 r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW, 114 r = gpio_request_one(ddata->pd_gpio, GPIOF_OUT_INIT_LOW,
@@ -115,13 +116,31 @@ static int tfp410_probe(struct omap_dss_device *dssdev)
115 if (r) { 116 if (r) {
116 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n", 117 dev_err(&dssdev->dev, "Failed to request PD GPIO %d\n",
117 ddata->pd_gpio); 118 ddata->pd_gpio);
118 ddata->pd_gpio = -1; 119 return r;
119 } 120 }
120 } 121 }
121 122
123 if (i2c_bus_num != -1) {
124 struct i2c_adapter *adapter;
125
126 adapter = i2c_get_adapter(i2c_bus_num);
127 if (!adapter) {
128 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
129 i2c_bus_num);
130 r = -EINVAL;
131 goto err_i2c;
132 }
133
134 ddata->i2c_adapter = adapter;
135 }
136
122 dev_set_drvdata(&dssdev->dev, ddata); 137 dev_set_drvdata(&dssdev->dev, ddata);
123 138
124 return 0; 139 return 0;
140err_i2c:
141 if (gpio_is_valid(ddata->pd_gpio))
142 gpio_free(ddata->pd_gpio);
143 return r;
125} 144}
126 145
127static void __exit tfp410_remove(struct omap_dss_device *dssdev) 146static void __exit tfp410_remove(struct omap_dss_device *dssdev)
@@ -130,14 +149,15 @@ static void __exit tfp410_remove(struct omap_dss_device *dssdev)
130 149
131 mutex_lock(&ddata->lock); 150 mutex_lock(&ddata->lock);
132 151
152 if (ddata->i2c_adapter)
153 i2c_put_adapter(ddata->i2c_adapter);
154
133 if (gpio_is_valid(ddata->pd_gpio)) 155 if (gpio_is_valid(ddata->pd_gpio))
134 gpio_free(ddata->pd_gpio); 156 gpio_free(ddata->pd_gpio);
135 157
136 dev_set_drvdata(&dssdev->dev, NULL); 158 dev_set_drvdata(&dssdev->dev, NULL);
137 159
138 mutex_unlock(&ddata->lock); 160 mutex_unlock(&ddata->lock);
139
140 kfree(ddata);
141} 161}
142 162
143static int tfp410_enable(struct omap_dss_device *dssdev) 163static int tfp410_enable(struct omap_dss_device *dssdev)
@@ -269,27 +289,17 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
269 u8 *edid, int len) 289 u8 *edid, int len)
270{ 290{
271 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); 291 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
272 struct tfp410_platform_data *pdata = get_pdata(dssdev);
273 struct i2c_adapter *adapter;
274 int r, l, bytes_read; 292 int r, l, bytes_read;
275 293
276 mutex_lock(&ddata->lock); 294 mutex_lock(&ddata->lock);
277 295
278 if (pdata->i2c_bus_num == 0) { 296 if (!ddata->i2c_adapter) {
279 r = -ENODEV; 297 r = -ENODEV;
280 goto err; 298 goto err;
281 } 299 }
282 300
283 adapter = i2c_get_adapter(pdata->i2c_bus_num);
284 if (!adapter) {
285 dev_err(&dssdev->dev, "Failed to get I2C adapter, bus %d\n",
286 pdata->i2c_bus_num);
287 r = -EINVAL;
288 goto err;
289 }
290
291 l = min(EDID_LENGTH, len); 301 l = min(EDID_LENGTH, len);
292 r = tfp410_ddc_read(adapter, edid, l, 0); 302 r = tfp410_ddc_read(ddata->i2c_adapter, edid, l, 0);
293 if (r) 303 if (r)
294 goto err; 304 goto err;
295 305
@@ -299,7 +309,7 @@ static int tfp410_read_edid(struct omap_dss_device *dssdev,
299 if (len > EDID_LENGTH && edid[0x7e] > 0) { 309 if (len > EDID_LENGTH && edid[0x7e] > 0) {
300 l = min(EDID_LENGTH, len - EDID_LENGTH); 310 l = min(EDID_LENGTH, len - EDID_LENGTH);
301 311
302 r = tfp410_ddc_read(adapter, edid + EDID_LENGTH, 312 r = tfp410_ddc_read(ddata->i2c_adapter, edid + EDID_LENGTH,
303 l, EDID_LENGTH); 313 l, EDID_LENGTH);
304 if (r) 314 if (r)
305 goto err; 315 goto err;
@@ -319,21 +329,15 @@ err:
319static bool tfp410_detect(struct omap_dss_device *dssdev) 329static bool tfp410_detect(struct omap_dss_device *dssdev)
320{ 330{
321 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev); 331 struct panel_drv_data *ddata = dev_get_drvdata(&dssdev->dev);
322 struct tfp410_platform_data *pdata = get_pdata(dssdev);
323 struct i2c_adapter *adapter;
324 unsigned char out; 332 unsigned char out;
325 int r; 333 int r;
326 334
327 mutex_lock(&ddata->lock); 335 mutex_lock(&ddata->lock);
328 336
329 if (pdata->i2c_bus_num == 0) 337 if (!ddata->i2c_adapter)
330 goto out;
331
332 adapter = i2c_get_adapter(pdata->i2c_bus_num);
333 if (!adapter)
334 goto out; 338 goto out;
335 339
336 r = tfp410_ddc_read(adapter, &out, 1, 0); 340 r = tfp410_ddc_read(ddata->i2c_adapter, &out, 1, 0);
337 341
338 mutex_unlock(&ddata->lock); 342 mutex_unlock(&ddata->lock);
339 343
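After this change tfp410_read_edid() works purely from the i2c_adapter cached at probe time, but its control flow is unchanged: read the base EDID block, then, if byte 0x7e of that block advertises extension blocks and the caller's buffer has room, read one more block at offset EDID_LENGTH and return the total byte count. A compilable sketch of just that flow, with a stubbed DDC read in place of tfp410_ddc_read() (the stub's behaviour and the buffer size are illustrative only):

#include <stdio.h>
#include <string.h>

#define EDID_LENGTH 128

/* stub standing in for tfp410_ddc_read(): zero-fills the buffer and
 * reports one extension block in byte 0x7e of the base block */
static int ddc_read(unsigned char *buf, int len, unsigned offset)
{
	memset(buf, 0, len);
	if (offset == 0 && len > 0x7e)
		buf[0x7e] = 1;
	return 0;
}

static int read_edid(unsigned char *edid, int len)
{
	int r, l, bytes_read;

	l = len < EDID_LENGTH ? len : EDID_LENGTH;
	r = ddc_read(edid, l, 0);
	if (r)
		return r;
	bytes_read = l;

	/* possibly read the first extension block as well */
	if (len > EDID_LENGTH && edid[0x7e] > 0) {
		l = (len - EDID_LENGTH) < EDID_LENGTH ?
			(len - EDID_LENGTH) : EDID_LENGTH;
		r = ddc_read(edid + EDID_LENGTH, l, EDID_LENGTH);
		if (r)
			return r;
		bytes_read += l;
	}
	return bytes_read;
}

int main(void)
{
	unsigned char edid[2 * EDID_LENGTH];

	printf("read %d bytes\n", read_edid(edid, sizeof(edid)));
	return 0;
}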
diff --git a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
index 32f3fcd7f0f0..4b6448b3c31f 100644
--- a/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
+++ b/drivers/video/omap2/displays/panel-tpo-td043mtea1.c
@@ -272,13 +272,16 @@ static const struct omap_video_timings tpo_td043_timings = {
272static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043) 272static int tpo_td043_power_on(struct tpo_td043_device *tpo_td043)
273{ 273{
274 int nreset_gpio = tpo_td043->nreset_gpio; 274 int nreset_gpio = tpo_td043->nreset_gpio;
275 int r;
275 276
276 if (tpo_td043->powered_on) 277 if (tpo_td043->powered_on)
277 return 0; 278 return 0;
278 279
279 regulator_enable(tpo_td043->vcc_reg); 280 r = regulator_enable(tpo_td043->vcc_reg);
281 if (r != 0)
282 return r;
280 283
281 /* wait for regulator to stabilize */ 284 /* wait for panel to stabilize */
282 msleep(160); 285 msleep(160);
283 286
284 if (gpio_is_valid(nreset_gpio)) 287 if (gpio_is_valid(nreset_gpio))
@@ -470,6 +473,18 @@ static void tpo_td043_remove(struct omap_dss_device *dssdev)
470 gpio_free(nreset_gpio); 473 gpio_free(nreset_gpio);
471} 474}
472 475
476static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
477 struct omap_video_timings *timings)
478{
479 dpi_set_timings(dssdev, timings);
480}
481
482static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
483 struct omap_video_timings *timings)
484{
485 return dpi_check_timings(dssdev, timings);
486}
487
473static struct omap_dss_driver tpo_td043_driver = { 488static struct omap_dss_driver tpo_td043_driver = {
474 .probe = tpo_td043_probe, 489 .probe = tpo_td043_probe,
475 .remove = tpo_td043_remove, 490 .remove = tpo_td043_remove,
@@ -481,6 +496,9 @@ static struct omap_dss_driver tpo_td043_driver = {
481 .set_mirror = tpo_td043_set_hmirror, 496 .set_mirror = tpo_td043_set_hmirror,
482 .get_mirror = tpo_td043_get_hmirror, 497 .get_mirror = tpo_td043_get_hmirror,
483 498
499 .set_timings = tpo_td043_set_timings,
500 .check_timings = tpo_td043_check_timings,
501
484 .driver = { 502 .driver = {
485 .name = "tpo_td043mtea1_panel", 503 .name = "tpo_td043mtea1_panel",
486 .owner = THIS_MODULE, 504 .owner = THIS_MODULE,
diff --git a/drivers/video/omap2/dss/Kconfig b/drivers/video/omap2/dss/Kconfig
index 7be7c06a249e..43324e5ed25f 100644
--- a/drivers/video/omap2/dss/Kconfig
+++ b/drivers/video/omap2/dss/Kconfig
@@ -68,6 +68,10 @@ config OMAP4_DSS_HDMI
68 HDMI Interface. This adds the High Definition Multimedia Interface. 68 HDMI Interface. This adds the High Definition Multimedia Interface.
69 See http://www.hdmi.org/ for HDMI specification. 69 See http://www.hdmi.org/ for HDMI specification.
70 70
71config OMAP4_DSS_HDMI_AUDIO
72 bool
73 depends on OMAP4_DSS_HDMI
74
71config OMAP2_DSS_SDI 75config OMAP2_DSS_SDI
72 bool "SDI support" 76 bool "SDI support"
73 depends on ARCH_OMAP3 77 depends on ARCH_OMAP3
@@ -90,15 +94,6 @@ config OMAP2_DSS_DSI
90 94
91 See http://www.mipi.org/ for DSI spesifications. 95 See http://www.mipi.org/ for DSI spesifications.
92 96
93config OMAP2_DSS_FAKE_VSYNC
94 bool "Fake VSYNC irq from manual update displays"
95 default n
96 help
97 If this is selected, DSI will generate a fake DISPC VSYNC interrupt
98 when DSI has sent a frame. This is only needed with DSI or RFBI
99 displays using manual mode, and you want VSYNC to, for example,
100 time animation.
101
102config OMAP2_DSS_MIN_FCK_PER_PCK 97config OMAP2_DSS_MIN_FCK_PER_PCK
103 int "Minimum FCK/PCK ratio (for scaling)" 98 int "Minimum FCK/PCK ratio (for scaling)"
104 range 0 32 99 range 0 32
diff --git a/drivers/video/omap2/dss/apply.c b/drivers/video/omap2/dss/apply.c
index b10b3bc1931e..ab22cc224f3e 100644
--- a/drivers/video/omap2/dss/apply.c
+++ b/drivers/video/omap2/dss/apply.c
@@ -99,6 +99,11 @@ struct mgr_priv_data {
99 99
100 /* If true, a display is enabled using this manager */ 100 /* If true, a display is enabled using this manager */
101 bool enabled; 101 bool enabled;
102
103 bool extra_info_dirty;
104 bool shadow_extra_info_dirty;
105
106 struct omap_video_timings timings;
102}; 107};
103 108
104static struct { 109static struct {
@@ -176,7 +181,7 @@ static bool mgr_manual_update(struct omap_overlay_manager *mgr)
176} 181}
177 182
178static int dss_check_settings_low(struct omap_overlay_manager *mgr, 183static int dss_check_settings_low(struct omap_overlay_manager *mgr,
179 struct omap_dss_device *dssdev, bool applying) 184 bool applying)
180{ 185{
181 struct omap_overlay_info *oi; 186 struct omap_overlay_info *oi;
182 struct omap_overlay_manager_info *mi; 187 struct omap_overlay_manager_info *mi;
@@ -187,6 +192,9 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
187 192
188 mp = get_mgr_priv(mgr); 193 mp = get_mgr_priv(mgr);
189 194
195 if (!mp->enabled)
196 return 0;
197
190 if (applying && mp->user_info_dirty) 198 if (applying && mp->user_info_dirty)
191 mi = &mp->user_info; 199 mi = &mp->user_info;
192 else 200 else
@@ -206,26 +214,24 @@ static int dss_check_settings_low(struct omap_overlay_manager *mgr,
206 ois[ovl->id] = oi; 214 ois[ovl->id] = oi;
207 } 215 }
208 216
209 return dss_mgr_check(mgr, dssdev, mi, ois); 217 return dss_mgr_check(mgr, mi, &mp->timings, ois);
210} 218}
211 219
212/* 220/*
213 * check manager and overlay settings using overlay_info from data->info 221 * check manager and overlay settings using overlay_info from data->info
214 */ 222 */
215static int dss_check_settings(struct omap_overlay_manager *mgr, 223static int dss_check_settings(struct omap_overlay_manager *mgr)
216 struct omap_dss_device *dssdev)
217{ 224{
218 return dss_check_settings_low(mgr, dssdev, false); 225 return dss_check_settings_low(mgr, false);
219} 226}
220 227
221/* 228/*
222 * check manager and overlay settings using overlay_info from ovl->info if 229 * check manager and overlay settings using overlay_info from ovl->info if
223 * dirty and from data->info otherwise 230 * dirty and from data->info otherwise
224 */ 231 */
225static int dss_check_settings_apply(struct omap_overlay_manager *mgr, 232static int dss_check_settings_apply(struct omap_overlay_manager *mgr)
226 struct omap_dss_device *dssdev)
227{ 233{
228 return dss_check_settings_low(mgr, dssdev, true); 234 return dss_check_settings_low(mgr, true);
229} 235}
230 236
231static bool need_isr(void) 237static bool need_isr(void)
@@ -261,6 +267,20 @@ static bool need_isr(void)
261 if (mp->shadow_info_dirty) 267 if (mp->shadow_info_dirty)
262 return true; 268 return true;
263 269
270 /*
271 * NOTE: we don't check extra_info flags for disabled
272 * managers, once the manager is enabled, the extra_info
273 * related manager changes will be taken in by HW.
274 */
275
276 /* to write new values to registers */
277 if (mp->extra_info_dirty)
278 return true;
279
280 /* to set GO bit */
281 if (mp->shadow_extra_info_dirty)
282 return true;
283
264 list_for_each_entry(ovl, &mgr->overlays, list) { 284 list_for_each_entry(ovl, &mgr->overlays, list) {
265 struct ovl_priv_data *op; 285 struct ovl_priv_data *op;
266 286
@@ -305,7 +325,7 @@ static bool need_go(struct omap_overlay_manager *mgr)
305 325
306 mp = get_mgr_priv(mgr); 326 mp = get_mgr_priv(mgr);
307 327
308 if (mp->shadow_info_dirty) 328 if (mp->shadow_info_dirty || mp->shadow_extra_info_dirty)
309 return true; 329 return true;
310 330
311 list_for_each_entry(ovl, &mgr->overlays, list) { 331 list_for_each_entry(ovl, &mgr->overlays, list) {
@@ -320,20 +340,16 @@ static bool need_go(struct omap_overlay_manager *mgr)
320/* returns true if an extra_info field is currently being updated */ 340/* returns true if an extra_info field is currently being updated */
321static bool extra_info_update_ongoing(void) 341static bool extra_info_update_ongoing(void)
322{ 342{
323 const int num_ovls = omap_dss_get_num_overlays(); 343 const int num_mgrs = dss_feat_get_num_mgrs();
324 struct ovl_priv_data *op;
325 struct omap_overlay *ovl;
326 struct mgr_priv_data *mp;
327 int i; 344 int i;
328 345
329 for (i = 0; i < num_ovls; ++i) { 346 for (i = 0; i < num_mgrs; ++i) {
330 ovl = omap_dss_get_overlay(i); 347 struct omap_overlay_manager *mgr;
331 op = get_ovl_priv(ovl); 348 struct omap_overlay *ovl;
332 349 struct mgr_priv_data *mp;
333 if (!ovl->manager)
334 continue;
335 350
336 mp = get_mgr_priv(ovl->manager); 351 mgr = omap_dss_get_overlay_manager(i);
352 mp = get_mgr_priv(mgr);
337 353
338 if (!mp->enabled) 354 if (!mp->enabled)
339 continue; 355 continue;
@@ -341,8 +357,15 @@ static bool extra_info_update_ongoing(void)
341 if (!mp->updating) 357 if (!mp->updating)
342 continue; 358 continue;
343 359
344 if (op->extra_info_dirty || op->shadow_extra_info_dirty) 360 if (mp->extra_info_dirty || mp->shadow_extra_info_dirty)
345 return true; 361 return true;
362
363 list_for_each_entry(ovl, &mgr->overlays, list) {
364 struct ovl_priv_data *op = get_ovl_priv(ovl);
365
366 if (op->extra_info_dirty || op->shadow_extra_info_dirty)
367 return true;
368 }
346 } 369 }
347 370
348 return false; 371 return false;
@@ -525,11 +548,13 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
525 548
526 oi = &op->info; 549 oi = &op->info;
527 550
551 mp = get_mgr_priv(ovl->manager);
552
528 replication = dss_use_replication(ovl->manager->device, oi->color_mode); 553 replication = dss_use_replication(ovl->manager->device, oi->color_mode);
529 554
530 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC; 555 ilace = ovl->manager->device->type == OMAP_DISPLAY_TYPE_VENC;
531 556
532 r = dispc_ovl_setup(ovl->id, oi, ilace, replication); 557 r = dispc_ovl_setup(ovl->id, oi, ilace, replication, &mp->timings);
533 if (r) { 558 if (r) {
534 /* 559 /*
535 * We can't do much here, as this function can be called from 560 * We can't do much here, as this function can be called from
@@ -543,8 +568,6 @@ static void dss_ovl_write_regs(struct omap_overlay *ovl)
543 return; 568 return;
544 } 569 }
545 570
546 mp = get_mgr_priv(ovl->manager);
547
548 op->info_dirty = false; 571 op->info_dirty = false;
549 if (mp->updating) 572 if (mp->updating)
550 op->shadow_info_dirty = true; 573 op->shadow_info_dirty = true;
@@ -601,6 +624,22 @@ static void dss_mgr_write_regs(struct omap_overlay_manager *mgr)
601 } 624 }
602} 625}
603 626
627static void dss_mgr_write_regs_extra(struct omap_overlay_manager *mgr)
628{
629 struct mgr_priv_data *mp = get_mgr_priv(mgr);
630
631 DSSDBGF("%d", mgr->id);
632
633 if (!mp->extra_info_dirty)
634 return;
635
636 dispc_mgr_set_timings(mgr->id, &mp->timings);
637
638 mp->extra_info_dirty = false;
639 if (mp->updating)
640 mp->shadow_extra_info_dirty = true;
641}
642
604static void dss_write_regs_common(void) 643static void dss_write_regs_common(void)
605{ 644{
606 const int num_mgrs = omap_dss_get_num_overlay_managers(); 645 const int num_mgrs = omap_dss_get_num_overlay_managers();
@@ -646,7 +685,7 @@ static void dss_write_regs(void)
646 if (!mp->enabled || mgr_manual_update(mgr) || mp->busy) 685 if (!mp->enabled || mgr_manual_update(mgr) || mp->busy)
647 continue; 686 continue;
648 687
649 r = dss_check_settings(mgr, mgr->device); 688 r = dss_check_settings(mgr);
650 if (r) { 689 if (r) {
651 DSSERR("cannot write registers for manager %s: " 690 DSSERR("cannot write registers for manager %s: "
652 "illegal configuration\n", mgr->name); 691 "illegal configuration\n", mgr->name);
@@ -654,6 +693,7 @@ static void dss_write_regs(void)
654 } 693 }
655 694
656 dss_mgr_write_regs(mgr); 695 dss_mgr_write_regs(mgr);
696 dss_mgr_write_regs_extra(mgr);
657 } 697 }
658} 698}
659 699
@@ -693,6 +733,7 @@ static void mgr_clear_shadow_dirty(struct omap_overlay_manager *mgr)
693 733
694 mp = get_mgr_priv(mgr); 734 mp = get_mgr_priv(mgr);
695 mp->shadow_info_dirty = false; 735 mp->shadow_info_dirty = false;
736 mp->shadow_extra_info_dirty = false;
696 737
697 list_for_each_entry(ovl, &mgr->overlays, list) { 738 list_for_each_entry(ovl, &mgr->overlays, list) {
698 op = get_ovl_priv(ovl); 739 op = get_ovl_priv(ovl);
@@ -711,7 +752,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
711 752
712 WARN_ON(mp->updating); 753 WARN_ON(mp->updating);
713 754
714 r = dss_check_settings(mgr, mgr->device); 755 r = dss_check_settings(mgr);
715 if (r) { 756 if (r) {
716 DSSERR("cannot start manual update: illegal configuration\n"); 757 DSSERR("cannot start manual update: illegal configuration\n");
717 spin_unlock_irqrestore(&data_lock, flags); 758 spin_unlock_irqrestore(&data_lock, flags);
@@ -719,6 +760,7 @@ void dss_mgr_start_update(struct omap_overlay_manager *mgr)
719 } 760 }
720 761
721 dss_mgr_write_regs(mgr); 762 dss_mgr_write_regs(mgr);
763 dss_mgr_write_regs_extra(mgr);
722 764
723 dss_write_regs_common(); 765 dss_write_regs_common();
724 766
@@ -857,7 +899,7 @@ int omap_dss_mgr_apply(struct omap_overlay_manager *mgr)
857 899
858 spin_lock_irqsave(&data_lock, flags); 900 spin_lock_irqsave(&data_lock, flags);
859 901
860 r = dss_check_settings_apply(mgr, mgr->device); 902 r = dss_check_settings_apply(mgr);
861 if (r) { 903 if (r) {
862 spin_unlock_irqrestore(&data_lock, flags); 904 spin_unlock_irqrestore(&data_lock, flags);
863 DSSERR("failed to apply settings: illegal configuration.\n"); 905 DSSERR("failed to apply settings: illegal configuration.\n");
@@ -918,16 +960,13 @@ static void dss_ovl_setup_fifo(struct omap_overlay *ovl,
918 bool use_fifo_merge) 960 bool use_fifo_merge)
919{ 961{
920 struct ovl_priv_data *op = get_ovl_priv(ovl); 962 struct ovl_priv_data *op = get_ovl_priv(ovl);
921 struct omap_dss_device *dssdev;
922 u32 fifo_low, fifo_high; 963 u32 fifo_low, fifo_high;
923 964
924 if (!op->enabled && !op->enabling) 965 if (!op->enabled && !op->enabling)
925 return; 966 return;
926 967
927 dssdev = ovl->manager->device;
928
929 dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high, 968 dispc_ovl_compute_fifo_thresholds(ovl->id, &fifo_low, &fifo_high,
930 use_fifo_merge); 969 use_fifo_merge, ovl_manual_update(ovl));
931 970
932 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high); 971 dss_apply_ovl_fifo_thresholds(ovl, fifo_low, fifo_high);
933} 972}
@@ -1050,7 +1089,7 @@ int dss_mgr_enable(struct omap_overlay_manager *mgr)
1050 1089
1051 mp->enabled = true; 1090 mp->enabled = true;
1052 1091
1053 r = dss_check_settings(mgr, mgr->device); 1092 r = dss_check_settings(mgr);
1054 if (r) { 1093 if (r) {
1055 DSSERR("failed to enable manager %d: check_settings failed\n", 1094 DSSERR("failed to enable manager %d: check_settings failed\n",
1056 mgr->id); 1095 mgr->id);
@@ -1225,6 +1264,35 @@ err:
1225 return r; 1264 return r;
1226} 1265}
1227 1266
1267static void dss_apply_mgr_timings(struct omap_overlay_manager *mgr,
1268 struct omap_video_timings *timings)
1269{
1270 struct mgr_priv_data *mp = get_mgr_priv(mgr);
1271
1272 mp->timings = *timings;
1273 mp->extra_info_dirty = true;
1274}
1275
1276void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
1277 struct omap_video_timings *timings)
1278{
1279 unsigned long flags;
1280
1281 mutex_lock(&apply_lock);
1282
1283 spin_lock_irqsave(&data_lock, flags);
1284
1285 dss_apply_mgr_timings(mgr, timings);
1286
1287 dss_write_regs();
1288 dss_set_go_bits();
1289
1290 spin_unlock_irqrestore(&data_lock, flags);
1291
1292 wait_pending_extra_info_updates();
1293
1294 mutex_unlock(&apply_lock);
1295}
1228 1296
1229int dss_ovl_set_info(struct omap_overlay *ovl, 1297int dss_ovl_set_info(struct omap_overlay *ovl,
1230 struct omap_overlay_info *info) 1298 struct omap_overlay_info *info)
@@ -1393,7 +1461,7 @@ int dss_ovl_enable(struct omap_overlay *ovl)
1393 1461
1394 op->enabling = true; 1462 op->enabling = true;
1395 1463
1396 r = dss_check_settings(ovl->manager, ovl->manager->device); 1464 r = dss_check_settings(ovl->manager);
1397 if (r) { 1465 if (r) {
1398 DSSERR("failed to enable overlay %d: check_settings failed\n", 1466 DSSERR("failed to enable overlay %d: check_settings failed\n",
1399 ovl->id); 1467 ovl->id);
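The new manager-level timings ride on the same two-stage dirty tracking already used for overlay info: dss_mgr_set_timings() stores the timings and sets extra_info_dirty under data_lock, dss_mgr_write_regs_extra() pushes them to the shadow registers and, if an update is in flight, turns the flag into shadow_extra_info_dirty, which need_go()/need_isr() then watch until mgr_clear_shadow_dirty() clears it once the update completes. A small userspace model of that hand-off (field and function names below are local stand-ins, not the driver's API):

#include <stdbool.h>
#include <stdio.h>

struct mgr {
	bool updating;			/* update in progress */
	bool extra_info_dirty;		/* new value not yet in shadow regs */
	bool shadow_extra_info_dirty;	/* in shadow regs, GO not yet taken */
	int timings;			/* stand-in for omap_video_timings */
	int hw_shadow, hw_active;	/* stand-ins for the register pair */
};

static void set_timings(struct mgr *m, int t)
{
	m->timings = t;
	m->extra_info_dirty = true;
}

static void write_regs_extra(struct mgr *m)
{
	if (!m->extra_info_dirty)
		return;
	m->hw_shadow = m->timings;	/* dispc_mgr_set_timings() analogue */
	m->extra_info_dirty = false;
	if (m->updating)
		m->shadow_extra_info_dirty = true;
}

static void go_taken(struct mgr *m)	/* GO bit consumed, update done */
{
	m->hw_active = m->hw_shadow;
	m->shadow_extra_info_dirty = false;
}

int main(void)
{
	struct mgr m = { .updating = true };

	set_timings(&m, 42);
	write_regs_extra(&m);
	printf("shadow dirty: %d\n", m.shadow_extra_info_dirty);	/* 1 */
	go_taken(&m);
	printf("active: %d, shadow dirty: %d\n",
	       m.hw_active, m.shadow_extra_info_dirty);		/* 42, 0 */
	return 0;
}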
diff --git a/drivers/video/omap2/dss/core.c b/drivers/video/omap2/dss/core.c
index e8a120771ac6..5066eee10ccf 100644
--- a/drivers/video/omap2/dss/core.c
+++ b/drivers/video/omap2/dss/core.c
@@ -43,6 +43,8 @@ static struct {
43 43
44 struct regulator *vdds_dsi_reg; 44 struct regulator *vdds_dsi_reg;
45 struct regulator *vdds_sdi_reg; 45 struct regulator *vdds_sdi_reg;
46
47 const char *default_display_name;
46} core; 48} core;
47 49
48static char *def_disp_name; 50static char *def_disp_name;
@@ -54,9 +56,6 @@ bool dss_debug;
54module_param_named(debug, dss_debug, bool, 0644); 56module_param_named(debug, dss_debug, bool, 0644);
55#endif 57#endif
56 58
57static int omap_dss_register_device(struct omap_dss_device *);
58static void omap_dss_unregister_device(struct omap_dss_device *);
59
60/* REGULATORS */ 59/* REGULATORS */
61 60
62struct regulator *dss_get_vdds_dsi(void) 61struct regulator *dss_get_vdds_dsi(void)
@@ -87,6 +86,51 @@ struct regulator *dss_get_vdds_sdi(void)
87 return reg; 86 return reg;
88} 87}
89 88
89int dss_get_ctx_loss_count(struct device *dev)
90{
91 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
92 int cnt;
93
94 if (!board_data->get_context_loss_count)
95 return -ENOENT;
96
97 cnt = board_data->get_context_loss_count(dev);
98
99 WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
100
101 return cnt;
102}
103
104int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask)
105{
106 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
107
108 if (!board_data->dsi_enable_pads)
109 return -ENOENT;
110
111 return board_data->dsi_enable_pads(dsi_id, lane_mask);
112}
113
114void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask)
115{
116 struct omap_dss_board_info *board_data = core.pdev->dev.platform_data;
117
118 if (!board_data->dsi_enable_pads)
119 return;
120
121 return board_data->dsi_disable_pads(dsi_id, lane_mask);
122}
123
124int dss_set_min_bus_tput(struct device *dev, unsigned long tput)
125{
126 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
127
128 if (pdata->set_min_bus_tput)
129 return pdata->set_min_bus_tput(dev, tput);
130 else
131 return 0;
132}
133
90#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 134#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
91static int dss_debug_show(struct seq_file *s, void *unused) 135static int dss_debug_show(struct seq_file *s, void *unused)
92{ 136{
@@ -121,34 +165,6 @@ static int dss_initialize_debugfs(void)
121 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir, 165 debugfs_create_file("clk", S_IRUGO, dss_debugfs_dir,
122 &dss_debug_dump_clocks, &dss_debug_fops); 166 &dss_debug_dump_clocks, &dss_debug_fops);
123 167
124#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
125 debugfs_create_file("dispc_irq", S_IRUGO, dss_debugfs_dir,
126 &dispc_dump_irqs, &dss_debug_fops);
127#endif
128
129#if defined(CONFIG_OMAP2_DSS_DSI) && defined(CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS)
130 dsi_create_debugfs_files_irq(dss_debugfs_dir, &dss_debug_fops);
131#endif
132
133 debugfs_create_file("dss", S_IRUGO, dss_debugfs_dir,
134 &dss_dump_regs, &dss_debug_fops);
135 debugfs_create_file("dispc", S_IRUGO, dss_debugfs_dir,
136 &dispc_dump_regs, &dss_debug_fops);
137#ifdef CONFIG_OMAP2_DSS_RFBI
138 debugfs_create_file("rfbi", S_IRUGO, dss_debugfs_dir,
139 &rfbi_dump_regs, &dss_debug_fops);
140#endif
141#ifdef CONFIG_OMAP2_DSS_DSI
142 dsi_create_debugfs_files_reg(dss_debugfs_dir, &dss_debug_fops);
143#endif
144#ifdef CONFIG_OMAP2_DSS_VENC
145 debugfs_create_file("venc", S_IRUGO, dss_debugfs_dir,
146 &venc_dump_regs, &dss_debug_fops);
147#endif
148#ifdef CONFIG_OMAP4_DSS_HDMI
149 debugfs_create_file("hdmi", S_IRUGO, dss_debugfs_dir,
150 &hdmi_dump_regs, &dss_debug_fops);
151#endif
152 return 0; 168 return 0;
153} 169}
154 170
@@ -157,6 +173,19 @@ static void dss_uninitialize_debugfs(void)
157 if (dss_debugfs_dir) 173 if (dss_debugfs_dir)
158 debugfs_remove_recursive(dss_debugfs_dir); 174 debugfs_remove_recursive(dss_debugfs_dir);
159} 175}
176
177int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
178{
179 struct dentry *d;
180
181 d = debugfs_create_file(name, S_IRUGO, dss_debugfs_dir,
182 write, &dss_debug_fops);
183
184 if (IS_ERR(d))
185 return PTR_ERR(d);
186
187 return 0;
188}
160#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ 189#else /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
161static inline int dss_initialize_debugfs(void) 190static inline int dss_initialize_debugfs(void)
162{ 191{
@@ -165,14 +194,17 @@ static inline int dss_initialize_debugfs(void)
165static inline void dss_uninitialize_debugfs(void) 194static inline void dss_uninitialize_debugfs(void)
166{ 195{
167} 196}
197int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *))
198{
199 return 0;
200}
168#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */ 201#endif /* CONFIG_DEBUG_FS && CONFIG_OMAP2_DSS_DEBUG_SUPPORT */
169 202
170/* PLATFORM DEVICE */ 203/* PLATFORM DEVICE */
171static int omap_dss_probe(struct platform_device *pdev) 204static int __init omap_dss_probe(struct platform_device *pdev)
172{ 205{
173 struct omap_dss_board_info *pdata = pdev->dev.platform_data; 206 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
174 int r; 207 int r;
175 int i;
176 208
177 core.pdev = pdev; 209 core.pdev = pdev;
178 210
@@ -187,28 +219,13 @@ static int omap_dss_probe(struct platform_device *pdev)
187 if (r) 219 if (r)
188 goto err_debugfs; 220 goto err_debugfs;
189 221
190 for (i = 0; i < pdata->num_devices; ++i) { 222 if (def_disp_name)
191 struct omap_dss_device *dssdev = pdata->devices[i]; 223 core.default_display_name = def_disp_name;
192 224 else if (pdata->default_device)
193 r = omap_dss_register_device(dssdev); 225 core.default_display_name = pdata->default_device->name;
194 if (r) {
195 DSSERR("device %d %s register failed %d\n", i,
196 dssdev->name ?: "unnamed", r);
197
198 while (--i >= 0)
199 omap_dss_unregister_device(pdata->devices[i]);
200
201 goto err_register;
202 }
203
204 if (def_disp_name && strcmp(def_disp_name, dssdev->name) == 0)
205 pdata->default_device = dssdev;
206 }
207 226
208 return 0; 227 return 0;
209 228
210err_register:
211 dss_uninitialize_debugfs();
212err_debugfs: 229err_debugfs:
213 230
214 return r; 231 return r;
@@ -216,17 +233,11 @@ err_debugfs:
216 233
217static int omap_dss_remove(struct platform_device *pdev) 234static int omap_dss_remove(struct platform_device *pdev)
218{ 235{
219 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
220 int i;
221
222 dss_uninitialize_debugfs(); 236 dss_uninitialize_debugfs();
223 237
224 dss_uninit_overlays(pdev); 238 dss_uninit_overlays(pdev);
225 dss_uninit_overlay_managers(pdev); 239 dss_uninit_overlay_managers(pdev);
226 240
227 for (i = 0; i < pdata->num_devices; ++i)
228 omap_dss_unregister_device(pdata->devices[i]);
229
230 return 0; 241 return 0;
231} 242}
232 243
@@ -251,7 +262,6 @@ static int omap_dss_resume(struct platform_device *pdev)
251} 262}
252 263
253static struct platform_driver omap_dss_driver = { 264static struct platform_driver omap_dss_driver = {
254 .probe = omap_dss_probe,
255 .remove = omap_dss_remove, 265 .remove = omap_dss_remove,
256 .shutdown = omap_dss_shutdown, 266 .shutdown = omap_dss_shutdown,
257 .suspend = omap_dss_suspend, 267 .suspend = omap_dss_suspend,
@@ -326,7 +336,6 @@ static int dss_driver_probe(struct device *dev)
326 int r; 336 int r;
327 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver); 337 struct omap_dss_driver *dssdrv = to_dss_driver(dev->driver);
328 struct omap_dss_device *dssdev = to_dss_device(dev); 338 struct omap_dss_device *dssdev = to_dss_device(dev);
329 struct omap_dss_board_info *pdata = core.pdev->dev.platform_data;
330 bool force; 339 bool force;
331 340
332 DSSDBG("driver_probe: dev %s/%s, drv %s\n", 341 DSSDBG("driver_probe: dev %s/%s, drv %s\n",
@@ -335,7 +344,8 @@ static int dss_driver_probe(struct device *dev)
335 344
336 dss_init_device(core.pdev, dssdev); 345 dss_init_device(core.pdev, dssdev);
337 346
338 force = pdata->default_device == dssdev; 347 force = core.default_display_name &&
348 strcmp(core.default_display_name, dssdev->name) == 0;
339 dss_recheck_connections(dssdev, force); 349 dss_recheck_connections(dssdev, force);
340 350
341 r = dssdrv->probe(dssdev); 351 r = dssdrv->probe(dssdev);
@@ -381,6 +391,8 @@ int omap_dss_register_driver(struct omap_dss_driver *dssdriver)
381 if (dssdriver->get_recommended_bpp == NULL) 391 if (dssdriver->get_recommended_bpp == NULL)
382 dssdriver->get_recommended_bpp = 392 dssdriver->get_recommended_bpp =
383 omapdss_default_get_recommended_bpp; 393 omapdss_default_get_recommended_bpp;
394 if (dssdriver->get_timings == NULL)
395 dssdriver->get_timings = omapdss_default_get_timings;
384 396
385 return driver_register(&dssdriver->driver); 397 return driver_register(&dssdriver->driver);
386} 398}
@@ -427,27 +439,38 @@ static void omap_dss_dev_release(struct device *dev)
427 reset_device(dev, 0); 439 reset_device(dev, 0);
428} 440}
429 441
430static int omap_dss_register_device(struct omap_dss_device *dssdev) 442int omap_dss_register_device(struct omap_dss_device *dssdev,
443 struct device *parent, int disp_num)
431{ 444{
432 static int dev_num;
433
434 WARN_ON(!dssdev->driver_name); 445 WARN_ON(!dssdev->driver_name);
435 446
436 reset_device(&dssdev->dev, 1); 447 reset_device(&dssdev->dev, 1);
437 dssdev->dev.bus = &dss_bus_type; 448 dssdev->dev.bus = &dss_bus_type;
438 dssdev->dev.parent = &dss_bus; 449 dssdev->dev.parent = parent;
439 dssdev->dev.release = omap_dss_dev_release; 450 dssdev->dev.release = omap_dss_dev_release;
440 dev_set_name(&dssdev->dev, "display%d", dev_num++); 451 dev_set_name(&dssdev->dev, "display%d", disp_num);
441 return device_register(&dssdev->dev); 452 return device_register(&dssdev->dev);
442} 453}
443 454
444static void omap_dss_unregister_device(struct omap_dss_device *dssdev) 455void omap_dss_unregister_device(struct omap_dss_device *dssdev)
445{ 456{
446 device_unregister(&dssdev->dev); 457 device_unregister(&dssdev->dev);
447} 458}
448 459
460static int dss_unregister_dss_dev(struct device *dev, void *data)
461{
462 struct omap_dss_device *dssdev = to_dss_device(dev);
463 omap_dss_unregister_device(dssdev);
464 return 0;
465}
466
467void omap_dss_unregister_child_devices(struct device *parent)
468{
469 device_for_each_child(parent, NULL, dss_unregister_dss_dev);
470}
471
449/* BUS */ 472/* BUS */
450static int omap_dss_bus_register(void) 473static int __init omap_dss_bus_register(void)
451{ 474{
452 int r; 475 int r;
453 476
@@ -469,12 +492,56 @@ static int omap_dss_bus_register(void)
469} 492}
470 493
471/* INIT */ 494/* INIT */
495static int (*dss_output_drv_reg_funcs[])(void) __initdata = {
496#ifdef CONFIG_OMAP2_DSS_DPI
497 dpi_init_platform_driver,
498#endif
499#ifdef CONFIG_OMAP2_DSS_SDI
500 sdi_init_platform_driver,
501#endif
502#ifdef CONFIG_OMAP2_DSS_RFBI
503 rfbi_init_platform_driver,
504#endif
505#ifdef CONFIG_OMAP2_DSS_VENC
506 venc_init_platform_driver,
507#endif
508#ifdef CONFIG_OMAP2_DSS_DSI
509 dsi_init_platform_driver,
510#endif
511#ifdef CONFIG_OMAP4_DSS_HDMI
512 hdmi_init_platform_driver,
513#endif
514};
515
516static void (*dss_output_drv_unreg_funcs[])(void) __exitdata = {
517#ifdef CONFIG_OMAP2_DSS_DPI
518 dpi_uninit_platform_driver,
519#endif
520#ifdef CONFIG_OMAP2_DSS_SDI
521 sdi_uninit_platform_driver,
522#endif
523#ifdef CONFIG_OMAP2_DSS_RFBI
524 rfbi_uninit_platform_driver,
525#endif
526#ifdef CONFIG_OMAP2_DSS_VENC
527 venc_uninit_platform_driver,
528#endif
529#ifdef CONFIG_OMAP2_DSS_DSI
530 dsi_uninit_platform_driver,
531#endif
532#ifdef CONFIG_OMAP4_DSS_HDMI
533 hdmi_uninit_platform_driver,
534#endif
535};
536
537static bool dss_output_drv_loaded[ARRAY_SIZE(dss_output_drv_reg_funcs)];
472 538
473static int __init omap_dss_register_drivers(void) 539static int __init omap_dss_register_drivers(void)
474{ 540{
475 int r; 541 int r;
542 int i;
476 543
477 r = platform_driver_register(&omap_dss_driver); 544 r = platform_driver_probe(&omap_dss_driver, omap_dss_probe);
478 if (r) 545 if (r)
479 return r; 546 return r;
480 547
@@ -490,40 +557,18 @@ static int __init omap_dss_register_drivers(void)
490 goto err_dispc; 557 goto err_dispc;
491 } 558 }
492 559
493 r = rfbi_init_platform_driver(); 560 /*
494 if (r) { 561 * It's ok if the output-driver register fails. It happens, for example,
495 DSSERR("Failed to initialize rfbi platform driver\n"); 562 * when there is no output-device (e.g. SDI for OMAP4).
496 goto err_rfbi; 563 */
497 } 564 for (i = 0; i < ARRAY_SIZE(dss_output_drv_reg_funcs); ++i) {
498 565 r = dss_output_drv_reg_funcs[i]();
499 r = venc_init_platform_driver(); 566 if (r == 0)
500 if (r) { 567 dss_output_drv_loaded[i] = true;
501 DSSERR("Failed to initialize venc platform driver\n");
502 goto err_venc;
503 }
504
505 r = dsi_init_platform_driver();
506 if (r) {
507 DSSERR("Failed to initialize DSI platform driver\n");
508 goto err_dsi;
509 }
510
511 r = hdmi_init_platform_driver();
512 if (r) {
513 DSSERR("Failed to initialize hdmi\n");
514 goto err_hdmi;
515 } 568 }
516 569
517 return 0; 570 return 0;
518 571
519err_hdmi:
520 dsi_uninit_platform_driver();
521err_dsi:
522 venc_uninit_platform_driver();
523err_venc:
524 rfbi_uninit_platform_driver();
525err_rfbi:
526 dispc_uninit_platform_driver();
527err_dispc: 572err_dispc:
528 dss_uninit_platform_driver(); 573 dss_uninit_platform_driver();
529err_dss: 574err_dss:
@@ -534,10 +579,13 @@ err_dss:
534 579
535static void __exit omap_dss_unregister_drivers(void) 580static void __exit omap_dss_unregister_drivers(void)
536{ 581{
537 hdmi_uninit_platform_driver(); 582 int i;
538 dsi_uninit_platform_driver(); 583
539 venc_uninit_platform_driver(); 584 for (i = 0; i < ARRAY_SIZE(dss_output_drv_unreg_funcs); ++i) {
540 rfbi_uninit_platform_driver(); 585 if (dss_output_drv_loaded[i])
586 dss_output_drv_unreg_funcs[i]();
587 }
588
541 dispc_uninit_platform_driver(); 589 dispc_uninit_platform_driver();
542 dss_uninit_platform_driver(); 590 dss_uninit_platform_driver();
543 591
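omap_dss_register_drivers() now walks an __initdata table of output-driver registration functions instead of open-coding each call, records which entries succeeded in dss_output_drv_loaded[], and, per the comment, treats a failing output driver as non-fatal; omap_dss_unregister_drivers() later unwinds only the drivers that actually loaded. The shape of that loop, reduced to standalone C (the three fake drivers below are placeholders, not the real DSS outputs):

#include <stdbool.h>
#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static int dpi_init(void)  { return 0; }
static int sdi_init(void)  { return -1; }	/* e.g. no such output on this SoC */
static int hdmi_init(void) { return 0; }

static void dpi_uninit(void)  { puts("dpi_uninit");  }
static void sdi_uninit(void)  { puts("sdi_uninit");  }
static void hdmi_uninit(void) { puts("hdmi_uninit"); }

static int (*reg_funcs[])(void)    = { dpi_init, sdi_init, hdmi_init };
static void (*unreg_funcs[])(void) = { dpi_uninit, sdi_uninit, hdmi_uninit };
static bool loaded[ARRAY_SIZE(reg_funcs)];

static void register_outputs(void)
{
	size_t i;

	/* a failing output driver is not fatal, just remembered */
	for (i = 0; i < ARRAY_SIZE(reg_funcs); i++)
		if (reg_funcs[i]() == 0)
			loaded[i] = true;
}

static void unregister_outputs(void)
{
	size_t i;

	for (i = 0; i < ARRAY_SIZE(unreg_funcs); i++)
		if (loaded[i])
			unreg_funcs[i]();
}

int main(void)
{
	register_outputs();
	unregister_outputs();	/* prints dpi_uninit and hdmi_uninit only */
	return 0;
}

Keeping the register and unregister arrays in matching order is what lets a single loaded[] index pair each init function with its uninit counterpart.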
diff --git a/drivers/video/omap2/dss/dispc.c b/drivers/video/omap2/dss/dispc.c
index ee30937482e1..4749ac356469 100644
--- a/drivers/video/omap2/dss/dispc.c
+++ b/drivers/video/omap2/dss/dispc.c
@@ -131,23 +131,6 @@ static inline u32 dispc_read_reg(const u16 idx)
131 return __raw_readl(dispc.base + idx); 131 return __raw_readl(dispc.base + idx);
132} 132}
133 133
134static int dispc_get_ctx_loss_count(void)
135{
136 struct device *dev = &dispc.pdev->dev;
137 struct omap_display_platform_data *pdata = dev->platform_data;
138 struct omap_dss_board_info *board_data = pdata->board_data;
139 int cnt;
140
141 if (!board_data->get_context_loss_count)
142 return -ENOENT;
143
144 cnt = board_data->get_context_loss_count(dev);
145
146 WARN_ONCE(cnt < 0, "get_context_loss_count failed: %d\n", cnt);
147
148 return cnt;
149}
150
151#define SR(reg) \ 134#define SR(reg) \
152 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg) 135 dispc.ctx[DISPC_##reg / sizeof(u32)] = dispc_read_reg(DISPC_##reg)
153#define RR(reg) \ 136#define RR(reg) \
@@ -251,7 +234,7 @@ static void dispc_save_context(void)
251 if (dss_has_feature(FEAT_CORE_CLK_DIV)) 234 if (dss_has_feature(FEAT_CORE_CLK_DIV))
252 SR(DIVISOR); 235 SR(DIVISOR);
253 236
254 dispc.ctx_loss_cnt = dispc_get_ctx_loss_count(); 237 dispc.ctx_loss_cnt = dss_get_ctx_loss_count(&dispc.pdev->dev);
255 dispc.ctx_valid = true; 238 dispc.ctx_valid = true;
256 239
257 DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt); 240 DSSDBG("context saved, ctx_loss_count %d\n", dispc.ctx_loss_cnt);
@@ -266,7 +249,7 @@ static void dispc_restore_context(void)
266 if (!dispc.ctx_valid) 249 if (!dispc.ctx_valid)
267 return; 250 return;
268 251
269 ctx = dispc_get_ctx_loss_count(); 252 ctx = dss_get_ctx_loss_count(&dispc.pdev->dev);
270 253
271 if (ctx >= 0 && ctx == dispc.ctx_loss_cnt) 254 if (ctx >= 0 && ctx == dispc.ctx_loss_cnt)
272 return; 255 return;
@@ -413,14 +396,6 @@ static inline bool dispc_mgr_is_lcd(enum omap_channel channel)
413 return false; 396 return false;
414} 397}
415 398
416static struct omap_dss_device *dispc_mgr_get_device(enum omap_channel channel)
417{
418 struct omap_overlay_manager *mgr =
419 omap_dss_get_overlay_manager(channel);
420
421 return mgr ? mgr->device : NULL;
422}
423
424u32 dispc_mgr_get_vsync_irq(enum omap_channel channel) 399u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
425{ 400{
426 switch (channel) { 401 switch (channel) {
@@ -432,6 +407,7 @@ u32 dispc_mgr_get_vsync_irq(enum omap_channel channel)
432 return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN; 407 return DISPC_IRQ_EVSYNC_ODD | DISPC_IRQ_EVSYNC_EVEN;
433 default: 408 default:
434 BUG(); 409 BUG();
410 return 0;
435 } 411 }
436} 412}
437 413
@@ -446,6 +422,7 @@ u32 dispc_mgr_get_framedone_irq(enum omap_channel channel)
446 return 0; 422 return 0;
447 default: 423 default:
448 BUG(); 424 BUG();
425 return 0;
449 } 426 }
450} 427}
451 428
@@ -764,7 +741,7 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
764 case OMAP_DSS_COLOR_XRGB16_1555: 741 case OMAP_DSS_COLOR_XRGB16_1555:
765 m = 0xf; break; 742 m = 0xf; break;
766 default: 743 default:
767 BUG(); break; 744 BUG(); return;
768 } 745 }
769 } else { 746 } else {
770 switch (color_mode) { 747 switch (color_mode) {
@@ -801,13 +778,25 @@ static void dispc_ovl_set_color_mode(enum omap_plane plane,
801 case OMAP_DSS_COLOR_XRGB16_1555: 778 case OMAP_DSS_COLOR_XRGB16_1555:
802 m = 0xf; break; 779 m = 0xf; break;
803 default: 780 default:
804 BUG(); break; 781 BUG(); return;
805 } 782 }
806 } 783 }
807 784
808 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1); 785 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), m, 4, 1);
809} 786}
810 787
788static void dispc_ovl_configure_burst_type(enum omap_plane plane,
789 enum omap_dss_rotation_type rotation_type)
790{
791 if (dss_has_feature(FEAT_BURST_2D) == 0)
792 return;
793
794 if (rotation_type == OMAP_DSS_ROT_TILER)
795 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 1, 29, 29);
796 else
797 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), 0, 29, 29);
798}
799
811void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel) 800void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
812{ 801{
813 int shift; 802 int shift;
@@ -845,6 +834,7 @@ void dispc_ovl_set_channel_out(enum omap_plane plane, enum omap_channel channel)
845 break; 834 break;
846 default: 835 default:
847 BUG(); 836 BUG();
837 return;
848 } 838 }
849 839
850 val = FLD_MOD(val, chan, shift, shift); 840 val = FLD_MOD(val, chan, shift, shift);
@@ -872,6 +862,7 @@ static enum omap_channel dispc_ovl_get_channel_out(enum omap_plane plane)
872 break; 862 break;
873 default: 863 default:
874 BUG(); 864 BUG();
865 return 0;
875 } 866 }
876 867
877 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane)); 868 val = dispc_read_reg(DISPC_OVL_ATTRIBUTES(plane));
@@ -983,20 +974,13 @@ static void dispc_ovl_enable_replication(enum omap_plane plane, bool enable)
983 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift); 974 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), enable, shift, shift);
984} 975}
985 976
986void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height) 977static void dispc_mgr_set_size(enum omap_channel channel, u16 width,
978 u16 height)
987{ 979{
988 u32 val; 980 u32 val;
989 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
990 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
991 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
992}
993 981
994void dispc_set_digit_size(u16 width, u16 height)
995{
996 u32 val;
997 BUG_ON((width > (1 << 11)) || (height > (1 << 11)));
998 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0); 982 val = FLD_VAL(height - 1, 26, 16) | FLD_VAL(width - 1, 10, 0);
999 dispc_write_reg(DISPC_SIZE_MGR(OMAP_DSS_CHANNEL_DIGIT), val); 983 dispc_write_reg(DISPC_SIZE_MGR(channel), val);
1000} 984}
1001 985
1002static void dispc_read_plane_fifo_sizes(void) 986static void dispc_read_plane_fifo_sizes(void)
@@ -1063,7 +1047,8 @@ void dispc_enable_fifomerge(bool enable)
1063} 1047}
1064 1048
1065void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, 1049void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
1066 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge) 1050 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
1051 bool manual_update)
1067{ 1052{
1068 /* 1053 /*
1069 * All sizes are in bytes. Both the buffer and burst are made of 1054 * All sizes are in bytes. Both the buffer and burst are made of
@@ -1091,7 +1076,7 @@ void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
1091 * combined fifo size 1076 * combined fifo size
1092 */ 1077 */
1093 1078
1094 if (dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) { 1079 if (manual_update && dss_has_feature(FEAT_OMAP3_DSI_FIFO_BUG)) {
1095 *fifo_low = ovl_fifo_size - burst_size * 2; 1080 *fifo_low = ovl_fifo_size - burst_size * 2;
1096 *fifo_high = total_fifo_size - burst_size; 1081 *fifo_high = total_fifo_size - burst_size;
1097 } else { 1082 } else {
@@ -1185,6 +1170,94 @@ static void dispc_ovl_set_scale_param(enum omap_plane plane,
1185 dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp); 1170 dispc_ovl_set_fir(plane, fir_hinc, fir_vinc, color_comp);
1186} 1171}
1187 1172
1173static void dispc_ovl_set_accu_uv(enum omap_plane plane,
1174 u16 orig_width, u16 orig_height, u16 out_width, u16 out_height,
1175 bool ilace, enum omap_color_mode color_mode, u8 rotation)
1176{
1177 int h_accu2_0, h_accu2_1;
1178 int v_accu2_0, v_accu2_1;
1179 int chroma_hinc, chroma_vinc;
1180 int idx;
1181
1182 struct accu {
1183 s8 h0_m, h0_n;
1184 s8 h1_m, h1_n;
1185 s8 v0_m, v0_n;
1186 s8 v1_m, v1_n;
1187 };
1188
1189 const struct accu *accu_table;
1190 const struct accu *accu_val;
1191
1192 static const struct accu accu_nv12[4] = {
1193 { 0, 1, 0, 1 , -1, 2, 0, 1 },
1194 { 1, 2, -3, 4 , 0, 1, 0, 1 },
1195 { -1, 1, 0, 1 , -1, 2, 0, 1 },
1196 { -1, 2, -1, 2 , -1, 1, 0, 1 },
1197 };
1198
1199 static const struct accu accu_nv12_ilace[4] = {
1200 { 0, 1, 0, 1 , -3, 4, -1, 4 },
1201 { -1, 4, -3, 4 , 0, 1, 0, 1 },
1202 { -1, 1, 0, 1 , -1, 4, -3, 4 },
1203 { -3, 4, -3, 4 , -1, 1, 0, 1 },
1204 };
1205
1206 static const struct accu accu_yuv[4] = {
1207 { 0, 1, 0, 1, 0, 1, 0, 1 },
1208 { 0, 1, 0, 1, 0, 1, 0, 1 },
1209 { -1, 1, 0, 1, 0, 1, 0, 1 },
1210 { 0, 1, 0, 1, -1, 1, 0, 1 },
1211 };
1212
1213 switch (rotation) {
1214 case OMAP_DSS_ROT_0:
1215 idx = 0;
1216 break;
1217 case OMAP_DSS_ROT_90:
1218 idx = 1;
1219 break;
1220 case OMAP_DSS_ROT_180:
1221 idx = 2;
1222 break;
1223 case OMAP_DSS_ROT_270:
1224 idx = 3;
1225 break;
1226 default:
1227 BUG();
1228 return;
1229 }
1230
1231 switch (color_mode) {
1232 case OMAP_DSS_COLOR_NV12:
1233 if (ilace)
1234 accu_table = accu_nv12_ilace;
1235 else
1236 accu_table = accu_nv12;
1237 break;
1238 case OMAP_DSS_COLOR_YUV2:
1239 case OMAP_DSS_COLOR_UYVY:
1240 accu_table = accu_yuv;
1241 break;
1242 default:
1243 BUG();
1244 return;
1245 }
1246
1247 accu_val = &accu_table[idx];
1248
1249 chroma_hinc = 1024 * orig_width / out_width;
1250 chroma_vinc = 1024 * orig_height / out_height;
1251
1252 h_accu2_0 = (accu_val->h0_m * chroma_hinc / accu_val->h0_n) % 1024;
1253 h_accu2_1 = (accu_val->h1_m * chroma_hinc / accu_val->h1_n) % 1024;
1254 v_accu2_0 = (accu_val->v0_m * chroma_vinc / accu_val->v0_n) % 1024;
1255 v_accu2_1 = (accu_val->v1_m * chroma_vinc / accu_val->v1_n) % 1024;
1256
1257 dispc_ovl_set_vid_accu2_0(plane, h_accu2_0, v_accu2_0);
1258 dispc_ovl_set_vid_accu2_1(plane, h_accu2_1, v_accu2_1);
1259}
1260
1188static void dispc_ovl_set_scaling_common(enum omap_plane plane, 1261static void dispc_ovl_set_scaling_common(enum omap_plane plane,
1189 u16 orig_width, u16 orig_height, 1262 u16 orig_width, u16 orig_height,
1190 u16 out_width, u16 out_height, 1263 u16 out_width, u16 out_height,
@@ -1258,6 +1331,10 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1258 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8); 1331 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES2(plane), 0, 8, 8);
1259 return; 1332 return;
1260 } 1333 }
1334
1335 dispc_ovl_set_accu_uv(plane, orig_width, orig_height, out_width,
1336 out_height, ilace, color_mode, rotation);
1337
1261 switch (color_mode) { 1338 switch (color_mode) {
1262 case OMAP_DSS_COLOR_NV12: 1339 case OMAP_DSS_COLOR_NV12:
1263 /* UV is subsampled by 2 vertically*/ 1340 /* UV is subsampled by 2 vertically*/
@@ -1280,6 +1357,7 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1280 break; 1357 break;
1281 default: 1358 default:
1282 BUG(); 1359 BUG();
1360 return;
1283 } 1361 }
1284 1362
1285 if (out_width != orig_width) 1363 if (out_width != orig_width)
@@ -1297,9 +1375,6 @@ static void dispc_ovl_set_scaling_uv(enum omap_plane plane,
1297 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5); 1375 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_x ? 1 : 0, 5, 5);
1298 /* set V scaling */ 1376 /* set V scaling */
1299 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6); 1377 REG_FLD_MOD(DISPC_OVL_ATTRIBUTES(plane), scale_y ? 1 : 0, 6, 6);
1300
1301 dispc_ovl_set_vid_accu2_0(plane, 0x80, 0);
1302 dispc_ovl_set_vid_accu2_1(plane, 0x80, 0);
1303} 1378}
1304 1379
1305static void dispc_ovl_set_scaling(enum omap_plane plane, 1380static void dispc_ovl_set_scaling(enum omap_plane plane,
@@ -1410,6 +1485,7 @@ static int color_mode_to_bpp(enum omap_color_mode color_mode)
1410 return 32; 1485 return 32;
1411 default: 1486 default:
1412 BUG(); 1487 BUG();
1488 return 0;
1413 } 1489 }
1414} 1490}
1415 1491
@@ -1423,6 +1499,7 @@ static s32 pixinc(int pixels, u8 ps)
1423 return 1 - (-pixels + 1) * ps; 1499 return 1 - (-pixels + 1) * ps;
1424 else 1500 else
1425 BUG(); 1501 BUG();
1502 return 0;
1426} 1503}
1427 1504
1428static void calc_vrfb_rotation_offset(u8 rotation, bool mirror, 1505static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
@@ -1431,7 +1508,7 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1431 enum omap_color_mode color_mode, bool fieldmode, 1508 enum omap_color_mode color_mode, bool fieldmode,
1432 unsigned int field_offset, 1509 unsigned int field_offset,
1433 unsigned *offset0, unsigned *offset1, 1510 unsigned *offset0, unsigned *offset1,
1434 s32 *row_inc, s32 *pix_inc) 1511 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1435{ 1512{
1436 u8 ps; 1513 u8 ps;
1437 1514
@@ -1477,10 +1554,10 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1477 else 1554 else
1478 *offset0 = 0; 1555 *offset0 = 0;
1479 1556
1480 *row_inc = pixinc(1 + (screen_width - width) + 1557 *row_inc = pixinc(1 +
1481 (fieldmode ? screen_width : 0), 1558 (y_predecim * screen_width - x_predecim * width) +
1482 ps); 1559 (fieldmode ? screen_width : 0), ps);
1483 *pix_inc = pixinc(1, ps); 1560 *pix_inc = pixinc(x_predecim, ps);
1484 break; 1561 break;
1485 1562
1486 case OMAP_DSS_ROT_0 + 4: 1563 case OMAP_DSS_ROT_0 + 4:
@@ -1498,14 +1575,15 @@ static void calc_vrfb_rotation_offset(u8 rotation, bool mirror,
1498 *offset0 = field_offset * screen_width * ps; 1575 *offset0 = field_offset * screen_width * ps;
1499 else 1576 else
1500 *offset0 = 0; 1577 *offset0 = 0;
1501 *row_inc = pixinc(1 - (screen_width + width) - 1578 *row_inc = pixinc(1 -
1502 (fieldmode ? screen_width : 0), 1579 (y_predecim * screen_width + x_predecim * width) -
1503 ps); 1580 (fieldmode ? screen_width : 0), ps);
1504 *pix_inc = pixinc(1, ps); 1581 *pix_inc = pixinc(x_predecim, ps);
1505 break; 1582 break;
1506 1583
1507 default: 1584 default:
1508 BUG(); 1585 BUG();
1586 return;
1509 } 1587 }
1510} 1588}
1511 1589
@@ -1515,7 +1593,7 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1515 enum omap_color_mode color_mode, bool fieldmode, 1593 enum omap_color_mode color_mode, bool fieldmode,
1516 unsigned int field_offset, 1594 unsigned int field_offset,
1517 unsigned *offset0, unsigned *offset1, 1595 unsigned *offset0, unsigned *offset1,
1518 s32 *row_inc, s32 *pix_inc) 1596 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1519{ 1597{
1520 u8 ps; 1598 u8 ps;
1521 u16 fbw, fbh; 1599 u16 fbw, fbh;
@@ -1557,10 +1635,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1557 *offset0 = *offset1 + field_offset * screen_width * ps; 1635 *offset0 = *offset1 + field_offset * screen_width * ps;
1558 else 1636 else
1559 *offset0 = *offset1; 1637 *offset0 = *offset1;
1560 *row_inc = pixinc(1 + (screen_width - fbw) + 1638 *row_inc = pixinc(1 +
1561 (fieldmode ? screen_width : 0), 1639 (y_predecim * screen_width - fbw * x_predecim) +
1562 ps); 1640 (fieldmode ? screen_width : 0), ps);
1563 *pix_inc = pixinc(1, ps); 1641 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1642 color_mode == OMAP_DSS_COLOR_UYVY)
1643 *pix_inc = pixinc(x_predecim, 2 * ps);
1644 else
1645 *pix_inc = pixinc(x_predecim, ps);
1564 break; 1646 break;
1565 case OMAP_DSS_ROT_90: 1647 case OMAP_DSS_ROT_90:
1566 *offset1 = screen_width * (fbh - 1) * ps; 1648 *offset1 = screen_width * (fbh - 1) * ps;
@@ -1568,9 +1650,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1568 *offset0 = *offset1 + field_offset * ps; 1650 *offset0 = *offset1 + field_offset * ps;
1569 else 1651 else
1570 *offset0 = *offset1; 1652 *offset0 = *offset1;
1571 *row_inc = pixinc(screen_width * (fbh - 1) + 1 + 1653 *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) +
1572 (fieldmode ? 1 : 0), ps); 1654 y_predecim + (fieldmode ? 1 : 0), ps);
1573 *pix_inc = pixinc(-screen_width, ps); 1655 *pix_inc = pixinc(-x_predecim * screen_width, ps);
1574 break; 1656 break;
1575 case OMAP_DSS_ROT_180: 1657 case OMAP_DSS_ROT_180:
1576 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps; 1658 *offset1 = (screen_width * (fbh - 1) + fbw - 1) * ps;
@@ -1579,10 +1661,13 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1579 else 1661 else
1580 *offset0 = *offset1; 1662 *offset0 = *offset1;
1581 *row_inc = pixinc(-1 - 1663 *row_inc = pixinc(-1 -
1582 (screen_width - fbw) - 1664 (y_predecim * screen_width - fbw * x_predecim) -
1583 (fieldmode ? screen_width : 0), 1665 (fieldmode ? screen_width : 0), ps);
1584 ps); 1666 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1585 *pix_inc = pixinc(-1, ps); 1667 color_mode == OMAP_DSS_COLOR_UYVY)
1668 *pix_inc = pixinc(-x_predecim, 2 * ps);
1669 else
1670 *pix_inc = pixinc(-x_predecim, ps);
1586 break; 1671 break;
1587 case OMAP_DSS_ROT_270: 1672 case OMAP_DSS_ROT_270:
1588 *offset1 = (fbw - 1) * ps; 1673 *offset1 = (fbw - 1) * ps;
@@ -1590,9 +1675,9 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1590 *offset0 = *offset1 - field_offset * ps; 1675 *offset0 = *offset1 - field_offset * ps;
1591 else 1676 else
1592 *offset0 = *offset1; 1677 *offset0 = *offset1;
1593 *row_inc = pixinc(-screen_width * (fbh - 1) - 1 - 1678 *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) -
1594 (fieldmode ? 1 : 0), ps); 1679 y_predecim - (fieldmode ? 1 : 0), ps);
1595 *pix_inc = pixinc(screen_width, ps); 1680 *pix_inc = pixinc(x_predecim * screen_width, ps);
1596 break; 1681 break;
1597 1682
1598 /* mirroring */ 1683 /* mirroring */
@@ -1602,10 +1687,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1602 *offset0 = *offset1 + field_offset * screen_width * ps; 1687 *offset0 = *offset1 + field_offset * screen_width * ps;
1603 else 1688 else
1604 *offset0 = *offset1; 1689 *offset0 = *offset1;
1605 *row_inc = pixinc(screen_width * 2 - 1 + 1690 *row_inc = pixinc(y_predecim * screen_width * 2 - 1 +
1606 (fieldmode ? screen_width : 0), 1691 (fieldmode ? screen_width : 0),
1607 ps); 1692 ps);
1608 *pix_inc = pixinc(-1, ps); 1693 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1694 color_mode == OMAP_DSS_COLOR_UYVY)
1695 *pix_inc = pixinc(-x_predecim, 2 * ps);
1696 else
1697 *pix_inc = pixinc(-x_predecim, ps);
1609 break; 1698 break;
1610 1699
1611 case OMAP_DSS_ROT_90 + 4: 1700 case OMAP_DSS_ROT_90 + 4:
@@ -1614,10 +1703,10 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1614 *offset0 = *offset1 + field_offset * ps; 1703 *offset0 = *offset1 + field_offset * ps;
1615 else 1704 else
1616 *offset0 = *offset1; 1705 *offset0 = *offset1;
1617 *row_inc = pixinc(-screen_width * (fbh - 1) + 1 + 1706 *row_inc = pixinc(-screen_width * (fbh * x_predecim - 1) +
1618 (fieldmode ? 1 : 0), 1707 y_predecim + (fieldmode ? 1 : 0),
1619 ps); 1708 ps);
1620 *pix_inc = pixinc(screen_width, ps); 1709 *pix_inc = pixinc(x_predecim * screen_width, ps);
1621 break; 1710 break;
1622 1711
1623 case OMAP_DSS_ROT_180 + 4: 1712 case OMAP_DSS_ROT_180 + 4:
@@ -1626,10 +1715,14 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1626 *offset0 = *offset1 - field_offset * screen_width * ps; 1715 *offset0 = *offset1 - field_offset * screen_width * ps;
1627 else 1716 else
1628 *offset0 = *offset1; 1717 *offset0 = *offset1;
1629 *row_inc = pixinc(1 - screen_width * 2 - 1718 *row_inc = pixinc(1 - y_predecim * screen_width * 2 -
1630 (fieldmode ? screen_width : 0), 1719 (fieldmode ? screen_width : 0),
1631 ps); 1720 ps);
1632 *pix_inc = pixinc(1, ps); 1721 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1722 color_mode == OMAP_DSS_COLOR_UYVY)
1723 *pix_inc = pixinc(x_predecim, 2 * ps);
1724 else
1725 *pix_inc = pixinc(x_predecim, ps);
1633 break; 1726 break;
1634 1727
1635 case OMAP_DSS_ROT_270 + 4: 1728 case OMAP_DSS_ROT_270 + 4:
@@ -1638,34 +1731,130 @@ static void calc_dma_rotation_offset(u8 rotation, bool mirror,
1638 *offset0 = *offset1 - field_offset * ps; 1731 *offset0 = *offset1 - field_offset * ps;
1639 else 1732 else
1640 *offset0 = *offset1; 1733 *offset0 = *offset1;
1641 *row_inc = pixinc(screen_width * (fbh - 1) - 1 - 1734 *row_inc = pixinc(screen_width * (fbh * x_predecim - 1) -
1642 (fieldmode ? 1 : 0), 1735 y_predecim - (fieldmode ? 1 : 0),
1643 ps); 1736 ps);
1644 *pix_inc = pixinc(-screen_width, ps); 1737 *pix_inc = pixinc(-x_predecim * screen_width, ps);
1645 break; 1738 break;
1646 1739
1647 default: 1740 default:
1648 BUG(); 1741 BUG();
1742 return;
1743 }
1744}
1745
1746static void calc_tiler_rotation_offset(u16 screen_width, u16 width,
1747 enum omap_color_mode color_mode, bool fieldmode,
1748 unsigned int field_offset, unsigned *offset0, unsigned *offset1,
1749 s32 *row_inc, s32 *pix_inc, int x_predecim, int y_predecim)
1750{
1751 u8 ps;
1752
1753 switch (color_mode) {
1754 case OMAP_DSS_COLOR_CLUT1:
1755 case OMAP_DSS_COLOR_CLUT2:
1756 case OMAP_DSS_COLOR_CLUT4:
1757 case OMAP_DSS_COLOR_CLUT8:
1758 BUG();
1759 return;
1760 default:
1761 ps = color_mode_to_bpp(color_mode) / 8;
1762 break;
1649 } 1763 }
1764
1765 DSSDBG("scrw %d, width %d\n", screen_width, width);
1766
1767 /*
1768 * field 0 = even field = bottom field
1769 * field 1 = odd field = top field
1770 */
1771 *offset1 = 0;
1772 if (field_offset)
1773 *offset0 = *offset1 + field_offset * screen_width * ps;
1774 else
1775 *offset0 = *offset1;
1776 *row_inc = pixinc(1 + (y_predecim * screen_width - width * x_predecim) +
1777 (fieldmode ? screen_width : 0), ps);
1778 if (color_mode == OMAP_DSS_COLOR_YUV2 ||
1779 color_mode == OMAP_DSS_COLOR_UYVY)
1780 *pix_inc = pixinc(x_predecim, 2 * ps);
1781 else
1782 *pix_inc = pixinc(x_predecim, ps);
1650} 1783}
1651 1784
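Editorial sketch: all of the rotation cases above receive the same predecimation treatment, so one worked case is enough. The snippet models only the 0-degree DMA case; pixinc() is a simplified copy of the helper earlier in this file (the pixels == 0 BUG() case is dropped) and the frame geometry is invented.

/*
 * Row and pixel increments for ROT_0 with predecimation: only every
 * x_predecim-th pixel and every y_predecim-th line is fetched, so both
 * increments scale with the decimation factors, as in the hunks above.
 */
#include <stdio.h>

static int pixinc(int pixels, int ps)
{
	return pixels > 0 ? 1 + (pixels - 1) * ps : 1 - (-pixels + 1) * ps;
}

int main(void)
{
	int screen_width = 1920, fbw = 1280, ps = 4;	/* e.g. RGB24U */
	int x_predecim = 2, y_predecim = 2, fieldmode = 0;

	int row_inc = pixinc(1 + (y_predecim * screen_width - fbw * x_predecim) +
			(fieldmode ? screen_width : 0), ps);
	int pix_inc = pixinc(x_predecim, ps);

	printf("row_inc=%d pix_inc=%d\n", row_inc, pix_inc);
	return 0;
}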
1652static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width, 1785/*
1786 * This function is used to avoid synclosts in OMAP3, because of some
1787 * undocumented horizontal position and timing related limitations.
1788 */
1789static int check_horiz_timing_omap3(enum omap_channel channel,
1790 const struct omap_video_timings *t, u16 pos_x,
1791 u16 width, u16 height, u16 out_width, u16 out_height)
1792{
1793 int DS = DIV_ROUND_UP(height, out_height);
1794 unsigned long nonactive, lclk, pclk;
1795 static const u8 limits[3] = { 8, 10, 20 };
1796 u64 val, blank;
1797 int i;
1798
1799 nonactive = t->x_res + t->hfp + t->hsw + t->hbp - out_width;
1800 pclk = dispc_mgr_pclk_rate(channel);
1801 if (dispc_mgr_is_lcd(channel))
1802 lclk = dispc_mgr_lclk_rate(channel);
1803 else
1804 lclk = dispc_fclk_rate();
1805
1806 i = 0;
1807 if (out_height < height)
1808 i++;
1809 if (out_width < width)
1810 i++;
1811 blank = div_u64((u64)(t->hbp + t->hsw + t->hfp) * lclk, pclk);
1812 DSSDBG("blanking period + ppl = %llu (limit = %u)\n", blank, limits[i]);
1813 if (blank <= limits[i])
1814 return -EINVAL;
1815
1816 /*
1817 * Pixel data should be prepared before visible display point starts.
1818 * So, at least DS-2 lines must have already been fetched by DISPC
1819 * during nonactive - pos_x period.
1820 */
1821 val = div_u64((u64)(nonactive - pos_x) * lclk, pclk);
1822 DSSDBG("(nonactive - pos_x) * pcd = %llu max(0, DS - 2) * width = %d\n",
1823 val, max(0, DS - 2) * width);
1824 if (val < max(0, DS - 2) * width)
1825 return -EINVAL;
1826
1827 /*
1828 * All lines need to be refilled during the nonactive period of which
1829 * only one line can be loaded during the active period. So, at least
1830 * DS - 1 lines should be loaded during nonactive period.
1831 */
1832 val = div_u64((u64)nonactive * lclk, pclk);
1833 DSSDBG("nonactive * pcd = %llu, max(0, DS - 1) * width = %d\n",
1834 val, max(0, DS - 1) * width);
1835 if (val < max(0, DS - 1) * width)
1836 return -EINVAL;
1837
1838 return 0;
1839}
1840
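Editorial sketch of the two constraints checked by check_horiz_timing_omap3() above, with invented panel timings and clock rates. It reproduces only the arithmetic; the limits[] lookup and the DS - 1 variant are analogous.

/*
 * Hypothetical numbers: the horizontal blanking must last longer than a
 * small limit (in lclk cycles), and the non-active region before pos_x
 * must be long enough to prefetch DS - 2 input lines.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t x_res = 1024, hfp = 24, hsw = 136, hbp = 160;	/* pixels */
	uint64_t lclk = 86000000, pclk = 65000000;		/* Hz */
	uint64_t width = 1024, height = 1536;			/* input */
	uint64_t out_width = 800, out_height = 600, pos_x = 0;	/* output */

	uint64_t DS = (height + out_height - 1) / out_height;	/* downscale */
	uint64_t nonactive = x_res + hfp + hsw + hbp - out_width;
	uint64_t blank = (hbp + hsw + hfp) * lclk / pclk;
	uint64_t prefetch = (nonactive - pos_x) * lclk / pclk;

	printf("blank=%llu cycles, prefetch=%llu, need >= %llu\n",
	       (unsigned long long)blank, (unsigned long long)prefetch,
	       (unsigned long long)((DS >= 2 ? DS - 2 : 0) * width));
	return 0;
}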
1841static unsigned long calc_core_clk_five_taps(enum omap_channel channel,
1842 const struct omap_video_timings *mgr_timings, u16 width,
1653 u16 height, u16 out_width, u16 out_height, 1843 u16 height, u16 out_width, u16 out_height,
1654 enum omap_color_mode color_mode) 1844 enum omap_color_mode color_mode)
1655{ 1845{
1656 u32 fclk = 0; 1846 u32 core_clk = 0;
1657 u64 tmp, pclk = dispc_mgr_pclk_rate(channel); 1847 u64 tmp, pclk = dispc_mgr_pclk_rate(channel);
1658 1848
1659 if (height <= out_height && width <= out_width) 1849 if (height <= out_height && width <= out_width)
1660 return (unsigned long) pclk; 1850 return (unsigned long) pclk;
1661 1851
1662 if (height > out_height) { 1852 if (height > out_height) {
1663 struct omap_dss_device *dssdev = dispc_mgr_get_device(channel); 1853 unsigned int ppl = mgr_timings->x_res;
1664 unsigned int ppl = dssdev->panel.timings.x_res;
1665 1854
1666 tmp = pclk * height * out_width; 1855 tmp = pclk * height * out_width;
1667 do_div(tmp, 2 * out_height * ppl); 1856 do_div(tmp, 2 * out_height * ppl);
1668 fclk = tmp; 1857 core_clk = tmp;
1669 1858
1670 if (height > 2 * out_height) { 1859 if (height > 2 * out_height) {
1671 if (ppl == out_width) 1860 if (ppl == out_width)
@@ -1673,23 +1862,23 @@ static unsigned long calc_fclk_five_taps(enum omap_channel channel, u16 width,
1673 1862
1674 tmp = pclk * (height - 2 * out_height) * out_width; 1863 tmp = pclk * (height - 2 * out_height) * out_width;
1675 do_div(tmp, 2 * out_height * (ppl - out_width)); 1864 do_div(tmp, 2 * out_height * (ppl - out_width));
1676 fclk = max(fclk, (u32) tmp); 1865 core_clk = max_t(u32, core_clk, tmp);
1677 } 1866 }
1678 } 1867 }
1679 1868
1680 if (width > out_width) { 1869 if (width > out_width) {
1681 tmp = pclk * width; 1870 tmp = pclk * width;
1682 do_div(tmp, out_width); 1871 do_div(tmp, out_width);
1683 fclk = max(fclk, (u32) tmp); 1872 core_clk = max_t(u32, core_clk, tmp);
1684 1873
1685 if (color_mode == OMAP_DSS_COLOR_RGB24U) 1874 if (color_mode == OMAP_DSS_COLOR_RGB24U)
1686 fclk <<= 1; 1875 core_clk <<= 1;
1687 } 1876 }
1688 1877
1689 return fclk; 1878 return core_clk;
1690} 1879}
1691 1880
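Editorial sketch: when downscaling vertically, the renamed calc_core_clk_five_taps() above asks for roughly pclk * height * out_width / (2 * out_height * ppl). The numbers below are illustrative; the extra term used when height > 2 * out_height and the RGB24U doubling are left out.

/* Dominant term of the five-tap core clock requirement, invented mode. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t pclk = 74250000;	/* 74.25 MHz pixel clock */
	uint64_t height = 1080, out_width = 1280, out_height = 720;
	uint64_t ppl = 1280;		/* pixels per line of the manager timings */

	uint64_t core_clk = pclk * height * out_width /
			(2 * out_height * ppl);

	printf("required core clk >= %llu Hz\n", (unsigned long long)core_clk);
	return 0;
}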
1692static unsigned long calc_fclk(enum omap_channel channel, u16 width, 1881static unsigned long calc_core_clk(enum omap_channel channel, u16 width,
1693 u16 height, u16 out_width, u16 out_height) 1882 u16 height, u16 out_width, u16 out_height)
1694{ 1883{
1695 unsigned int hf, vf; 1884 unsigned int hf, vf;
@@ -1730,15 +1919,20 @@ static unsigned long calc_fclk(enum omap_channel channel, u16 width,
1730} 1919}
1731 1920
1732static int dispc_ovl_calc_scaling(enum omap_plane plane, 1921static int dispc_ovl_calc_scaling(enum omap_plane plane,
1733 enum omap_channel channel, u16 width, u16 height, 1922 enum omap_channel channel,
1734 u16 out_width, u16 out_height, 1923 const struct omap_video_timings *mgr_timings,
1735 enum omap_color_mode color_mode, bool *five_taps) 1924 u16 width, u16 height, u16 out_width, u16 out_height,
1925 enum omap_color_mode color_mode, bool *five_taps,
1926 int *x_predecim, int *y_predecim, u16 pos_x)
1736{ 1927{
1737 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 1928 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
1738 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE); 1929 const int maxdownscale = dss_feat_get_param_max(FEAT_PARAM_DOWNSCALE);
1739 const int maxsinglelinewidth = 1930 const int maxsinglelinewidth =
1740 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH); 1931 dss_feat_get_param_max(FEAT_PARAM_LINEWIDTH);
1741 unsigned long fclk = 0; 1932 const int max_decim_limit = 16;
1933 unsigned long core_clk = 0;
1934 int decim_x, decim_y, error, min_factor;
1935 u16 in_width, in_height, in_width_max = 0;
1742 1936
1743 if (width == out_width && height == out_height) 1937 if (width == out_width && height == out_height)
1744 return 0; 1938 return 0;
@@ -1746,64 +1940,154 @@ static int dispc_ovl_calc_scaling(enum omap_plane plane,
1746 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) 1940 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0)
1747 return -EINVAL; 1941 return -EINVAL;
1748 1942
1749 if (out_width < width / maxdownscale || 1943 *x_predecim = max_decim_limit;
1750 out_width > width * 8) 1944 *y_predecim = max_decim_limit;
1945
1946 if (color_mode == OMAP_DSS_COLOR_CLUT1 ||
1947 color_mode == OMAP_DSS_COLOR_CLUT2 ||
1948 color_mode == OMAP_DSS_COLOR_CLUT4 ||
1949 color_mode == OMAP_DSS_COLOR_CLUT8) {
1950 *x_predecim = 1;
1951 *y_predecim = 1;
1952 *five_taps = false;
1953 return 0;
1954 }
1955
1956 decim_x = DIV_ROUND_UP(DIV_ROUND_UP(width, out_width), maxdownscale);
1957 decim_y = DIV_ROUND_UP(DIV_ROUND_UP(height, out_height), maxdownscale);
1958
1959 min_factor = min(decim_x, decim_y);
1960
1961 if (decim_x > *x_predecim || out_width > width * 8)
1751 return -EINVAL; 1962 return -EINVAL;
1752 1963
1753 if (out_height < height / maxdownscale || 1964 if (decim_y > *y_predecim || out_height > height * 8)
1754 out_height > height * 8)
1755 return -EINVAL; 1965 return -EINVAL;
1756 1966
1757 if (cpu_is_omap24xx()) { 1967 if (cpu_is_omap24xx()) {
1758 if (width > maxsinglelinewidth)
1759 DSSERR("Cannot scale max input width exceeded");
1760 *five_taps = false; 1968 *five_taps = false;
1761 fclk = calc_fclk(channel, width, height, out_width, 1969
1762 out_height); 1970 do {
1971 in_height = DIV_ROUND_UP(height, decim_y);
1972 in_width = DIV_ROUND_UP(width, decim_x);
1973 core_clk = calc_core_clk(channel, in_width, in_height,
1974 out_width, out_height);
1975 error = (in_width > maxsinglelinewidth || !core_clk ||
1976 core_clk > dispc_core_clk_rate());
1977 if (error) {
1978 if (decim_x == decim_y) {
1979 decim_x = min_factor;
1980 decim_y++;
1981 } else {
1982 swap(decim_x, decim_y);
1983 if (decim_x < decim_y)
1984 decim_x++;
1985 }
1986 }
1987 } while (decim_x <= *x_predecim && decim_y <= *y_predecim &&
1988 error);
1989
1990 if (in_width > maxsinglelinewidth) {
1991 DSSERR("Cannot scale max input width exceeded");
1992 return -EINVAL;
1993 }
1763 } else if (cpu_is_omap34xx()) { 1994 } else if (cpu_is_omap34xx()) {
1764 if (width > (maxsinglelinewidth * 2)) { 1995
1996 do {
1997 in_height = DIV_ROUND_UP(height, decim_y);
1998 in_width = DIV_ROUND_UP(width, decim_x);
1999 core_clk = calc_core_clk_five_taps(channel, mgr_timings,
2000 in_width, in_height, out_width, out_height,
2001 color_mode);
2002
2003 error = check_horiz_timing_omap3(channel, mgr_timings,
2004 pos_x, in_width, in_height, out_width,
2005 out_height);
2006
2007 if (in_width > maxsinglelinewidth)
2008 if (in_height > out_height &&
2009 in_height < out_height * 2)
2010 *five_taps = false;
2011 if (!*five_taps)
2012 core_clk = calc_core_clk(channel, in_width,
2013 in_height, out_width, out_height);
2014 error = (error || in_width > maxsinglelinewidth * 2 ||
2015 (in_width > maxsinglelinewidth && *five_taps) ||
2016 !core_clk || core_clk > dispc_core_clk_rate());
2017 if (error) {
2018 if (decim_x == decim_y) {
2019 decim_x = min_factor;
2020 decim_y++;
2021 } else {
2022 swap(decim_x, decim_y);
2023 if (decim_x < decim_y)
2024 decim_x++;
2025 }
2026 }
2027 } while (decim_x <= *x_predecim && decim_y <= *y_predecim
2028 && error);
2029
2030 if (check_horiz_timing_omap3(channel, mgr_timings, pos_x, width,
2031 height, out_width, out_height)){
2032 DSSERR("horizontal timing too tight\n");
2033 return -EINVAL;
2034 }
2035
2036 if (in_width > (maxsinglelinewidth * 2)) {
1765 DSSERR("Cannot setup scaling"); 2037 DSSERR("Cannot setup scaling");
1766 DSSERR("width exceeds maximum width possible"); 2038 DSSERR("width exceeds maximum width possible");
1767 return -EINVAL; 2039 return -EINVAL;
1768 } 2040 }
1769 fclk = calc_fclk_five_taps(channel, width, height, out_width, 2041
1770 out_height, color_mode); 2042 if (in_width > maxsinglelinewidth && *five_taps) {
1771 if (width > maxsinglelinewidth) { 2043 DSSERR("cannot setup scaling with five taps");
1772 if (height > out_height && height < out_height * 2) 2044 return -EINVAL;
1773 *five_taps = false;
1774 else {
1775 DSSERR("cannot setup scaling with five taps");
1776 return -EINVAL;
1777 }
1778 } 2045 }
1779 if (!*five_taps)
1780 fclk = calc_fclk(channel, width, height, out_width,
1781 out_height);
1782 } else { 2046 } else {
1783 if (width > maxsinglelinewidth) { 2047 int decim_x_min = decim_x;
2048 in_height = DIV_ROUND_UP(height, decim_y);
2049 in_width_max = dispc_core_clk_rate() /
2050 DIV_ROUND_UP(dispc_mgr_pclk_rate(channel),
2051 out_width);
2052 decim_x = DIV_ROUND_UP(width, in_width_max);
2053
2054 decim_x = decim_x > decim_x_min ? decim_x : decim_x_min;
2055 if (decim_x > *x_predecim)
2056 return -EINVAL;
2057
2058 do {
2059 in_width = DIV_ROUND_UP(width, decim_x);
2060 } while (decim_x <= *x_predecim &&
2061 in_width > maxsinglelinewidth && decim_x++);
2062
2063 if (in_width > maxsinglelinewidth) {
1784 DSSERR("Cannot scale width exceeds max line width"); 2064 DSSERR("Cannot scale width exceeds max line width");
1785 return -EINVAL; 2065 return -EINVAL;
1786 } 2066 }
1787 fclk = calc_fclk(channel, width, height, out_width, 2067
1788 out_height); 2068 core_clk = calc_core_clk(channel, in_width, in_height,
2069 out_width, out_height);
1789 } 2070 }
1790 2071
1791 DSSDBG("required fclk rate = %lu Hz\n", fclk); 2072 DSSDBG("required core clk rate = %lu Hz\n", core_clk);
1792 DSSDBG("current fclk rate = %lu Hz\n", dispc_fclk_rate()); 2073 DSSDBG("current core clk rate = %lu Hz\n", dispc_core_clk_rate());
1793 2074
1794 if (!fclk || fclk > dispc_fclk_rate()) { 2075 if (!core_clk || core_clk > dispc_core_clk_rate()) {
1795 DSSERR("failed to set up scaling, " 2076 DSSERR("failed to set up scaling, "
1796 "required fclk rate = %lu Hz, " 2077 "required core clk rate = %lu Hz, "
1797 "current fclk rate = %lu Hz\n", 2078 "current core clk rate = %lu Hz\n",
1798 fclk, dispc_fclk_rate()); 2079 core_clk, dispc_core_clk_rate());
1799 return -EINVAL; 2080 return -EINVAL;
1800 } 2081 }
1801 2082
2083 *x_predecim = decim_x;
2084 *y_predecim = decim_y;
1802 return 0; 2085 return 0;
1803} 2086}
1804 2087
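Editorial model, heavily simplified, of the predecimation search added to dispc_ovl_calc_scaling() above: the (decim_x, decim_y) pair is advanced, alternating which axis grows, until the constraints pass or the 16x limit is exceeded. The real code starts from the minimum factors dictated by FEAT_PARAM_DOWNSCALE and checks line width, core clock and, on OMAP3, the horizontal timings; can_scale() below is a placeholder for all of that.

/* Stand-alone walk of the decimation space using the same update rule. */
#include <stdbool.h>
#include <stdio.h>

static bool can_scale(int in_w, int in_h)
{
	/* placeholder constraint: pretend the line buffer is 1024 pixels
	 * wide and the downscaler accepts at most 1080 input lines */
	return in_w <= 1024 && in_h <= 1080;
}

int main(void)
{
	int width = 4096, height = 2160, max_decim = 16;
	int decim_x = 1, decim_y = 1, min_factor = 1;
	bool error;

	do {
		int in_w = (width + decim_x - 1) / decim_x;
		int in_h = (height + decim_y - 1) / decim_y;

		error = !can_scale(in_w, in_h);
		if (error) {
			if (decim_x == decim_y) {
				decim_x = min_factor;
				decim_y++;
			} else {
				int tmp = decim_x;

				decim_x = decim_y;
				decim_y = tmp;
				if (decim_x < decim_y)
					decim_x++;
			}
		}
	} while (decim_x <= max_decim && decim_y <= max_decim && error);

	printf("chosen decim_x=%d decim_y=%d\n", decim_x, decim_y);	/* 4, 2 */
	return 0;
}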
1805int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 2088int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1806 bool ilace, bool replication) 2089 bool ilace, bool replication,
2090 const struct omap_video_timings *mgr_timings)
1807{ 2091{
1808 struct omap_overlay *ovl = omap_dss_get_overlay(plane); 2092 struct omap_overlay *ovl = omap_dss_get_overlay(plane);
1809 bool five_taps = true; 2093 bool five_taps = true;
@@ -1814,8 +2098,11 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1814 s32 pix_inc; 2098 s32 pix_inc;
1815 u16 frame_height = oi->height; 2099 u16 frame_height = oi->height;
1816 unsigned int field_offset = 0; 2100 unsigned int field_offset = 0;
1817 u16 outw, outh; 2101 u16 in_height = oi->height;
2102 u16 in_width = oi->width;
2103 u16 out_width, out_height;
1818 enum omap_channel channel; 2104 enum omap_channel channel;
2105 int x_predecim = 1, y_predecim = 1;
1819 2106
1820 channel = dispc_ovl_get_channel_out(plane); 2107 channel = dispc_ovl_get_channel_out(plane);
1821 2108
@@ -1829,32 +2116,35 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1829 if (oi->paddr == 0) 2116 if (oi->paddr == 0)
1830 return -EINVAL; 2117 return -EINVAL;
1831 2118
1832 outw = oi->out_width == 0 ? oi->width : oi->out_width; 2119 out_width = oi->out_width == 0 ? oi->width : oi->out_width;
1833 outh = oi->out_height == 0 ? oi->height : oi->out_height; 2120 out_height = oi->out_height == 0 ? oi->height : oi->out_height;
1834 2121
1835 if (ilace && oi->height == outh) 2122 if (ilace && oi->height == out_height)
1836 fieldmode = 1; 2123 fieldmode = 1;
1837 2124
1838 if (ilace) { 2125 if (ilace) {
1839 if (fieldmode) 2126 if (fieldmode)
1840 oi->height /= 2; 2127 in_height /= 2;
1841 oi->pos_y /= 2; 2128 oi->pos_y /= 2;
1842 outh /= 2; 2129 out_height /= 2;
1843 2130
1844 DSSDBG("adjusting for ilace: height %d, pos_y %d, " 2131 DSSDBG("adjusting for ilace: height %d, pos_y %d, "
1845 "out_height %d\n", 2132 "out_height %d\n",
1846 oi->height, oi->pos_y, outh); 2133 in_height, oi->pos_y, out_height);
1847 } 2134 }
1848 2135
1849 if (!dss_feat_color_mode_supported(plane, oi->color_mode)) 2136 if (!dss_feat_color_mode_supported(plane, oi->color_mode))
1850 return -EINVAL; 2137 return -EINVAL;
1851 2138
1852 r = dispc_ovl_calc_scaling(plane, channel, oi->width, oi->height, 2139 r = dispc_ovl_calc_scaling(plane, channel, mgr_timings, in_width,
1853 outw, outh, oi->color_mode, 2140 in_height, out_width, out_height, oi->color_mode,
1854 &five_taps); 2141 &five_taps, &x_predecim, &y_predecim, oi->pos_x);
1855 if (r) 2142 if (r)
1856 return r; 2143 return r;
1857 2144
2145 in_width = DIV_ROUND_UP(in_width, x_predecim);
2146 in_height = DIV_ROUND_UP(in_height, y_predecim);
2147
1858 if (oi->color_mode == OMAP_DSS_COLOR_YUV2 || 2148 if (oi->color_mode == OMAP_DSS_COLOR_YUV2 ||
1859 oi->color_mode == OMAP_DSS_COLOR_UYVY || 2149 oi->color_mode == OMAP_DSS_COLOR_UYVY ||
1860 oi->color_mode == OMAP_DSS_COLOR_NV12) 2150 oi->color_mode == OMAP_DSS_COLOR_NV12)
@@ -1868,32 +2158,46 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1868 * so the integer part must be added to the base address of the 2158 * so the integer part must be added to the base address of the
1869 * bottom field. 2159 * bottom field.
1870 */ 2160 */
1871 if (!oi->height || oi->height == outh) 2161 if (!in_height || in_height == out_height)
1872 field_offset = 0; 2162 field_offset = 0;
1873 else 2163 else
1874 field_offset = oi->height / outh / 2; 2164 field_offset = in_height / out_height / 2;
1875 } 2165 }
1876 2166
1877 /* Fields are independent but interleaved in memory. */ 2167 /* Fields are independent but interleaved in memory. */
1878 if (fieldmode) 2168 if (fieldmode)
1879 field_offset = 1; 2169 field_offset = 1;
1880 2170
1881 if (oi->rotation_type == OMAP_DSS_ROT_DMA) 2171 offset0 = 0;
2172 offset1 = 0;
2173 row_inc = 0;
2174 pix_inc = 0;
2175
2176 if (oi->rotation_type == OMAP_DSS_ROT_TILER)
2177 calc_tiler_rotation_offset(oi->screen_width, in_width,
2178 oi->color_mode, fieldmode, field_offset,
2179 &offset0, &offset1, &row_inc, &pix_inc,
2180 x_predecim, y_predecim);
2181 else if (oi->rotation_type == OMAP_DSS_ROT_DMA)
1882 calc_dma_rotation_offset(oi->rotation, oi->mirror, 2182 calc_dma_rotation_offset(oi->rotation, oi->mirror,
1883 oi->screen_width, oi->width, frame_height, 2183 oi->screen_width, in_width, frame_height,
1884 oi->color_mode, fieldmode, field_offset, 2184 oi->color_mode, fieldmode, field_offset,
1885 &offset0, &offset1, &row_inc, &pix_inc); 2185 &offset0, &offset1, &row_inc, &pix_inc,
2186 x_predecim, y_predecim);
1886 else 2187 else
1887 calc_vrfb_rotation_offset(oi->rotation, oi->mirror, 2188 calc_vrfb_rotation_offset(oi->rotation, oi->mirror,
1888 oi->screen_width, oi->width, frame_height, 2189 oi->screen_width, in_width, frame_height,
1889 oi->color_mode, fieldmode, field_offset, 2190 oi->color_mode, fieldmode, field_offset,
1890 &offset0, &offset1, &row_inc, &pix_inc); 2191 &offset0, &offset1, &row_inc, &pix_inc,
2192 x_predecim, y_predecim);
1891 2193
1892 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n", 2194 DSSDBG("offset0 %u, offset1 %u, row_inc %d, pix_inc %d\n",
1893 offset0, offset1, row_inc, pix_inc); 2195 offset0, offset1, row_inc, pix_inc);
1894 2196
1895 dispc_ovl_set_color_mode(plane, oi->color_mode); 2197 dispc_ovl_set_color_mode(plane, oi->color_mode);
1896 2198
2199 dispc_ovl_configure_burst_type(plane, oi->rotation_type);
2200
1897 dispc_ovl_set_ba0(plane, oi->paddr + offset0); 2201 dispc_ovl_set_ba0(plane, oi->paddr + offset0);
1898 dispc_ovl_set_ba1(plane, oi->paddr + offset1); 2202 dispc_ovl_set_ba1(plane, oi->paddr + offset1);
1899 2203
@@ -1906,19 +2210,18 @@ int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
1906 dispc_ovl_set_row_inc(plane, row_inc); 2210 dispc_ovl_set_row_inc(plane, row_inc);
1907 dispc_ovl_set_pix_inc(plane, pix_inc); 2211 dispc_ovl_set_pix_inc(plane, pix_inc);
1908 2212
1909 DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, oi->width, 2213 DSSDBG("%d,%d %dx%d -> %dx%d\n", oi->pos_x, oi->pos_y, in_width,
1910 oi->height, outw, outh); 2214 in_height, out_width, out_height);
1911 2215
1912 dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y); 2216 dispc_ovl_set_pos(plane, oi->pos_x, oi->pos_y);
1913 2217
1914 dispc_ovl_set_pic_size(plane, oi->width, oi->height); 2218 dispc_ovl_set_pic_size(plane, in_width, in_height);
1915 2219
1916 if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) { 2220 if (ovl->caps & OMAP_DSS_OVL_CAP_SCALE) {
1917 dispc_ovl_set_scaling(plane, oi->width, oi->height, 2221 dispc_ovl_set_scaling(plane, in_width, in_height, out_width,
1918 outw, outh, 2222 out_height, ilace, five_taps, fieldmode,
1919 ilace, five_taps, fieldmode,
1920 oi->color_mode, oi->rotation); 2223 oi->color_mode, oi->rotation);
1921 dispc_ovl_set_vid_size(plane, outw, outh); 2224 dispc_ovl_set_vid_size(plane, out_width, out_height);
1922 dispc_ovl_set_vid_color_conv(plane, cconv); 2225 dispc_ovl_set_vid_color_conv(plane, cconv);
1923 } 2226 }
1924 2227
@@ -2087,8 +2390,10 @@ bool dispc_mgr_is_enabled(enum omap_channel channel)
2087 return !!REG_GET(DISPC_CONTROL, 1, 1); 2390 return !!REG_GET(DISPC_CONTROL, 1, 1);
2088 else if (channel == OMAP_DSS_CHANNEL_LCD2) 2391 else if (channel == OMAP_DSS_CHANNEL_LCD2)
2089 return !!REG_GET(DISPC_CONTROL2, 0, 0); 2392 return !!REG_GET(DISPC_CONTROL2, 0, 0);
2090 else 2393 else {
2091 BUG(); 2394 BUG();
2395 return false;
2396 }
2092} 2397}
2093 2398
2094void dispc_mgr_enable(enum omap_channel channel, bool enable) 2399void dispc_mgr_enable(enum omap_channel channel, bool enable)
@@ -2285,6 +2590,12 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable)
2285 REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11); 2590 REG_FLD_MOD(DISPC_CONTROL, enable, 11, 11);
2286} 2591}
2287 2592
2593static bool _dispc_mgr_size_ok(u16 width, u16 height)
2594{
2595 return width <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
2596 height <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
2597}
2598
2288static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp, 2599static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2289 int vsw, int vfp, int vbp) 2600 int vsw, int vfp, int vbp)
2290{ 2601{
@@ -2309,11 +2620,20 @@ static bool _dispc_lcd_timings_ok(int hsw, int hfp, int hbp,
2309 return true; 2620 return true;
2310} 2621}
2311 2622
2312bool dispc_lcd_timings_ok(struct omap_video_timings *timings) 2623bool dispc_mgr_timings_ok(enum omap_channel channel,
2624 const struct omap_video_timings *timings)
2313{ 2625{
2314 return _dispc_lcd_timings_ok(timings->hsw, timings->hfp, 2626 bool timings_ok;
2315 timings->hbp, timings->vsw, 2627
2316 timings->vfp, timings->vbp); 2628 timings_ok = _dispc_mgr_size_ok(timings->x_res, timings->y_res);
2629
2630 if (dispc_mgr_is_lcd(channel))
2631 timings_ok = timings_ok && _dispc_lcd_timings_ok(timings->hsw,
2632 timings->hfp, timings->hbp,
2633 timings->vsw, timings->vfp,
2634 timings->vbp);
2635
2636 return timings_ok;
2317} 2637}
2318 2638
2319static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw, 2639static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
@@ -2340,37 +2660,45 @@ static void _dispc_mgr_set_lcd_timings(enum omap_channel channel, int hsw,
2340} 2660}
2341 2661
2342/* change name to mode? */ 2662/* change name to mode? */
2343void dispc_mgr_set_lcd_timings(enum omap_channel channel, 2663void dispc_mgr_set_timings(enum omap_channel channel,
2344 struct omap_video_timings *timings) 2664 struct omap_video_timings *timings)
2345{ 2665{
2346 unsigned xtot, ytot; 2666 unsigned xtot, ytot;
2347 unsigned long ht, vt; 2667 unsigned long ht, vt;
2668 struct omap_video_timings t = *timings;
2669
2670 DSSDBG("channel %d xres %u yres %u\n", channel, t.x_res, t.y_res);
2348 2671
2349 if (!_dispc_lcd_timings_ok(timings->hsw, timings->hfp, 2672 if (!dispc_mgr_timings_ok(channel, &t)) {
2350 timings->hbp, timings->vsw,
2351 timings->vfp, timings->vbp))
2352 BUG(); 2673 BUG();
2674 return;
2675 }
2676
2677 if (dispc_mgr_is_lcd(channel)) {
2678 _dispc_mgr_set_lcd_timings(channel, t.hsw, t.hfp, t.hbp, t.vsw,
2679 t.vfp, t.vbp);
2680
2681 xtot = t.x_res + t.hfp + t.hsw + t.hbp;
2682 ytot = t.y_res + t.vfp + t.vsw + t.vbp;
2353 2683
2354 _dispc_mgr_set_lcd_timings(channel, timings->hsw, timings->hfp, 2684 ht = (timings->pixel_clock * 1000) / xtot;
2355 timings->hbp, timings->vsw, timings->vfp, 2685 vt = (timings->pixel_clock * 1000) / xtot / ytot;
2356 timings->vbp);
2357 2686
2358 dispc_mgr_set_lcd_size(channel, timings->x_res, timings->y_res); 2687 DSSDBG("pck %u\n", timings->pixel_clock);
2688 DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
2689 t.hsw, t.hfp, t.hbp, t.vsw, t.vfp, t.vbp);
2359 2690
2360 xtot = timings->x_res + timings->hfp + timings->hsw + timings->hbp; 2691 DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt);
2361 ytot = timings->y_res + timings->vfp + timings->vsw + timings->vbp; 2692 } else {
2693 enum dss_hdmi_venc_clk_source_select source;
2362 2694
2363 ht = (timings->pixel_clock * 1000) / xtot; 2695 source = dss_get_hdmi_venc_clk_source();
2364 vt = (timings->pixel_clock * 1000) / xtot / ytot;
2365 2696
2366 DSSDBG("channel %d xres %u yres %u\n", channel, timings->x_res, 2697 if (source == DSS_VENC_TV_CLK)
2367 timings->y_res); 2698 t.y_res /= 2;
2368 DSSDBG("pck %u\n", timings->pixel_clock); 2699 }
2369 DSSDBG("hsw %d hfp %d hbp %d vsw %d vfp %d vbp %d\n",
2370 timings->hsw, timings->hfp, timings->hbp,
2371 timings->vsw, timings->vfp, timings->vbp);
2372 2700
2373 DSSDBG("hsync %luHz, vsync %luHz\n", ht, vt); 2701 dispc_mgr_set_size(channel, t.x_res, t.y_res);
2374} 2702}
2375 2703
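Editorial arithmetic check of the hsync/vsync debug values computed in dispc_mgr_set_timings() above, for a typical 1024x768@60 mode; the timings are illustrative and not taken from the patch. pixel_clock is in kHz (the * 1000 in the hunk converts it to Hz).

#include <stdio.h>

int main(void)
{
	unsigned int pixel_clock = 65000;			/* kHz */
	unsigned int x_res = 1024, hfp = 24, hsw = 136, hbp = 160;
	unsigned int y_res = 768, vfp = 3, vsw = 6, vbp = 29;

	unsigned int xtot = x_res + hfp + hsw + hbp;		/* 1344 */
	unsigned int ytot = y_res + vfp + vsw + vbp;		/* 806 */
	unsigned long ht = (unsigned long)pixel_clock * 1000 / xtot;
	unsigned long vt = (unsigned long)pixel_clock * 1000 / xtot / ytot;

	printf("hsync %lu Hz, vsync %lu Hz\n", ht, vt);	/* ~48 kHz, ~60 Hz */
	return 0;
}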
2376static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div, 2704static void dispc_mgr_set_lcd_divisor(enum omap_channel channel, u16 lck_div,
@@ -2411,6 +2739,7 @@ unsigned long dispc_fclk_rate(void)
2411 break; 2739 break;
2412 default: 2740 default:
2413 BUG(); 2741 BUG();
2742 return 0;
2414 } 2743 }
2415 2744
2416 return r; 2745 return r;
@@ -2441,6 +2770,7 @@ unsigned long dispc_mgr_lclk_rate(enum omap_channel channel)
2441 break; 2770 break;
2442 default: 2771 default:
2443 BUG(); 2772 BUG();
2773 return 0;
2444 } 2774 }
2445 2775
2446 return r / lcd; 2776 return r / lcd;
@@ -2462,20 +2792,35 @@ unsigned long dispc_mgr_pclk_rate(enum omap_channel channel)
2462 2792
2463 return r / pcd; 2793 return r / pcd;
2464 } else { 2794 } else {
2465 struct omap_dss_device *dssdev = 2795 enum dss_hdmi_venc_clk_source_select source;
2466 dispc_mgr_get_device(channel);
2467 2796
2468 switch (dssdev->type) { 2797 source = dss_get_hdmi_venc_clk_source();
2469 case OMAP_DISPLAY_TYPE_VENC: 2798
2799 switch (source) {
2800 case DSS_VENC_TV_CLK:
2470 return venc_get_pixel_clock(); 2801 return venc_get_pixel_clock();
2471 case OMAP_DISPLAY_TYPE_HDMI: 2802 case DSS_HDMI_M_PCLK:
2472 return hdmi_get_pixel_clock(); 2803 return hdmi_get_pixel_clock();
2473 default: 2804 default:
2474 BUG(); 2805 BUG();
2806 return 0;
2475 } 2807 }
2476 } 2808 }
2477} 2809}
2478 2810
2811unsigned long dispc_core_clk_rate(void)
2812{
2813 int lcd;
2814 unsigned long fclk = dispc_fclk_rate();
2815
2816 if (dss_has_feature(FEAT_CORE_CLK_DIV))
2817 lcd = REG_GET(DISPC_DIVISOR, 23, 16);
2818 else
2819 lcd = REG_GET(DISPC_DIVISORo(OMAP_DSS_CHANNEL_LCD), 23, 16);
2820
2821 return fclk / lcd;
2822}
2823
2479void dispc_dump_clocks(struct seq_file *s) 2824void dispc_dump_clocks(struct seq_file *s)
2480{ 2825{
2481 int lcd, pcd; 2826 int lcd, pcd;
@@ -2588,7 +2933,7 @@ void dispc_dump_irqs(struct seq_file *s)
2588} 2933}
2589#endif 2934#endif
2590 2935
2591void dispc_dump_regs(struct seq_file *s) 2936static void dispc_dump_regs(struct seq_file *s)
2592{ 2937{
2593 int i, j; 2938 int i, j;
2594 const char *mgr_names[] = { 2939 const char *mgr_names[] = {
@@ -3247,27 +3592,6 @@ int omap_dispc_wait_for_irq_interruptible_timeout(u32 irqmask,
3247 return 0; 3592 return 0;
3248} 3593}
3249 3594
3250#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
3251void dispc_fake_vsync_irq(void)
3252{
3253 u32 irqstatus = DISPC_IRQ_VSYNC;
3254 int i;
3255
3256 WARN_ON(!in_interrupt());
3257
3258 for (i = 0; i < DISPC_MAX_NR_ISRS; i++) {
3259 struct omap_dispc_isr_data *isr_data;
3260 isr_data = &dispc.registered_isr[i];
3261
3262 if (!isr_data->isr)
3263 continue;
3264
3265 if (isr_data->mask & irqstatus)
3266 isr_data->isr(isr_data->arg, irqstatus);
3267 }
3268}
3269#endif
3270
3271static void _omap_dispc_initialize_irq(void) 3595static void _omap_dispc_initialize_irq(void)
3272{ 3596{
3273 unsigned long flags; 3597 unsigned long flags;
@@ -3330,7 +3654,7 @@ static void _omap_dispc_initial_config(void)
3330} 3654}
3331 3655
3332/* DISPC HW IP initialisation */ 3656/* DISPC HW IP initialisation */
3333static int omap_dispchw_probe(struct platform_device *pdev) 3657static int __init omap_dispchw_probe(struct platform_device *pdev)
3334{ 3658{
3335 u32 rev; 3659 u32 rev;
3336 int r = 0; 3660 int r = 0;
@@ -3399,6 +3723,11 @@ static int omap_dispchw_probe(struct platform_device *pdev)
3399 3723
3400 dispc_runtime_put(); 3724 dispc_runtime_put();
3401 3725
3726 dss_debugfs_create_file("dispc", dispc_dump_regs);
3727
3728#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
3729 dss_debugfs_create_file("dispc_irq", dispc_dump_irqs);
3730#endif
3402 return 0; 3731 return 0;
3403 3732
3404err_runtime_get: 3733err_runtime_get:
@@ -3407,7 +3736,7 @@ err_runtime_get:
3407 return r; 3736 return r;
3408} 3737}
3409 3738
3410static int omap_dispchw_remove(struct platform_device *pdev) 3739static int __exit omap_dispchw_remove(struct platform_device *pdev)
3411{ 3740{
3412 pm_runtime_disable(&pdev->dev); 3741 pm_runtime_disable(&pdev->dev);
3413 3742
@@ -3419,19 +3748,12 @@ static int omap_dispchw_remove(struct platform_device *pdev)
3419static int dispc_runtime_suspend(struct device *dev) 3748static int dispc_runtime_suspend(struct device *dev)
3420{ 3749{
3421 dispc_save_context(); 3750 dispc_save_context();
3422 dss_runtime_put();
3423 3751
3424 return 0; 3752 return 0;
3425} 3753}
3426 3754
3427static int dispc_runtime_resume(struct device *dev) 3755static int dispc_runtime_resume(struct device *dev)
3428{ 3756{
3429 int r;
3430
3431 r = dss_runtime_get();
3432 if (r < 0)
3433 return r;
3434
3435 dispc_restore_context(); 3757 dispc_restore_context();
3436 3758
3437 return 0; 3759 return 0;
@@ -3443,8 +3765,7 @@ static const struct dev_pm_ops dispc_pm_ops = {
3443}; 3765};
3444 3766
3445static struct platform_driver omap_dispchw_driver = { 3767static struct platform_driver omap_dispchw_driver = {
3446 .probe = omap_dispchw_probe, 3768 .remove = __exit_p(omap_dispchw_remove),
3447 .remove = omap_dispchw_remove,
3448 .driver = { 3769 .driver = {
3449 .name = "omapdss_dispc", 3770 .name = "omapdss_dispc",
3450 .owner = THIS_MODULE, 3771 .owner = THIS_MODULE,
@@ -3452,12 +3773,12 @@ static struct platform_driver omap_dispchw_driver = {
3452 }, 3773 },
3453}; 3774};
3454 3775
3455int dispc_init_platform_driver(void) 3776int __init dispc_init_platform_driver(void)
3456{ 3777{
3457 return platform_driver_register(&omap_dispchw_driver); 3778 return platform_driver_probe(&omap_dispchw_driver, omap_dispchw_probe);
3458} 3779}
3459 3780
3460void dispc_uninit_platform_driver(void) 3781void __exit dispc_uninit_platform_driver(void)
3461{ 3782{
3462 return platform_driver_unregister(&omap_dispchw_driver); 3783 platform_driver_unregister(&omap_dispchw_driver);
3463} 3784}
diff --git a/drivers/video/omap2/dss/dispc.h b/drivers/video/omap2/dss/dispc.h
index 5836bd1650f9..f278080e1063 100644
--- a/drivers/video/omap2/dss/dispc.h
+++ b/drivers/video/omap2/dss/dispc.h
@@ -120,6 +120,7 @@ static inline u16 DISPC_DEFAULT_COLOR(enum omap_channel channel)
120 return 0x03AC; 120 return 0x03AC;
121 default: 121 default:
122 BUG(); 122 BUG();
123 return 0;
123 } 124 }
124} 125}
125 126
@@ -134,6 +135,7 @@ static inline u16 DISPC_TRANS_COLOR(enum omap_channel channel)
134 return 0x03B0; 135 return 0x03B0;
135 default: 136 default:
136 BUG(); 137 BUG();
138 return 0;
137 } 139 }
138} 140}
139 141
@@ -144,10 +146,12 @@ static inline u16 DISPC_TIMING_H(enum omap_channel channel)
144 return 0x0064; 146 return 0x0064;
145 case OMAP_DSS_CHANNEL_DIGIT: 147 case OMAP_DSS_CHANNEL_DIGIT:
146 BUG(); 148 BUG();
149 return 0;
147 case OMAP_DSS_CHANNEL_LCD2: 150 case OMAP_DSS_CHANNEL_LCD2:
148 return 0x0400; 151 return 0x0400;
149 default: 152 default:
150 BUG(); 153 BUG();
154 return 0;
151 } 155 }
152} 156}
153 157
@@ -158,10 +162,12 @@ static inline u16 DISPC_TIMING_V(enum omap_channel channel)
158 return 0x0068; 162 return 0x0068;
159 case OMAP_DSS_CHANNEL_DIGIT: 163 case OMAP_DSS_CHANNEL_DIGIT:
160 BUG(); 164 BUG();
165 return 0;
161 case OMAP_DSS_CHANNEL_LCD2: 166 case OMAP_DSS_CHANNEL_LCD2:
162 return 0x0404; 167 return 0x0404;
163 default: 168 default:
164 BUG(); 169 BUG();
170 return 0;
165 } 171 }
166} 172}
167 173
@@ -172,10 +178,12 @@ static inline u16 DISPC_POL_FREQ(enum omap_channel channel)
172 return 0x006C; 178 return 0x006C;
173 case OMAP_DSS_CHANNEL_DIGIT: 179 case OMAP_DSS_CHANNEL_DIGIT:
174 BUG(); 180 BUG();
181 return 0;
175 case OMAP_DSS_CHANNEL_LCD2: 182 case OMAP_DSS_CHANNEL_LCD2:
176 return 0x0408; 183 return 0x0408;
177 default: 184 default:
178 BUG(); 185 BUG();
186 return 0;
179 } 187 }
180} 188}
181 189
@@ -186,10 +194,12 @@ static inline u16 DISPC_DIVISORo(enum omap_channel channel)
186 return 0x0070; 194 return 0x0070;
187 case OMAP_DSS_CHANNEL_DIGIT: 195 case OMAP_DSS_CHANNEL_DIGIT:
188 BUG(); 196 BUG();
197 return 0;
189 case OMAP_DSS_CHANNEL_LCD2: 198 case OMAP_DSS_CHANNEL_LCD2:
190 return 0x040C; 199 return 0x040C;
191 default: 200 default:
192 BUG(); 201 BUG();
202 return 0;
193 } 203 }
194} 204}
195 205
@@ -205,6 +215,7 @@ static inline u16 DISPC_SIZE_MGR(enum omap_channel channel)
205 return 0x03CC; 215 return 0x03CC;
206 default: 216 default:
207 BUG(); 217 BUG();
218 return 0;
208 } 219 }
209} 220}
210 221
@@ -215,10 +226,12 @@ static inline u16 DISPC_DATA_CYCLE1(enum omap_channel channel)
215 return 0x01D4; 226 return 0x01D4;
216 case OMAP_DSS_CHANNEL_DIGIT: 227 case OMAP_DSS_CHANNEL_DIGIT:
217 BUG(); 228 BUG();
229 return 0;
218 case OMAP_DSS_CHANNEL_LCD2: 230 case OMAP_DSS_CHANNEL_LCD2:
219 return 0x03C0; 231 return 0x03C0;
220 default: 232 default:
221 BUG(); 233 BUG();
234 return 0;
222 } 235 }
223} 236}
224 237
@@ -229,10 +242,12 @@ static inline u16 DISPC_DATA_CYCLE2(enum omap_channel channel)
229 return 0x01D8; 242 return 0x01D8;
230 case OMAP_DSS_CHANNEL_DIGIT: 243 case OMAP_DSS_CHANNEL_DIGIT:
231 BUG(); 244 BUG();
245 return 0;
232 case OMAP_DSS_CHANNEL_LCD2: 246 case OMAP_DSS_CHANNEL_LCD2:
233 return 0x03C4; 247 return 0x03C4;
234 default: 248 default:
235 BUG(); 249 BUG();
250 return 0;
236 } 251 }
237} 252}
238 253
@@ -243,10 +258,12 @@ static inline u16 DISPC_DATA_CYCLE3(enum omap_channel channel)
243 return 0x01DC; 258 return 0x01DC;
244 case OMAP_DSS_CHANNEL_DIGIT: 259 case OMAP_DSS_CHANNEL_DIGIT:
245 BUG(); 260 BUG();
261 return 0;
246 case OMAP_DSS_CHANNEL_LCD2: 262 case OMAP_DSS_CHANNEL_LCD2:
247 return 0x03C8; 263 return 0x03C8;
248 default: 264 default:
249 BUG(); 265 BUG();
266 return 0;
250 } 267 }
251} 268}
252 269
@@ -257,10 +274,12 @@ static inline u16 DISPC_CPR_COEF_R(enum omap_channel channel)
257 return 0x0220; 274 return 0x0220;
258 case OMAP_DSS_CHANNEL_DIGIT: 275 case OMAP_DSS_CHANNEL_DIGIT:
259 BUG(); 276 BUG();
277 return 0;
260 case OMAP_DSS_CHANNEL_LCD2: 278 case OMAP_DSS_CHANNEL_LCD2:
261 return 0x03BC; 279 return 0x03BC;
262 default: 280 default:
263 BUG(); 281 BUG();
282 return 0;
264 } 283 }
265} 284}
266 285
@@ -271,10 +290,12 @@ static inline u16 DISPC_CPR_COEF_G(enum omap_channel channel)
271 return 0x0224; 290 return 0x0224;
272 case OMAP_DSS_CHANNEL_DIGIT: 291 case OMAP_DSS_CHANNEL_DIGIT:
273 BUG(); 292 BUG();
293 return 0;
274 case OMAP_DSS_CHANNEL_LCD2: 294 case OMAP_DSS_CHANNEL_LCD2:
275 return 0x03B8; 295 return 0x03B8;
276 default: 296 default:
277 BUG(); 297 BUG();
298 return 0;
278 } 299 }
279} 300}
280 301
@@ -285,10 +306,12 @@ static inline u16 DISPC_CPR_COEF_B(enum omap_channel channel)
285 return 0x0228; 306 return 0x0228;
286 case OMAP_DSS_CHANNEL_DIGIT: 307 case OMAP_DSS_CHANNEL_DIGIT:
287 BUG(); 308 BUG();
309 return 0;
288 case OMAP_DSS_CHANNEL_LCD2: 310 case OMAP_DSS_CHANNEL_LCD2:
289 return 0x03B4; 311 return 0x03B4;
290 default: 312 default:
291 BUG(); 313 BUG();
314 return 0;
292 } 315 }
293} 316}
294 317
@@ -306,6 +329,7 @@ static inline u16 DISPC_OVL_BASE(enum omap_plane plane)
306 return 0x0300; 329 return 0x0300;
307 default: 330 default:
308 BUG(); 331 BUG();
332 return 0;
309 } 333 }
310} 334}
311 335
@@ -321,6 +345,7 @@ static inline u16 DISPC_BA0_OFFSET(enum omap_plane plane)
321 return 0x0008; 345 return 0x0008;
322 default: 346 default:
323 BUG(); 347 BUG();
348 return 0;
324 } 349 }
325} 350}
326 351
@@ -335,6 +360,7 @@ static inline u16 DISPC_BA1_OFFSET(enum omap_plane plane)
335 return 0x000C; 360 return 0x000C;
336 default: 361 default:
337 BUG(); 362 BUG();
363 return 0;
338 } 364 }
339} 365}
340 366
@@ -343,6 +369,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
343 switch (plane) { 369 switch (plane) {
344 case OMAP_DSS_GFX: 370 case OMAP_DSS_GFX:
345 BUG(); 371 BUG();
372 return 0;
346 case OMAP_DSS_VIDEO1: 373 case OMAP_DSS_VIDEO1:
347 return 0x0544; 374 return 0x0544;
348 case OMAP_DSS_VIDEO2: 375 case OMAP_DSS_VIDEO2:
@@ -351,6 +378,7 @@ static inline u16 DISPC_BA0_UV_OFFSET(enum omap_plane plane)
351 return 0x0310; 378 return 0x0310;
352 default: 379 default:
353 BUG(); 380 BUG();
381 return 0;
354 } 382 }
355} 383}
356 384
@@ -359,6 +387,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
359 switch (plane) { 387 switch (plane) {
360 case OMAP_DSS_GFX: 388 case OMAP_DSS_GFX:
361 BUG(); 389 BUG();
390 return 0;
362 case OMAP_DSS_VIDEO1: 391 case OMAP_DSS_VIDEO1:
363 return 0x0548; 392 return 0x0548;
364 case OMAP_DSS_VIDEO2: 393 case OMAP_DSS_VIDEO2:
@@ -367,6 +396,7 @@ static inline u16 DISPC_BA1_UV_OFFSET(enum omap_plane plane)
367 return 0x0314; 396 return 0x0314;
368 default: 397 default:
369 BUG(); 398 BUG();
399 return 0;
370 } 400 }
371} 401}
372 402
@@ -381,6 +411,7 @@ static inline u16 DISPC_POS_OFFSET(enum omap_plane plane)
381 return 0x009C; 411 return 0x009C;
382 default: 412 default:
383 BUG(); 413 BUG();
414 return 0;
384 } 415 }
385} 416}
386 417
@@ -395,6 +426,7 @@ static inline u16 DISPC_SIZE_OFFSET(enum omap_plane plane)
395 return 0x00A8; 426 return 0x00A8;
396 default: 427 default:
397 BUG(); 428 BUG();
429 return 0;
398 } 430 }
399} 431}
400 432
@@ -410,6 +442,7 @@ static inline u16 DISPC_ATTR_OFFSET(enum omap_plane plane)
410 return 0x0070; 442 return 0x0070;
411 default: 443 default:
412 BUG(); 444 BUG();
445 return 0;
413 } 446 }
414} 447}
415 448
@@ -418,6 +451,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
418 switch (plane) { 451 switch (plane) {
419 case OMAP_DSS_GFX: 452 case OMAP_DSS_GFX:
420 BUG(); 453 BUG();
454 return 0;
421 case OMAP_DSS_VIDEO1: 455 case OMAP_DSS_VIDEO1:
422 return 0x0568; 456 return 0x0568;
423 case OMAP_DSS_VIDEO2: 457 case OMAP_DSS_VIDEO2:
@@ -426,6 +460,7 @@ static inline u16 DISPC_ATTR2_OFFSET(enum omap_plane plane)
426 return 0x032C; 460 return 0x032C;
427 default: 461 default:
428 BUG(); 462 BUG();
463 return 0;
429 } 464 }
430} 465}
431 466
@@ -441,6 +476,7 @@ static inline u16 DISPC_FIFO_THRESH_OFFSET(enum omap_plane plane)
441 return 0x008C; 476 return 0x008C;
442 default: 477 default:
443 BUG(); 478 BUG();
479 return 0;
444 } 480 }
445} 481}
446 482
@@ -456,6 +492,7 @@ static inline u16 DISPC_FIFO_SIZE_STATUS_OFFSET(enum omap_plane plane)
456 return 0x0088; 492 return 0x0088;
457 default: 493 default:
458 BUG(); 494 BUG();
495 return 0;
459 } 496 }
460} 497}
461 498
@@ -471,6 +508,7 @@ static inline u16 DISPC_ROW_INC_OFFSET(enum omap_plane plane)
471 return 0x00A4; 508 return 0x00A4;
472 default: 509 default:
473 BUG(); 510 BUG();
511 return 0;
474 } 512 }
475} 513}
476 514
@@ -486,6 +524,7 @@ static inline u16 DISPC_PIX_INC_OFFSET(enum omap_plane plane)
486 return 0x0098; 524 return 0x0098;
487 default: 525 default:
488 BUG(); 526 BUG();
527 return 0;
489 } 528 }
490} 529}
491 530
@@ -498,8 +537,10 @@ static inline u16 DISPC_WINDOW_SKIP_OFFSET(enum omap_plane plane)
498 case OMAP_DSS_VIDEO2: 537 case OMAP_DSS_VIDEO2:
499 case OMAP_DSS_VIDEO3: 538 case OMAP_DSS_VIDEO3:
500 BUG(); 539 BUG();
540 return 0;
501 default: 541 default:
502 BUG(); 542 BUG();
543 return 0;
503 } 544 }
504} 545}
505 546
@@ -512,8 +553,10 @@ static inline u16 DISPC_TABLE_BA_OFFSET(enum omap_plane plane)
512 case OMAP_DSS_VIDEO2: 553 case OMAP_DSS_VIDEO2:
513 case OMAP_DSS_VIDEO3: 554 case OMAP_DSS_VIDEO3:
514 BUG(); 555 BUG();
556 return 0;
515 default: 557 default:
516 BUG(); 558 BUG();
559 return 0;
517 } 560 }
518} 561}
519 562
@@ -522,6 +565,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
522 switch (plane) { 565 switch (plane) {
523 case OMAP_DSS_GFX: 566 case OMAP_DSS_GFX:
524 BUG(); 567 BUG();
568 return 0;
525 case OMAP_DSS_VIDEO1: 569 case OMAP_DSS_VIDEO1:
526 case OMAP_DSS_VIDEO2: 570 case OMAP_DSS_VIDEO2:
527 return 0x0024; 571 return 0x0024;
@@ -529,6 +573,7 @@ static inline u16 DISPC_FIR_OFFSET(enum omap_plane plane)
529 return 0x0090; 573 return 0x0090;
530 default: 574 default:
531 BUG(); 575 BUG();
576 return 0;
532 } 577 }
533} 578}
534 579
@@ -537,6 +582,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
537 switch (plane) { 582 switch (plane) {
538 case OMAP_DSS_GFX: 583 case OMAP_DSS_GFX:
539 BUG(); 584 BUG();
585 return 0;
540 case OMAP_DSS_VIDEO1: 586 case OMAP_DSS_VIDEO1:
541 return 0x0580; 587 return 0x0580;
542 case OMAP_DSS_VIDEO2: 588 case OMAP_DSS_VIDEO2:
@@ -545,6 +591,7 @@ static inline u16 DISPC_FIR2_OFFSET(enum omap_plane plane)
545 return 0x0424; 591 return 0x0424;
546 default: 592 default:
547 BUG(); 593 BUG();
594 return 0;
548 } 595 }
549} 596}
550 597
@@ -553,6 +600,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
553 switch (plane) { 600 switch (plane) {
554 case OMAP_DSS_GFX: 601 case OMAP_DSS_GFX:
555 BUG(); 602 BUG();
603 return 0;
556 case OMAP_DSS_VIDEO1: 604 case OMAP_DSS_VIDEO1:
557 case OMAP_DSS_VIDEO2: 605 case OMAP_DSS_VIDEO2:
558 return 0x0028; 606 return 0x0028;
@@ -560,6 +608,7 @@ static inline u16 DISPC_PIC_SIZE_OFFSET(enum omap_plane plane)
560 return 0x0094; 608 return 0x0094;
561 default: 609 default:
562 BUG(); 610 BUG();
611 return 0;
563 } 612 }
564} 613}
565 614
@@ -569,6 +618,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
569 switch (plane) { 618 switch (plane) {
570 case OMAP_DSS_GFX: 619 case OMAP_DSS_GFX:
571 BUG(); 620 BUG();
621 return 0;
572 case OMAP_DSS_VIDEO1: 622 case OMAP_DSS_VIDEO1:
573 case OMAP_DSS_VIDEO2: 623 case OMAP_DSS_VIDEO2:
574 return 0x002C; 624 return 0x002C;
@@ -576,6 +626,7 @@ static inline u16 DISPC_ACCU0_OFFSET(enum omap_plane plane)
576 return 0x0000; 626 return 0x0000;
577 default: 627 default:
578 BUG(); 628 BUG();
629 return 0;
579 } 630 }
580} 631}
581 632
@@ -584,6 +635,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
584 switch (plane) { 635 switch (plane) {
585 case OMAP_DSS_GFX: 636 case OMAP_DSS_GFX:
586 BUG(); 637 BUG();
638 return 0;
587 case OMAP_DSS_VIDEO1: 639 case OMAP_DSS_VIDEO1:
588 return 0x0584; 640 return 0x0584;
589 case OMAP_DSS_VIDEO2: 641 case OMAP_DSS_VIDEO2:
@@ -592,6 +644,7 @@ static inline u16 DISPC_ACCU2_0_OFFSET(enum omap_plane plane)
592 return 0x0428; 644 return 0x0428;
593 default: 645 default:
594 BUG(); 646 BUG();
647 return 0;
595 } 648 }
596} 649}
597 650
@@ -600,6 +653,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
600 switch (plane) { 653 switch (plane) {
601 case OMAP_DSS_GFX: 654 case OMAP_DSS_GFX:
602 BUG(); 655 BUG();
656 return 0;
603 case OMAP_DSS_VIDEO1: 657 case OMAP_DSS_VIDEO1:
604 case OMAP_DSS_VIDEO2: 658 case OMAP_DSS_VIDEO2:
605 return 0x0030; 659 return 0x0030;
@@ -607,6 +661,7 @@ static inline u16 DISPC_ACCU1_OFFSET(enum omap_plane plane)
607 return 0x0004; 661 return 0x0004;
608 default: 662 default:
609 BUG(); 663 BUG();
664 return 0;
610 } 665 }
611} 666}
612 667
@@ -615,6 +670,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
615 switch (plane) { 670 switch (plane) {
616 case OMAP_DSS_GFX: 671 case OMAP_DSS_GFX:
617 BUG(); 672 BUG();
673 return 0;
618 case OMAP_DSS_VIDEO1: 674 case OMAP_DSS_VIDEO1:
619 return 0x0588; 675 return 0x0588;
620 case OMAP_DSS_VIDEO2: 676 case OMAP_DSS_VIDEO2:
@@ -623,6 +679,7 @@ static inline u16 DISPC_ACCU2_1_OFFSET(enum omap_plane plane)
623 return 0x042C; 679 return 0x042C;
624 default: 680 default:
625 BUG(); 681 BUG();
682 return 0;
626 } 683 }
627} 684}
628 685
@@ -632,6 +689,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
632 switch (plane) { 689 switch (plane) {
633 case OMAP_DSS_GFX: 690 case OMAP_DSS_GFX:
634 BUG(); 691 BUG();
692 return 0;
635 case OMAP_DSS_VIDEO1: 693 case OMAP_DSS_VIDEO1:
636 case OMAP_DSS_VIDEO2: 694 case OMAP_DSS_VIDEO2:
637 return 0x0034 + i * 0x8; 695 return 0x0034 + i * 0x8;
@@ -639,6 +697,7 @@ static inline u16 DISPC_FIR_COEF_H_OFFSET(enum omap_plane plane, u16 i)
639 return 0x0010 + i * 0x8; 697 return 0x0010 + i * 0x8;
640 default: 698 default:
641 BUG(); 699 BUG();
700 return 0;
642 } 701 }
643} 702}
644 703
@@ -648,6 +707,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
648 switch (plane) { 707 switch (plane) {
649 case OMAP_DSS_GFX: 708 case OMAP_DSS_GFX:
650 BUG(); 709 BUG();
710 return 0;
651 case OMAP_DSS_VIDEO1: 711 case OMAP_DSS_VIDEO1:
652 return 0x058C + i * 0x8; 712 return 0x058C + i * 0x8;
653 case OMAP_DSS_VIDEO2: 713 case OMAP_DSS_VIDEO2:
@@ -656,6 +716,7 @@ static inline u16 DISPC_FIR_COEF_H2_OFFSET(enum omap_plane plane, u16 i)
656 return 0x0430 + i * 0x8; 716 return 0x0430 + i * 0x8;
657 default: 717 default:
658 BUG(); 718 BUG();
719 return 0;
659 } 720 }
660} 721}
661 722
@@ -665,6 +726,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
665 switch (plane) { 726 switch (plane) {
666 case OMAP_DSS_GFX: 727 case OMAP_DSS_GFX:
667 BUG(); 728 BUG();
729 return 0;
668 case OMAP_DSS_VIDEO1: 730 case OMAP_DSS_VIDEO1:
669 case OMAP_DSS_VIDEO2: 731 case OMAP_DSS_VIDEO2:
670 return 0x0038 + i * 0x8; 732 return 0x0038 + i * 0x8;
@@ -672,6 +734,7 @@ static inline u16 DISPC_FIR_COEF_HV_OFFSET(enum omap_plane plane, u16 i)
672 return 0x0014 + i * 0x8; 734 return 0x0014 + i * 0x8;
673 default: 735 default:
674 BUG(); 736 BUG();
737 return 0;
675 } 738 }
676} 739}
677 740
@@ -681,6 +744,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
681 switch (plane) { 744 switch (plane) {
682 case OMAP_DSS_GFX: 745 case OMAP_DSS_GFX:
683 BUG(); 746 BUG();
747 return 0;
684 case OMAP_DSS_VIDEO1: 748 case OMAP_DSS_VIDEO1:
685 return 0x0590 + i * 8; 749 return 0x0590 + i * 8;
686 case OMAP_DSS_VIDEO2: 750 case OMAP_DSS_VIDEO2:
@@ -689,6 +753,7 @@ static inline u16 DISPC_FIR_COEF_HV2_OFFSET(enum omap_plane plane, u16 i)
689 return 0x0434 + i * 0x8; 753 return 0x0434 + i * 0x8;
690 default: 754 default:
691 BUG(); 755 BUG();
756 return 0;
692 } 757 }
693} 758}
694 759
@@ -698,12 +763,14 @@ static inline u16 DISPC_CONV_COEF_OFFSET(enum omap_plane plane, u16 i)
698 switch (plane) { 763 switch (plane) {
699 case OMAP_DSS_GFX: 764 case OMAP_DSS_GFX:
700 BUG(); 765 BUG();
766 return 0;
701 case OMAP_DSS_VIDEO1: 767 case OMAP_DSS_VIDEO1:
702 case OMAP_DSS_VIDEO2: 768 case OMAP_DSS_VIDEO2:
703 case OMAP_DSS_VIDEO3: 769 case OMAP_DSS_VIDEO3:
704 return 0x0074 + i * 0x4; 770 return 0x0074 + i * 0x4;
705 default: 771 default:
706 BUG(); 772 BUG();
773 return 0;
707 } 774 }
708} 775}
709 776
@@ -713,6 +780,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
713 switch (plane) { 780 switch (plane) {
714 case OMAP_DSS_GFX: 781 case OMAP_DSS_GFX:
715 BUG(); 782 BUG();
783 return 0;
716 case OMAP_DSS_VIDEO1: 784 case OMAP_DSS_VIDEO1:
717 return 0x0124 + i * 0x4; 785 return 0x0124 + i * 0x4;
718 case OMAP_DSS_VIDEO2: 786 case OMAP_DSS_VIDEO2:
@@ -721,6 +789,7 @@ static inline u16 DISPC_FIR_COEF_V_OFFSET(enum omap_plane plane, u16 i)
721 return 0x0050 + i * 0x4; 789 return 0x0050 + i * 0x4;
722 default: 790 default:
723 BUG(); 791 BUG();
792 return 0;
724 } 793 }
725} 794}
726 795
@@ -730,6 +799,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
730 switch (plane) { 799 switch (plane) {
731 case OMAP_DSS_GFX: 800 case OMAP_DSS_GFX:
732 BUG(); 801 BUG();
802 return 0;
733 case OMAP_DSS_VIDEO1: 803 case OMAP_DSS_VIDEO1:
734 return 0x05CC + i * 0x4; 804 return 0x05CC + i * 0x4;
735 case OMAP_DSS_VIDEO2: 805 case OMAP_DSS_VIDEO2:
@@ -738,6 +808,7 @@ static inline u16 DISPC_FIR_COEF_V2_OFFSET(enum omap_plane plane, u16 i)
738 return 0x0470 + i * 0x4; 808 return 0x0470 + i * 0x4;
739 default: 809 default:
740 BUG(); 810 BUG();
811 return 0;
741 } 812 }
742} 813}
743 814
@@ -754,6 +825,7 @@ static inline u16 DISPC_PRELOAD_OFFSET(enum omap_plane plane)
754 return 0x00A0; 825 return 0x00A0;
755 default: 826 default:
756 BUG(); 827 BUG();
828 return 0;
757 } 829 }
758} 830}
759#endif 831#endif
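Note on the pattern running through the dispc.h hunks above (and repeated in the later files): every default/GFX branch that only calls BUG() gains an explicit return. A minimal sketch of the motivation, assuming a configuration where BUG() is not a no-return call (for example CONFIG_BUG=n), in which case the compiler warns that control reaches the end of a non-void function; the helper name, offsets and header path below are illustrative, not part of the patch:

#include <linux/types.h>
#include <linux/bug.h>
#include <video/omapdss.h>	/* enum omap_plane; header path assumed for this era */

/* Hypothetical offset helper: without the final return, a BUG() that may
 * return (CONFIG_BUG=n) leaves a path that falls off the end of a
 * non-void function and triggers a compiler warning. */
static inline u16 example_coef_offset(enum omap_plane plane, u16 i)
{
	switch (plane) {
	case OMAP_DSS_VIDEO1:
		return 0x0100 + i * 0x8;	/* illustrative offset only */
	case OMAP_DSS_VIDEO2:
		return 0x0200 + i * 0x8;	/* illustrative offset only */
	default:
		BUG();
		return 0;	/* dead when BUG() halts; keeps the compiler quiet otherwise */
	}
}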
diff --git a/drivers/video/omap2/dss/display.c b/drivers/video/omap2/dss/display.c
index 4424c198dbcd..249010630370 100644
--- a/drivers/video/omap2/dss/display.c
+++ b/drivers/video/omap2/dss/display.c
@@ -304,10 +304,18 @@ int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev)
304 return 24; 304 return 24;
305 default: 305 default:
306 BUG(); 306 BUG();
307 return 0;
307 } 308 }
308} 309}
309EXPORT_SYMBOL(omapdss_default_get_recommended_bpp); 310EXPORT_SYMBOL(omapdss_default_get_recommended_bpp);
310 311
312void omapdss_default_get_timings(struct omap_dss_device *dssdev,
313 struct omap_video_timings *timings)
314{
315 *timings = dssdev->panel.timings;
316}
317EXPORT_SYMBOL(omapdss_default_get_timings);
318
311/* Checks if replication logic should be used. Only use for active matrix, 319/* Checks if replication logic should be used. Only use for active matrix,
312 * when overlay is in RGB12U or RGB16 mode, and LCD interface is 320 * when overlay is in RGB12U or RGB16 mode, and LCD interface is
313 * 18bpp or 24bpp */ 321 * 18bpp or 24bpp */
@@ -340,6 +348,7 @@ bool dss_use_replication(struct omap_dss_device *dssdev,
340 break; 348 break;
341 default: 349 default:
342 BUG(); 350 BUG();
351 return false;
343 } 352 }
344 353
345 return bpp > 16; 354 return bpp > 16;
@@ -352,46 +361,6 @@ void dss_init_device(struct platform_device *pdev,
352 int i; 361 int i;
353 int r; 362 int r;
354 363
355 switch (dssdev->type) {
356#ifdef CONFIG_OMAP2_DSS_DPI
357 case OMAP_DISPLAY_TYPE_DPI:
358 r = dpi_init_display(dssdev);
359 break;
360#endif
361#ifdef CONFIG_OMAP2_DSS_RFBI
362 case OMAP_DISPLAY_TYPE_DBI:
363 r = rfbi_init_display(dssdev);
364 break;
365#endif
366#ifdef CONFIG_OMAP2_DSS_VENC
367 case OMAP_DISPLAY_TYPE_VENC:
368 r = venc_init_display(dssdev);
369 break;
370#endif
371#ifdef CONFIG_OMAP2_DSS_SDI
372 case OMAP_DISPLAY_TYPE_SDI:
373 r = sdi_init_display(dssdev);
374 break;
375#endif
376#ifdef CONFIG_OMAP2_DSS_DSI
377 case OMAP_DISPLAY_TYPE_DSI:
378 r = dsi_init_display(dssdev);
379 break;
380#endif
381 case OMAP_DISPLAY_TYPE_HDMI:
382 r = hdmi_init_display(dssdev);
383 break;
384 default:
385 DSSERR("Support for display '%s' not compiled in.\n",
386 dssdev->name);
387 return;
388 }
389
390 if (r) {
391 DSSERR("failed to init display %s\n", dssdev->name);
392 return;
393 }
394
395 /* create device sysfs files */ 364 /* create device sysfs files */
396 i = 0; 365 i = 0;
397 while ((attr = display_sysfs_attrs[i++]) != NULL) { 366 while ((attr = display_sysfs_attrs[i++]) != NULL) {
diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c
index faaf305fda27..8c2056c9537b 100644
--- a/drivers/video/omap2/dss/dpi.c
+++ b/drivers/video/omap2/dss/dpi.c
@@ -156,7 +156,7 @@ static int dpi_set_mode(struct omap_dss_device *dssdev)
156 t->pixel_clock = pck; 156 t->pixel_clock = pck;
157 } 157 }
158 158
159 dispc_mgr_set_lcd_timings(dssdev->manager->id, t); 159 dss_mgr_set_timings(dssdev->manager, t);
160 160
161 return 0; 161 return 0;
162} 162}
@@ -202,10 +202,6 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev)
202 goto err_reg_enable; 202 goto err_reg_enable;
203 } 203 }
204 204
205 r = dss_runtime_get();
206 if (r)
207 goto err_get_dss;
208
209 r = dispc_runtime_get(); 205 r = dispc_runtime_get();
210 if (r) 206 if (r)
211 goto err_get_dispc; 207 goto err_get_dispc;
@@ -244,8 +240,6 @@ err_dsi_pll_init:
244err_get_dsi: 240err_get_dsi:
245 dispc_runtime_put(); 241 dispc_runtime_put();
246err_get_dispc: 242err_get_dispc:
247 dss_runtime_put();
248err_get_dss:
249 if (cpu_is_omap34xx()) 243 if (cpu_is_omap34xx())
250 regulator_disable(dpi.vdds_dsi_reg); 244 regulator_disable(dpi.vdds_dsi_reg);
251err_reg_enable: 245err_reg_enable:
@@ -266,7 +260,6 @@ void omapdss_dpi_display_disable(struct omap_dss_device *dssdev)
266 } 260 }
267 261
268 dispc_runtime_put(); 262 dispc_runtime_put();
269 dss_runtime_put();
270 263
271 if (cpu_is_omap34xx()) 264 if (cpu_is_omap34xx())
272 regulator_disable(dpi.vdds_dsi_reg); 265 regulator_disable(dpi.vdds_dsi_reg);
@@ -283,21 +276,15 @@ void dpi_set_timings(struct omap_dss_device *dssdev,
283 DSSDBG("dpi_set_timings\n"); 276 DSSDBG("dpi_set_timings\n");
284 dssdev->panel.timings = *timings; 277 dssdev->panel.timings = *timings;
285 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { 278 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
286 r = dss_runtime_get();
287 if (r)
288 return;
289
290 r = dispc_runtime_get(); 279 r = dispc_runtime_get();
291 if (r) { 280 if (r)
292 dss_runtime_put();
293 return; 281 return;
294 }
295 282
296 dpi_set_mode(dssdev); 283 dpi_set_mode(dssdev);
297 dispc_mgr_go(dssdev->manager->id);
298 284
299 dispc_runtime_put(); 285 dispc_runtime_put();
300 dss_runtime_put(); 286 } else {
287 dss_mgr_set_timings(dssdev->manager, timings);
301 } 288 }
302} 289}
303EXPORT_SYMBOL(dpi_set_timings); 290EXPORT_SYMBOL(dpi_set_timings);
@@ -312,7 +299,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
312 unsigned long pck; 299 unsigned long pck;
313 struct dispc_clock_info dispc_cinfo; 300 struct dispc_clock_info dispc_cinfo;
314 301
315 if (!dispc_lcd_timings_ok(timings)) 302 if (dss_mgr_check_timings(dssdev->manager, timings))
316 return -EINVAL; 303 return -EINVAL;
317 304
318 if (timings->pixel_clock == 0) 305 if (timings->pixel_clock == 0)
@@ -352,7 +339,7 @@ int dpi_check_timings(struct omap_dss_device *dssdev,
352} 339}
353EXPORT_SYMBOL(dpi_check_timings); 340EXPORT_SYMBOL(dpi_check_timings);
354 341
355int dpi_init_display(struct omap_dss_device *dssdev) 342static int __init dpi_init_display(struct omap_dss_device *dssdev)
356{ 343{
357 DSSDBG("init_display\n"); 344 DSSDBG("init_display\n");
358 345
@@ -378,12 +365,58 @@ int dpi_init_display(struct omap_dss_device *dssdev)
378 return 0; 365 return 0;
379} 366}
380 367
381int dpi_init(void) 368static void __init dpi_probe_pdata(struct platform_device *pdev)
382{ 369{
370 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
371 int i, r;
372
373 for (i = 0; i < pdata->num_devices; ++i) {
374 struct omap_dss_device *dssdev = pdata->devices[i];
375
376 if (dssdev->type != OMAP_DISPLAY_TYPE_DPI)
377 continue;
378
379 r = dpi_init_display(dssdev);
380 if (r) {
381 DSSERR("device %s init failed: %d\n", dssdev->name, r);
382 continue;
383 }
384
385 r = omap_dss_register_device(dssdev, &pdev->dev, i);
386 if (r)
387 DSSERR("device %s register failed: %d\n",
388 dssdev->name, r);
389 }
390}
391
392static int __init omap_dpi_probe(struct platform_device *pdev)
393{
394 dpi_probe_pdata(pdev);
395
396 return 0;
397}
398
399static int __exit omap_dpi_remove(struct platform_device *pdev)
400{
401 omap_dss_unregister_child_devices(&pdev->dev);
402
383 return 0; 403 return 0;
384} 404}
385 405
386void dpi_exit(void) 406static struct platform_driver omap_dpi_driver = {
407 .remove = __exit_p(omap_dpi_remove),
408 .driver = {
409 .name = "omapdss_dpi",
410 .owner = THIS_MODULE,
411 },
412};
413
414int __init dpi_init_platform_driver(void)
387{ 415{
416 return platform_driver_probe(&omap_dpi_driver, omap_dpi_probe);
388} 417}
389 418
419void __exit dpi_uninit_platform_driver(void)
420{
421 platform_driver_unregister(&omap_dpi_driver);
422}
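The dpi.c hunk above replaces dpi_init()/dpi_exit() with an "omapdss_dpi" platform driver registered through platform_driver_probe(), which is what lets dpi_init_display() and the new dpi_probe_pdata() carry __init. A minimal sketch of that registration pattern, using a hypothetical driver name (the real names are in the hunk): passing the probe routine to platform_driver_probe() instead of setting .probe allows it to live in __init memory and be discarded after boot, at the cost that the device can never be re-bound later, which is acceptable for fixed SoC outputs.

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

/* Hypothetical output driver, not part of the patch. */
static int __init example_output_probe(struct platform_device *pdev)
{
	return 0;
}

static int __exit example_output_remove(struct platform_device *pdev)
{
	return 0;
}

static struct platform_driver example_output_driver = {
	/* no .probe here: it is handed to platform_driver_probe() below */
	.remove = __exit_p(example_output_remove),
	.driver = {
		.name  = "example_output",
		.owner = THIS_MODULE,
	},
};

static int __init example_output_init(void)
{
	return platform_driver_probe(&example_output_driver,
				     example_output_probe);
}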
diff --git a/drivers/video/omap2/dss/dsi.c b/drivers/video/omap2/dss/dsi.c
index 210a3c4f6150..ca8382d346e9 100644
--- a/drivers/video/omap2/dss/dsi.c
+++ b/drivers/video/omap2/dss/dsi.c
@@ -256,14 +256,13 @@ struct dsi_data {
256 struct platform_device *pdev; 256 struct platform_device *pdev;
257 void __iomem *base; 257 void __iomem *base;
258 258
259 int module_id;
260
259 int irq; 261 int irq;
260 262
261 struct clk *dss_clk; 263 struct clk *dss_clk;
262 struct clk *sys_clk; 264 struct clk *sys_clk;
263 265
264 int (*enable_pads)(int dsi_id, unsigned lane_mask);
265 void (*disable_pads)(int dsi_id, unsigned lane_mask);
266
267 struct dsi_clock_info current_cinfo; 266 struct dsi_clock_info current_cinfo;
268 267
269 bool vdds_dsi_enabled; 268 bool vdds_dsi_enabled;
@@ -361,11 +360,6 @@ struct platform_device *dsi_get_dsidev_from_id(int module)
361 return dsi_pdev_map[module]; 360 return dsi_pdev_map[module];
362} 361}
363 362
364static inline int dsi_get_dsidev_id(struct platform_device *dsidev)
365{
366 return dsidev->id;
367}
368
369static inline void dsi_write_reg(struct platform_device *dsidev, 363static inline void dsi_write_reg(struct platform_device *dsidev,
370 const struct dsi_reg idx, u32 val) 364 const struct dsi_reg idx, u32 val)
371{ 365{
@@ -452,6 +446,7 @@ u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt)
452 return 16; 446 return 16;
453 default: 447 default:
454 BUG(); 448 BUG();
449 return 0;
455 } 450 }
456} 451}
457 452
@@ -1184,10 +1179,9 @@ static unsigned long dsi_get_txbyteclkhs(struct platform_device *dsidev)
1184static unsigned long dsi_fclk_rate(struct platform_device *dsidev) 1179static unsigned long dsi_fclk_rate(struct platform_device *dsidev)
1185{ 1180{
1186 unsigned long r; 1181 unsigned long r;
1187 int dsi_module = dsi_get_dsidev_id(dsidev);
1188 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1182 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1189 1183
1190 if (dss_get_dsi_clk_source(dsi_module) == OMAP_DSS_CLK_SRC_FCK) { 1184 if (dss_get_dsi_clk_source(dsi->module_id) == OMAP_DSS_CLK_SRC_FCK) {
1191 /* DSI FCLK source is DSS_CLK_FCK */ 1185 /* DSI FCLK source is DSS_CLK_FCK */
1192 r = clk_get_rate(dsi->dss_clk); 1186 r = clk_get_rate(dsi->dss_clk);
1193 } else { 1187 } else {
@@ -1279,10 +1273,9 @@ static int dsi_pll_power(struct platform_device *dsidev,
1279} 1273}
1280 1274
1281/* calculate clock rates using dividers in cinfo */ 1275/* calculate clock rates using dividers in cinfo */
1282static int dsi_calc_clock_rates(struct omap_dss_device *dssdev, 1276static int dsi_calc_clock_rates(struct platform_device *dsidev,
1283 struct dsi_clock_info *cinfo) 1277 struct dsi_clock_info *cinfo)
1284{ 1278{
1285 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
1286 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1279 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1287 1280
1288 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max) 1281 if (cinfo->regn == 0 || cinfo->regn > dsi->regn_max)
@@ -1297,21 +1290,8 @@ static int dsi_calc_clock_rates(struct omap_dss_device *dssdev,
1297 if (cinfo->regm_dsi > dsi->regm_dsi_max) 1290 if (cinfo->regm_dsi > dsi->regm_dsi_max)
1298 return -EINVAL; 1291 return -EINVAL;
1299 1292
1300 if (cinfo->use_sys_clk) { 1293 cinfo->clkin = clk_get_rate(dsi->sys_clk);
1301 cinfo->clkin = clk_get_rate(dsi->sys_clk); 1294 cinfo->fint = cinfo->clkin / cinfo->regn;
1302 /* XXX it is unclear if highfreq should be used
1303 * with DSS_SYS_CLK source also */
1304 cinfo->highfreq = 0;
1305 } else {
1306 cinfo->clkin = dispc_mgr_pclk_rate(dssdev->manager->id);
1307
1308 if (cinfo->clkin < 32000000)
1309 cinfo->highfreq = 0;
1310 else
1311 cinfo->highfreq = 1;
1312 }
1313
1314 cinfo->fint = cinfo->clkin / (cinfo->regn * (cinfo->highfreq ? 2 : 1));
1315 1295
1316 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min) 1296 if (cinfo->fint > dsi->fint_max || cinfo->fint < dsi->fint_min)
1317 return -EINVAL; 1297 return -EINVAL;
@@ -1378,27 +1358,21 @@ retry:
1378 1358
1379 memset(&cur, 0, sizeof(cur)); 1359 memset(&cur, 0, sizeof(cur));
1380 cur.clkin = dss_sys_clk; 1360 cur.clkin = dss_sys_clk;
1381 cur.use_sys_clk = 1;
1382 cur.highfreq = 0;
1383 1361
1384 /* no highfreq: 0.75MHz < Fint = clkin / regn < 2.1MHz */ 1362 /* 0.75MHz < Fint = clkin / regn < 2.1MHz */
1385 /* highfreq: 0.75MHz < Fint = clkin / (2*regn) < 2.1MHz */
1386 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */ 1363 /* To reduce PLL lock time, keep Fint high (around 2 MHz) */
1387 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) { 1364 for (cur.regn = 1; cur.regn < dsi->regn_max; ++cur.regn) {
1388 if (cur.highfreq == 0) 1365 cur.fint = cur.clkin / cur.regn;
1389 cur.fint = cur.clkin / cur.regn;
1390 else
1391 cur.fint = cur.clkin / (2 * cur.regn);
1392 1366
1393 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min) 1367 if (cur.fint > dsi->fint_max || cur.fint < dsi->fint_min)
1394 continue; 1368 continue;
1395 1369
1396 /* DSIPHY(MHz) = (2 * regm / regn) * (clkin / (highfreq + 1)) */ 1370 /* DSIPHY(MHz) = (2 * regm / regn) * clkin */
1397 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) { 1371 for (cur.regm = 1; cur.regm < dsi->regm_max; ++cur.regm) {
1398 unsigned long a, b; 1372 unsigned long a, b;
1399 1373
1400 a = 2 * cur.regm * (cur.clkin/1000); 1374 a = 2 * cur.regm * (cur.clkin/1000);
1401 b = cur.regn * (cur.highfreq + 1); 1375 b = cur.regn;
1402 cur.clkin4ddr = a / b * 1000; 1376 cur.clkin4ddr = a / b * 1000;
1403 1377
1404 if (cur.clkin4ddr > 1800 * 1000 * 1000) 1378 if (cur.clkin4ddr > 1800 * 1000 * 1000)
@@ -1486,9 +1460,7 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1486 1460
1487 DSSDBGF(); 1461 DSSDBGF();
1488 1462
1489 dsi->current_cinfo.use_sys_clk = cinfo->use_sys_clk; 1463 dsi->current_cinfo.clkin = cinfo->clkin;
1490 dsi->current_cinfo.highfreq = cinfo->highfreq;
1491
1492 dsi->current_cinfo.fint = cinfo->fint; 1464 dsi->current_cinfo.fint = cinfo->fint;
1493 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr; 1465 dsi->current_cinfo.clkin4ddr = cinfo->clkin4ddr;
1494 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk = 1466 dsi->current_cinfo.dsi_pll_hsdiv_dispc_clk =
@@ -1503,17 +1475,13 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1503 1475
1504 DSSDBG("DSI Fint %ld\n", cinfo->fint); 1476 DSSDBG("DSI Fint %ld\n", cinfo->fint);
1505 1477
1506 DSSDBG("clkin (%s) rate %ld, highfreq %d\n", 1478 DSSDBG("clkin rate %ld\n", cinfo->clkin);
1507 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree",
1508 cinfo->clkin,
1509 cinfo->highfreq);
1510 1479
1511 /* DSIPHY == CLKIN4DDR */ 1480 /* DSIPHY == CLKIN4DDR */
1512 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu / %d = %lu\n", 1481 DSSDBG("CLKIN4DDR = 2 * %d / %d * %lu = %lu\n",
1513 cinfo->regm, 1482 cinfo->regm,
1514 cinfo->regn, 1483 cinfo->regn,
1515 cinfo->clkin, 1484 cinfo->clkin,
1516 cinfo->highfreq + 1,
1517 cinfo->clkin4ddr); 1485 cinfo->clkin4ddr);
1518 1486
1519 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n", 1487 DSSDBG("Data rate on 1 DSI lane %ld Mbps\n",
@@ -1568,10 +1536,6 @@ int dsi_pll_set_clock_div(struct platform_device *dsidev,
1568 1536
1569 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL)) 1537 if (dss_has_feature(FEAT_DSI_PLL_FREQSEL))
1570 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */ 1538 l = FLD_MOD(l, f, 4, 1); /* DSI_PLL_FREQSEL */
1571 l = FLD_MOD(l, cinfo->use_sys_clk ? 0 : 1,
1572 11, 11); /* DSI_PLL_CLKSEL */
1573 l = FLD_MOD(l, cinfo->highfreq,
1574 12, 12); /* DSI_PLL_HIGHFREQ */
1575 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */ 1539 l = FLD_MOD(l, 1, 13, 13); /* DSI_PLL_REFEN */
1576 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */ 1540 l = FLD_MOD(l, 0, 14, 14); /* DSIPHY_CLKINEN */
1577 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */ 1541 l = FLD_MOD(l, 1, 20, 20); /* DSI_HSDIVBYPASS */
@@ -1716,7 +1680,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1716 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1680 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1717 struct dsi_clock_info *cinfo = &dsi->current_cinfo; 1681 struct dsi_clock_info *cinfo = &dsi->current_cinfo;
1718 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src; 1682 enum omap_dss_clk_source dispc_clk_src, dsi_clk_src;
1719 int dsi_module = dsi_get_dsidev_id(dsidev); 1683 int dsi_module = dsi->module_id;
1720 1684
1721 dispc_clk_src = dss_get_dispc_clk_source(); 1685 dispc_clk_src = dss_get_dispc_clk_source();
1722 dsi_clk_src = dss_get_dsi_clk_source(dsi_module); 1686 dsi_clk_src = dss_get_dsi_clk_source(dsi_module);
@@ -1726,8 +1690,7 @@ static void dsi_dump_dsidev_clocks(struct platform_device *dsidev,
1726 1690
1727 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1); 1691 seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
1728 1692
1729 seq_printf(s, "dsi pll source = %s\n", 1693 seq_printf(s, "dsi pll clkin\t%lu\n", cinfo->clkin);
1730 cinfo->use_sys_clk ? "dss_sys_clk" : "pclkfree");
1731 1694
1732 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn); 1695 seq_printf(s, "Fint\t\t%-16luregn %u\n", cinfo->fint, cinfo->regn);
1733 1696
@@ -1789,7 +1752,6 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1789 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 1752 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
1790 unsigned long flags; 1753 unsigned long flags;
1791 struct dsi_irq_stats stats; 1754 struct dsi_irq_stats stats;
1792 int dsi_module = dsi_get_dsidev_id(dsidev);
1793 1755
1794 spin_lock_irqsave(&dsi->irq_stats_lock, flags); 1756 spin_lock_irqsave(&dsi->irq_stats_lock, flags);
1795 1757
@@ -1806,7 +1768,7 @@ static void dsi_dump_dsidev_irqs(struct platform_device *dsidev,
1806#define PIS(x) \ 1768#define PIS(x) \
1807 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]); 1769 seq_printf(s, "%-20s %10d\n", #x, stats.dsi_irqs[ffs(DSI_IRQ_##x)-1]);
1808 1770
1809 seq_printf(s, "-- DSI%d interrupts --\n", dsi_module + 1); 1771 seq_printf(s, "-- DSI%d interrupts --\n", dsi->module_id + 1);
1810 PIS(VC0); 1772 PIS(VC0);
1811 PIS(VC1); 1773 PIS(VC1);
1812 PIS(VC2); 1774 PIS(VC2);
@@ -1886,22 +1848,6 @@ static void dsi2_dump_irqs(struct seq_file *s)
1886 1848
1887 dsi_dump_dsidev_irqs(dsidev, s); 1849 dsi_dump_dsidev_irqs(dsidev, s);
1888} 1850}
1889
1890void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
1891 const struct file_operations *debug_fops)
1892{
1893 struct platform_device *dsidev;
1894
1895 dsidev = dsi_get_dsidev_from_id(0);
1896 if (dsidev)
1897 debugfs_create_file("dsi1_irqs", S_IRUGO, debugfs_dir,
1898 &dsi1_dump_irqs, debug_fops);
1899
1900 dsidev = dsi_get_dsidev_from_id(1);
1901 if (dsidev)
1902 debugfs_create_file("dsi2_irqs", S_IRUGO, debugfs_dir,
1903 &dsi2_dump_irqs, debug_fops);
1904}
1905#endif 1851#endif
1906 1852
1907static void dsi_dump_dsidev_regs(struct platform_device *dsidev, 1853static void dsi_dump_dsidev_regs(struct platform_device *dsidev,
@@ -2002,21 +1948,6 @@ static void dsi2_dump_regs(struct seq_file *s)
2002 dsi_dump_dsidev_regs(dsidev, s); 1948 dsi_dump_dsidev_regs(dsidev, s);
2003} 1949}
2004 1950
2005void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
2006 const struct file_operations *debug_fops)
2007{
2008 struct platform_device *dsidev;
2009
2010 dsidev = dsi_get_dsidev_from_id(0);
2011 if (dsidev)
2012 debugfs_create_file("dsi1_regs", S_IRUGO, debugfs_dir,
2013 &dsi1_dump_regs, debug_fops);
2014
2015 dsidev = dsi_get_dsidev_from_id(1);
2016 if (dsidev)
2017 debugfs_create_file("dsi2_regs", S_IRUGO, debugfs_dir,
2018 &dsi2_dump_regs, debug_fops);
2019}
2020enum dsi_cio_power_state { 1951enum dsi_cio_power_state {
2021 DSI_COMPLEXIO_POWER_OFF = 0x0, 1952 DSI_COMPLEXIO_POWER_OFF = 0x0,
2022 DSI_COMPLEXIO_POWER_ON = 0x1, 1953 DSI_COMPLEXIO_POWER_ON = 0x1,
@@ -2073,6 +2004,7 @@ static unsigned dsi_get_line_buf_size(struct platform_device *dsidev)
2073 return 1365 * 3; /* 1365x24 bits */ 2004 return 1365 * 3; /* 1365x24 bits */
2074 default: 2005 default:
2075 BUG(); 2006 BUG();
2007 return 0;
2076 } 2008 }
2077} 2009}
2078 2010
@@ -2337,7 +2269,7 @@ static int dsi_cio_init(struct omap_dss_device *dssdev)
2337 2269
2338 DSSDBGF(); 2270 DSSDBGF();
2339 2271
2340 r = dsi->enable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2272 r = dss_dsi_enable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2341 if (r) 2273 if (r)
2342 return r; 2274 return r;
2343 2275
@@ -2447,7 +2379,7 @@ err_cio_pwr:
2447 dsi_cio_disable_lane_override(dsidev); 2379 dsi_cio_disable_lane_override(dsidev);
2448err_scp_clk_dom: 2380err_scp_clk_dom:
2449 dsi_disable_scp_clk(dsidev); 2381 dsi_disable_scp_clk(dsidev);
2450 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2382 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2451 return r; 2383 return r;
2452} 2384}
2453 2385
@@ -2461,7 +2393,7 @@ static void dsi_cio_uninit(struct omap_dss_device *dssdev)
2461 2393
2462 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF); 2394 dsi_cio_power(dsidev, DSI_COMPLEXIO_POWER_OFF);
2463 dsi_disable_scp_clk(dsidev); 2395 dsi_disable_scp_clk(dsidev);
2464 dsi->disable_pads(dsidev->id, dsi_get_lane_mask(dssdev)); 2396 dss_dsi_disable_pads(dsi->module_id, dsi_get_lane_mask(dssdev));
2465} 2397}
2466 2398
2467static void dsi_config_tx_fifo(struct platform_device *dsidev, 2399static void dsi_config_tx_fifo(struct platform_device *dsidev,
@@ -2485,6 +2417,7 @@ static void dsi_config_tx_fifo(struct platform_device *dsidev,
2485 if (add + size > 4) { 2417 if (add + size > 4) {
2486 DSSERR("Illegal FIFO configuration\n"); 2418 DSSERR("Illegal FIFO configuration\n");
2487 BUG(); 2419 BUG();
2420 return;
2488 } 2421 }
2489 2422
2490 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); 2423 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2517,6 +2450,7 @@ static void dsi_config_rx_fifo(struct platform_device *dsidev,
2517 if (add + size > 4) { 2450 if (add + size > 4) {
2518 DSSERR("Illegal FIFO configuration\n"); 2451 DSSERR("Illegal FIFO configuration\n");
2519 BUG(); 2452 BUG();
2453 return;
2520 } 2454 }
2521 2455
2522 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4); 2456 v = FLD_VAL(add, 2, 0) | FLD_VAL(size, 7, 4);
@@ -2658,6 +2592,7 @@ static int dsi_sync_vc(struct platform_device *dsidev, int channel)
2658 return dsi_sync_vc_l4(dsidev, channel); 2592 return dsi_sync_vc_l4(dsidev, channel);
2659 default: 2593 default:
2660 BUG(); 2594 BUG();
2595 return -EINVAL;
2661 } 2596 }
2662} 2597}
2663 2598
@@ -3226,6 +3161,7 @@ static int dsi_vc_generic_send_read_request(struct omap_dss_device *dssdev,
3226 data = reqdata[0] | (reqdata[1] << 8); 3161 data = reqdata[0] | (reqdata[1] << 8);
3227 } else { 3162 } else {
3228 BUG(); 3163 BUG();
3164 return -EINVAL;
3229 } 3165 }
3230 3166
3231 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0); 3167 r = dsi_vc_send_short(dsidev, channel, data_type, data, 0);
@@ -3340,7 +3276,6 @@ static int dsi_vc_read_rx_fifo(struct platform_device *dsidev, int channel,
3340 goto err; 3276 goto err;
3341 } 3277 }
3342 3278
3343 BUG();
3344err: 3279err:
3345 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel, 3280 DSSERR("dsi_vc_read_rx_fifo(ch %d type %s) failed\n", channel,
3346 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS"); 3281 type == DSS_DSI_CONTENT_GENERIC ? "GENERIC" : "DCS");
@@ -3735,6 +3670,186 @@ static void dsi_config_blanking_modes(struct omap_dss_device *dssdev)
3735 dsi_write_reg(dsidev, DSI_CTRL, r); 3670 dsi_write_reg(dsidev, DSI_CTRL, r);
3736} 3671}
3737 3672
3673/*
3674 * According to section 'HS Command Mode Interleaving' in OMAP TRM, Scenario 3
3675 * results in maximum transition time for data and clock lanes to enter and
3676 * exit HS mode. Hence, this is the scenario where the least amount of command
3677 * mode data can be interleaved. We program the minimum amount of TXBYTECLKHS
3678 * clock cycles that can be used to interleave command mode data in HS so that
3679 * all scenarios are satisfied.
3680 */
3681static int dsi_compute_interleave_hs(int blank, bool ddr_alwon, int enter_hs,
3682 int exit_hs, int exiths_clk, int ddr_pre, int ddr_post)
3683{
3684 int transition;
3685
3686 /*
3687 * If DDR_CLK_ALWAYS_ON is set, we need to consider HS mode transition
3688 * time of data lanes only, if it isn't set, we need to consider HS
3689 * transition time of both data and clock lanes. HS transition time
3690 * of Scenario 3 is considered.
3691 */
3692 if (ddr_alwon) {
3693 transition = enter_hs + exit_hs + max(enter_hs, 2) + 1;
3694 } else {
3695 int trans1, trans2;
3696 trans1 = ddr_pre + enter_hs + exit_hs + max(enter_hs, 2) + 1;
3697 trans2 = ddr_pre + enter_hs + exiths_clk + ddr_post + ddr_pre +
3698 enter_hs + 1;
3699 transition = max(trans1, trans2);
3700 }
3701
3702 return blank > transition ? blank - transition : 0;
3703}
3704
3705/*
3706 * According to section 'LP Command Mode Interleaving' in OMAP TRM, Scenario 1
3707 * results in maximum transition time for data lanes to enter and exit LP mode.
3708 * Hence, this is the scenario where the least amount of command mode data can
3709 * be interleaved. We program the minimum amount of bytes that can be
3710 * interleaved in LP so that all scenarios are satisfied.
3711 */
3712static int dsi_compute_interleave_lp(int blank, int enter_hs, int exit_hs,
3713 int lp_clk_div, int tdsi_fclk)
3714{
3715 int trans_lp; /* time required for a LP transition, in TXBYTECLKHS */
3716 int tlp_avail; /* time left for interleaving commands, in CLKIN4DDR */
3717 int ttxclkesc; /* period of LP transmit escape clock, in CLKIN4DDR */
3718 int thsbyte_clk = 16; /* Period of TXBYTECLKHS clock, in CLKIN4DDR */
3719 int lp_inter; /* cmd mode data that can be interleaved, in bytes */
3720
3721 /* maximum LP transition time according to Scenario 1 */
3722 trans_lp = exit_hs + max(enter_hs, 2) + 1;
3723
3724 /* CLKIN4DDR = 16 * TXBYTECLKHS */
3725 tlp_avail = thsbyte_clk * (blank - trans_lp);
3726
3727 ttxclkesc = tdsi_fclk * lp_clk_div;
3728
3729 lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) / ttxclkesc -
3730 26) / 16;
3731
3732 return max(lp_inter, 0);
3733}
3734
3735static void dsi_config_cmd_mode_interleaving(struct omap_dss_device *dssdev)
3736{
3737 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
3738 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
3739 int blanking_mode;
3740 int hfp_blanking_mode, hbp_blanking_mode, hsa_blanking_mode;
3741 int hsa, hfp, hbp, width_bytes, bllp, lp_clk_div;
3742 int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
3743 int tclk_trail, ths_exit, exiths_clk;
3744 bool ddr_alwon;
3745 struct omap_video_timings *timings = &dssdev->panel.timings;
3746 int bpp = dsi_get_pixel_size(dssdev->panel.dsi_pix_fmt);
3747 int ndl = dsi->num_lanes_used - 1;
3748 int dsi_fclk_hsdiv = dssdev->clocks.dsi.regm_dsi + 1;
3749 int hsa_interleave_hs = 0, hsa_interleave_lp = 0;
3750 int hfp_interleave_hs = 0, hfp_interleave_lp = 0;
3751 int hbp_interleave_hs = 0, hbp_interleave_lp = 0;
3752 int bl_interleave_hs = 0, bl_interleave_lp = 0;
3753 u32 r;
3754
3755 r = dsi_read_reg(dsidev, DSI_CTRL);
3756 blanking_mode = FLD_GET(r, 20, 20);
3757 hfp_blanking_mode = FLD_GET(r, 21, 21);
3758 hbp_blanking_mode = FLD_GET(r, 22, 22);
3759 hsa_blanking_mode = FLD_GET(r, 23, 23);
3760
3761 r = dsi_read_reg(dsidev, DSI_VM_TIMING1);
3762 hbp = FLD_GET(r, 11, 0);
3763 hfp = FLD_GET(r, 23, 12);
3764 hsa = FLD_GET(r, 31, 24);
3765
3766 r = dsi_read_reg(dsidev, DSI_CLK_TIMING);
3767 ddr_clk_post = FLD_GET(r, 7, 0);
3768 ddr_clk_pre = FLD_GET(r, 15, 8);
3769
3770 r = dsi_read_reg(dsidev, DSI_VM_TIMING7);
3771 exit_hs_mode_lat = FLD_GET(r, 15, 0);
3772 enter_hs_mode_lat = FLD_GET(r, 31, 16);
3773
3774 r = dsi_read_reg(dsidev, DSI_CLK_CTRL);
3775 lp_clk_div = FLD_GET(r, 12, 0);
3776 ddr_alwon = FLD_GET(r, 13, 13);
3777
3778 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG0);
3779 ths_exit = FLD_GET(r, 7, 0);
3780
3781 r = dsi_read_reg(dsidev, DSI_DSIPHY_CFG1);
3782 tclk_trail = FLD_GET(r, 15, 8);
3783
3784 exiths_clk = ths_exit + tclk_trail;
3785
3786 width_bytes = DIV_ROUND_UP(timings->x_res * bpp, 8);
3787 bllp = hbp + hfp + hsa + DIV_ROUND_UP(width_bytes + 6, ndl);
3788
3789 if (!hsa_blanking_mode) {
3790 hsa_interleave_hs = dsi_compute_interleave_hs(hsa, ddr_alwon,
3791 enter_hs_mode_lat, exit_hs_mode_lat,
3792 exiths_clk, ddr_clk_pre, ddr_clk_post);
3793 hsa_interleave_lp = dsi_compute_interleave_lp(hsa,
3794 enter_hs_mode_lat, exit_hs_mode_lat,
3795 lp_clk_div, dsi_fclk_hsdiv);
3796 }
3797
3798 if (!hfp_blanking_mode) {
3799 hfp_interleave_hs = dsi_compute_interleave_hs(hfp, ddr_alwon,
3800 enter_hs_mode_lat, exit_hs_mode_lat,
3801 exiths_clk, ddr_clk_pre, ddr_clk_post);
3802 hfp_interleave_lp = dsi_compute_interleave_lp(hfp,
3803 enter_hs_mode_lat, exit_hs_mode_lat,
3804 lp_clk_div, dsi_fclk_hsdiv);
3805 }
3806
3807 if (!hbp_blanking_mode) {
3808 hbp_interleave_hs = dsi_compute_interleave_hs(hbp, ddr_alwon,
3809 enter_hs_mode_lat, exit_hs_mode_lat,
3810 exiths_clk, ddr_clk_pre, ddr_clk_post);
3811
3812 hbp_interleave_lp = dsi_compute_interleave_lp(hbp,
3813 enter_hs_mode_lat, exit_hs_mode_lat,
3814 lp_clk_div, dsi_fclk_hsdiv);
3815 }
3816
3817 if (!blanking_mode) {
3818 bl_interleave_hs = dsi_compute_interleave_hs(bllp, ddr_alwon,
3819 enter_hs_mode_lat, exit_hs_mode_lat,
3820 exiths_clk, ddr_clk_pre, ddr_clk_post);
3821
3822 bl_interleave_lp = dsi_compute_interleave_lp(bllp,
3823 enter_hs_mode_lat, exit_hs_mode_lat,
3824 lp_clk_div, dsi_fclk_hsdiv);
3825 }
3826
3827 DSSDBG("DSI HS interleaving(TXBYTECLKHS) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3828 hsa_interleave_hs, hfp_interleave_hs, hbp_interleave_hs,
3829 bl_interleave_hs);
3830
3831 DSSDBG("DSI LP interleaving(bytes) HSA %d, HFP %d, HBP %d, BLLP %d\n",
3832 hsa_interleave_lp, hfp_interleave_lp, hbp_interleave_lp,
3833 bl_interleave_lp);
3834
3835 r = dsi_read_reg(dsidev, DSI_VM_TIMING4);
3836 r = FLD_MOD(r, hsa_interleave_hs, 23, 16);
3837 r = FLD_MOD(r, hfp_interleave_hs, 15, 8);
3838 r = FLD_MOD(r, hbp_interleave_hs, 7, 0);
3839 dsi_write_reg(dsidev, DSI_VM_TIMING4, r);
3840
3841 r = dsi_read_reg(dsidev, DSI_VM_TIMING5);
3842 r = FLD_MOD(r, hsa_interleave_lp, 23, 16);
3843 r = FLD_MOD(r, hfp_interleave_lp, 15, 8);
3844 r = FLD_MOD(r, hbp_interleave_lp, 7, 0);
3845 dsi_write_reg(dsidev, DSI_VM_TIMING5, r);
3846
3847 r = dsi_read_reg(dsidev, DSI_VM_TIMING6);
3848 r = FLD_MOD(r, bl_interleave_hs, 31, 15);
3849 r = FLD_MOD(r, bl_interleave_lp, 16, 0);
3850 dsi_write_reg(dsidev, DSI_VM_TIMING6, r);
3851}
3852
3738static int dsi_proto_config(struct omap_dss_device *dssdev) 3853static int dsi_proto_config(struct omap_dss_device *dssdev)
3739{ 3854{
3740 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 3855 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
@@ -3769,6 +3884,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3769 break; 3884 break;
3770 default: 3885 default:
3771 BUG(); 3886 BUG();
3887 return -EINVAL;
3772 } 3888 }
3773 3889
3774 r = dsi_read_reg(dsidev, DSI_CTRL); 3890 r = dsi_read_reg(dsidev, DSI_CTRL);
@@ -3793,6 +3909,7 @@ static int dsi_proto_config(struct omap_dss_device *dssdev)
3793 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) { 3909 if (dssdev->panel.dsi_mode == OMAP_DSS_DSI_VIDEO_MODE) {
3794 dsi_config_vp_sync_events(dssdev); 3910 dsi_config_vp_sync_events(dssdev);
3795 dsi_config_blanking_modes(dssdev); 3911 dsi_config_blanking_modes(dssdev);
3912 dsi_config_cmd_mode_interleaving(dssdev);
3796 } 3913 }
3797 3914
3798 dsi_vc_initial_config(dsidev, 0); 3915 dsi_vc_initial_config(dsidev, 0);
@@ -4008,6 +4125,7 @@ int dsi_enable_video_output(struct omap_dss_device *dssdev, int channel)
4008 break; 4125 break;
4009 default: 4126 default:
4010 BUG(); 4127 BUG();
4128 return -EINVAL;
4011 }; 4129 };
4012 4130
4013 dsi_if_enable(dsidev, false); 4131 dsi_if_enable(dsidev, false);
@@ -4192,10 +4310,6 @@ static void dsi_framedone_irq_callback(void *data, u32 mask)
4192 __cancel_delayed_work(&dsi->framedone_timeout_work); 4310 __cancel_delayed_work(&dsi->framedone_timeout_work);
4193 4311
4194 dsi_handle_framedone(dsidev, 0); 4312 dsi_handle_framedone(dsidev, 0);
4195
4196#ifdef CONFIG_OMAP2_DSS_FAKE_VSYNC
4197 dispc_fake_vsync_irq();
4198#endif
4199} 4313}
4200 4314
4201int omap_dsi_update(struct omap_dss_device *dssdev, int channel, 4315int omap_dsi_update(struct omap_dss_device *dssdev, int channel,
@@ -4259,13 +4373,12 @@ static int dsi_display_init_dispc(struct omap_dss_device *dssdev)
4259 dispc_mgr_enable_stallmode(dssdev->manager->id, true); 4373 dispc_mgr_enable_stallmode(dssdev->manager->id, true);
4260 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1); 4374 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 1);
4261 4375
4262 dispc_mgr_set_lcd_timings(dssdev->manager->id, &timings); 4376 dss_mgr_set_timings(dssdev->manager, &timings);
4263 } else { 4377 } else {
4264 dispc_mgr_enable_stallmode(dssdev->manager->id, false); 4378 dispc_mgr_enable_stallmode(dssdev->manager->id, false);
4265 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0); 4379 dispc_mgr_enable_fifohandcheck(dssdev->manager->id, 0);
4266 4380
4267 dispc_mgr_set_lcd_timings(dssdev->manager->id, 4381 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
4268 &dssdev->panel.timings);
4269 } 4382 }
4270 4383
4271 dispc_mgr_set_lcd_display_type(dssdev->manager->id, 4384 dispc_mgr_set_lcd_display_type(dssdev->manager->id,
@@ -4294,13 +4407,11 @@ static int dsi_configure_dsi_clocks(struct omap_dss_device *dssdev)
4294 struct dsi_clock_info cinfo; 4407 struct dsi_clock_info cinfo;
4295 int r; 4408 int r;
4296 4409
4297 /* we always use DSS_CLK_SYSCK as input clock */
4298 cinfo.use_sys_clk = true;
4299 cinfo.regn = dssdev->clocks.dsi.regn; 4410 cinfo.regn = dssdev->clocks.dsi.regn;
4300 cinfo.regm = dssdev->clocks.dsi.regm; 4411 cinfo.regm = dssdev->clocks.dsi.regm;
4301 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc; 4412 cinfo.regm_dispc = dssdev->clocks.dsi.regm_dispc;
4302 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi; 4413 cinfo.regm_dsi = dssdev->clocks.dsi.regm_dsi;
4303 r = dsi_calc_clock_rates(dssdev, &cinfo); 4414 r = dsi_calc_clock_rates(dsidev, &cinfo);
4304 if (r) { 4415 if (r) {
4305 DSSERR("Failed to calc dsi clocks\n"); 4416 DSSERR("Failed to calc dsi clocks\n");
4306 return r; 4417 return r;
@@ -4345,7 +4456,7 @@ static int dsi_configure_dispc_clocks(struct omap_dss_device *dssdev)
4345static int dsi_display_init_dsi(struct omap_dss_device *dssdev) 4456static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4346{ 4457{
4347 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4458 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4348 int dsi_module = dsi_get_dsidev_id(dsidev); 4459 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4349 int r; 4460 int r;
4350 4461
4351 r = dsi_pll_init(dsidev, true, true); 4462 r = dsi_pll_init(dsidev, true, true);
@@ -4357,7 +4468,7 @@ static int dsi_display_init_dsi(struct omap_dss_device *dssdev)
4357 goto err1; 4468 goto err1;
4358 4469
4359 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src); 4470 dss_select_dispc_clk_source(dssdev->clocks.dispc.dispc_fclk_src);
4360 dss_select_dsi_clk_source(dsi_module, dssdev->clocks.dsi.dsi_fclk_src); 4471 dss_select_dsi_clk_source(dsi->module_id, dssdev->clocks.dsi.dsi_fclk_src);
4361 dss_select_lcd_clk_source(dssdev->manager->id, 4472 dss_select_lcd_clk_source(dssdev->manager->id,
4362 dssdev->clocks.dispc.channel.lcd_clk_src); 4473 dssdev->clocks.dispc.channel.lcd_clk_src);
4363 4474
@@ -4396,7 +4507,7 @@ err3:
4396 dsi_cio_uninit(dssdev); 4507 dsi_cio_uninit(dssdev);
4397err2: 4508err2:
4398 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4509 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4399 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); 4510 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4400 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4511 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4401 4512
4402err1: 4513err1:
@@ -4410,7 +4521,6 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4410{ 4521{
4411 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4522 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4412 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4523 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4413 int dsi_module = dsi_get_dsidev_id(dsidev);
4414 4524
4415 if (enter_ulps && !dsi->ulps_enabled) 4525 if (enter_ulps && !dsi->ulps_enabled)
4416 dsi_enter_ulps(dsidev); 4526 dsi_enter_ulps(dsidev);
@@ -4423,7 +4533,7 @@ static void dsi_display_uninit_dsi(struct omap_dss_device *dssdev,
4423 dsi_vc_enable(dsidev, 3, 0); 4533 dsi_vc_enable(dsidev, 3, 0);
4424 4534
4425 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK); 4535 dss_select_dispc_clk_source(OMAP_DSS_CLK_SRC_FCK);
4426 dss_select_dsi_clk_source(dsi_module, OMAP_DSS_CLK_SRC_FCK); 4536 dss_select_dsi_clk_source(dsi->module_id, OMAP_DSS_CLK_SRC_FCK);
4427 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK); 4537 dss_select_lcd_clk_source(dssdev->manager->id, OMAP_DSS_CLK_SRC_FCK);
4428 dsi_cio_uninit(dssdev); 4538 dsi_cio_uninit(dssdev);
4429 dsi_pll_uninit(dsidev, disconnect_lanes); 4539 dsi_pll_uninit(dsidev, disconnect_lanes);
@@ -4527,7 +4637,7 @@ int omapdss_dsi_enable_te(struct omap_dss_device *dssdev, bool enable)
4527} 4637}
4528EXPORT_SYMBOL(omapdss_dsi_enable_te); 4638EXPORT_SYMBOL(omapdss_dsi_enable_te);
4529 4639
4530int dsi_init_display(struct omap_dss_device *dssdev) 4640static int __init dsi_init_display(struct omap_dss_device *dssdev)
4531{ 4641{
4532 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev); 4642 struct platform_device *dsidev = dsi_get_dsidev_from_dssdev(dssdev);
4533 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4643 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
@@ -4680,13 +4790,39 @@ static void dsi_put_clocks(struct platform_device *dsidev)
4680 clk_put(dsi->sys_clk); 4790 clk_put(dsi->sys_clk);
4681} 4791}
4682 4792
4793static void __init dsi_probe_pdata(struct platform_device *dsidev)
4794{
4795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4796 struct omap_dss_board_info *pdata = dsidev->dev.platform_data;
4797 int i, r;
4798
4799 for (i = 0; i < pdata->num_devices; ++i) {
4800 struct omap_dss_device *dssdev = pdata->devices[i];
4801
4802 if (dssdev->type != OMAP_DISPLAY_TYPE_DSI)
4803 continue;
4804
4805 if (dssdev->phy.dsi.module != dsi->module_id)
4806 continue;
4807
4808 r = dsi_init_display(dssdev);
4809 if (r) {
4810 DSSERR("device %s init failed: %d\n", dssdev->name, r);
4811 continue;
4812 }
4813
4814 r = omap_dss_register_device(dssdev, &dsidev->dev, i);
4815 if (r)
4816 DSSERR("device %s register failed: %d\n",
4817 dssdev->name, r);
4818 }
4819}
4820
4683/* DSI1 HW IP initialisation */ 4821/* DSI1 HW IP initialisation */
4684static int omap_dsihw_probe(struct platform_device *dsidev) 4822static int __init omap_dsihw_probe(struct platform_device *dsidev)
4685{ 4823{
4686 struct omap_display_platform_data *dss_plat_data;
4687 struct omap_dss_board_info *board_info;
4688 u32 rev; 4824 u32 rev;
4689 int r, i, dsi_module = dsi_get_dsidev_id(dsidev); 4825 int r, i;
4690 struct resource *dsi_mem; 4826 struct resource *dsi_mem;
4691 struct dsi_data *dsi; 4827 struct dsi_data *dsi;
4692 4828
@@ -4694,15 +4830,11 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
4694 if (!dsi) 4830 if (!dsi)
4695 return -ENOMEM; 4831 return -ENOMEM;
4696 4832
4833 dsi->module_id = dsidev->id;
4697 dsi->pdev = dsidev; 4834 dsi->pdev = dsidev;
4698 dsi_pdev_map[dsi_module] = dsidev; 4835 dsi_pdev_map[dsi->module_id] = dsidev;
4699 dev_set_drvdata(&dsidev->dev, dsi); 4836 dev_set_drvdata(&dsidev->dev, dsi);
4700 4837
4701 dss_plat_data = dsidev->dev.platform_data;
4702 board_info = dss_plat_data->board_data;
4703 dsi->enable_pads = board_info->dsi_enable_pads;
4704 dsi->disable_pads = board_info->dsi_disable_pads;
4705
4706 spin_lock_init(&dsi->irq_lock); 4838 spin_lock_init(&dsi->irq_lock);
4707 spin_lock_init(&dsi->errors_lock); 4839 spin_lock_init(&dsi->errors_lock);
4708 dsi->errors = 0; 4840 dsi->errors = 0;
@@ -4780,8 +4912,21 @@ static int omap_dsihw_probe(struct platform_device *dsidev)
4780 else 4912 else
4781 dsi->num_lanes_supported = 3; 4913 dsi->num_lanes_supported = 3;
4782 4914
4915 dsi_probe_pdata(dsidev);
4916
4783 dsi_runtime_put(dsidev); 4917 dsi_runtime_put(dsidev);
4784 4918
4919 if (dsi->module_id == 0)
4920 dss_debugfs_create_file("dsi1_regs", dsi1_dump_regs);
4921 else if (dsi->module_id == 1)
4922 dss_debugfs_create_file("dsi2_regs", dsi2_dump_regs);
4923
4924#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
4925 if (dsi->module_id == 0)
4926 dss_debugfs_create_file("dsi1_irqs", dsi1_dump_irqs);
4927 else if (dsi->module_id == 1)
4928 dss_debugfs_create_file("dsi2_irqs", dsi2_dump_irqs);
4929#endif
4785 return 0; 4930 return 0;
4786 4931
4787err_runtime_get: 4932err_runtime_get:
@@ -4790,12 +4935,14 @@ err_runtime_get:
4790 return r; 4935 return r;
4791} 4936}
4792 4937
4793static int omap_dsihw_remove(struct platform_device *dsidev) 4938static int __exit omap_dsihw_remove(struct platform_device *dsidev)
4794{ 4939{
4795 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev); 4940 struct dsi_data *dsi = dsi_get_dsidrv_data(dsidev);
4796 4941
4797 WARN_ON(dsi->scp_clk_refcount > 0); 4942 WARN_ON(dsi->scp_clk_refcount > 0);
4798 4943
4944 omap_dss_unregister_child_devices(&dsidev->dev);
4945
4799 pm_runtime_disable(&dsidev->dev); 4946 pm_runtime_disable(&dsidev->dev);
4800 4947
4801 dsi_put_clocks(dsidev); 4948 dsi_put_clocks(dsidev);
@@ -4816,7 +4963,6 @@ static int omap_dsihw_remove(struct platform_device *dsidev)
4816static int dsi_runtime_suspend(struct device *dev) 4963static int dsi_runtime_suspend(struct device *dev)
4817{ 4964{
4818 dispc_runtime_put(); 4965 dispc_runtime_put();
4819 dss_runtime_put();
4820 4966
4821 return 0; 4967 return 0;
4822} 4968}
@@ -4825,20 +4971,11 @@ static int dsi_runtime_resume(struct device *dev)
4825{ 4971{
4826 int r; 4972 int r;
4827 4973
4828 r = dss_runtime_get();
4829 if (r)
4830 goto err_get_dss;
4831
4832 r = dispc_runtime_get(); 4974 r = dispc_runtime_get();
4833 if (r) 4975 if (r)
4834 goto err_get_dispc; 4976 return r;
4835 4977
4836 return 0; 4978 return 0;
4837
4838err_get_dispc:
4839 dss_runtime_put();
4840err_get_dss:
4841 return r;
4842} 4979}
4843 4980
4844static const struct dev_pm_ops dsi_pm_ops = { 4981static const struct dev_pm_ops dsi_pm_ops = {
@@ -4847,8 +4984,7 @@ static const struct dev_pm_ops dsi_pm_ops = {
4847}; 4984};
4848 4985
4849static struct platform_driver omap_dsihw_driver = { 4986static struct platform_driver omap_dsihw_driver = {
4850 .probe = omap_dsihw_probe, 4987 .remove = __exit_p(omap_dsihw_remove),
4851 .remove = omap_dsihw_remove,
4852 .driver = { 4988 .driver = {
4853 .name = "omapdss_dsi", 4989 .name = "omapdss_dsi",
4854 .owner = THIS_MODULE, 4990 .owner = THIS_MODULE,
@@ -4856,12 +4992,12 @@ static struct platform_driver omap_dsihw_driver = {
4856 }, 4992 },
4857}; 4993};
4858 4994
4859int dsi_init_platform_driver(void) 4995int __init dsi_init_platform_driver(void)
4860{ 4996{
4861 return platform_driver_register(&omap_dsihw_driver); 4997 return platform_driver_probe(&omap_dsihw_driver, omap_dsihw_probe);
4862} 4998}
4863 4999
4864void dsi_uninit_platform_driver(void) 5000void __exit dsi_uninit_platform_driver(void)
4865{ 5001{
4866 return platform_driver_unregister(&omap_dsihw_driver); 5002 platform_driver_unregister(&omap_dsihw_driver);
4867} 5003}
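The dsi.c additions above include dsi_compute_interleave_lp(), which packs the TRM's LP command-mode interleaving formula into integer arithmetic. Below is a small standalone rework of the same calculation with made-up timing values, purely to show how the numbers combine; in the driver the real inputs are read back from the DSI_VM_TIMING* and DSI_CLK_* registers in dsi_config_cmd_mode_interleaving().

#include <stdio.h>

#define MAX(a, b) ((a) > (b) ? (a) : (b))

/* Mirrors dsi_compute_interleave_lp() from the hunk above; units follow
 * the driver (blank in TXBYTECLKHS cycles, result in bytes). */
static int compute_interleave_lp(int blank, int enter_hs, int exit_hs,
				 int lp_clk_div, int tdsi_fclk)
{
	int trans_lp = exit_hs + MAX(enter_hs, 2) + 1;	/* Scenario 1 transition */
	int thsbyte_clk = 16;				/* CLKIN4DDR per TXBYTECLKHS */
	int tlp_avail = thsbyte_clk * (blank - trans_lp);
	int ttxclkesc = tdsi_fclk * lp_clk_div;
	int lp_inter = ((tlp_avail - 8 * thsbyte_clk - 5 * tdsi_fclk) /
			ttxclkesc - 26) / 16;

	return MAX(lp_inter, 0);
}

int main(void)
{
	/* Assumed values: blank = 400, enter_hs = 14, exit_hs = 20,
	 * lp_clk_div = 8, tdsi_fclk = 4.
	 * trans_lp = 35, tlp_avail = 16 * 365 = 5840, ttxclkesc = 32,
	 * so ((5840 - 128 - 20) / 32 - 26) / 16 = (177 - 26) / 16 = 9. */
	printf("%d bytes of LP command data per blanking period\n",
	       compute_interleave_lp(400, 14, 20, 8, 4));
	return 0;
}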
diff --git a/drivers/video/omap2/dss/dss.c b/drivers/video/omap2/dss/dss.c
index bd2d5e159463..770632359a17 100644
--- a/drivers/video/omap2/dss/dss.c
+++ b/drivers/video/omap2/dss/dss.c
@@ -62,6 +62,9 @@ struct dss_reg {
62#define REG_FLD_MOD(idx, val, start, end) \ 62#define REG_FLD_MOD(idx, val, start, end) \
63 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end)) 63 dss_write_reg(idx, FLD_MOD(dss_read_reg(idx), val, start, end))
64 64
65static int dss_runtime_get(void);
66static void dss_runtime_put(void);
67
65static struct { 68static struct {
66 struct platform_device *pdev; 69 struct platform_device *pdev;
67 void __iomem *base; 70 void __iomem *base;
@@ -277,7 +280,7 @@ void dss_dump_clocks(struct seq_file *s)
277 dss_runtime_put(); 280 dss_runtime_put();
278} 281}
279 282
280void dss_dump_regs(struct seq_file *s) 283static void dss_dump_regs(struct seq_file *s)
281{ 284{
282#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r)) 285#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dss_read_reg(r))
283 286
@@ -322,6 +325,7 @@ void dss_select_dispc_clk_source(enum omap_dss_clk_source clk_src)
322 break; 325 break;
323 default: 326 default:
324 BUG(); 327 BUG();
328 return;
325 } 329 }
326 330
327 dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end); 331 dss_feat_get_reg_field(FEAT_REG_DISPC_CLK_SWITCH, &start, &end);
@@ -335,7 +339,7 @@ void dss_select_dsi_clk_source(int dsi_module,
335 enum omap_dss_clk_source clk_src) 339 enum omap_dss_clk_source clk_src)
336{ 340{
337 struct platform_device *dsidev; 341 struct platform_device *dsidev;
338 int b; 342 int b, pos;
339 343
340 switch (clk_src) { 344 switch (clk_src) {
341 case OMAP_DSS_CLK_SRC_FCK: 345 case OMAP_DSS_CLK_SRC_FCK:
@@ -355,9 +359,11 @@ void dss_select_dsi_clk_source(int dsi_module,
355 break; 359 break;
356 default: 360 default:
357 BUG(); 361 BUG();
362 return;
358 } 363 }
359 364
360 REG_FLD_MOD(DSS_CONTROL, b, 1, 1); /* DSI_CLK_SWITCH */ 365 pos = dsi_module == 0 ? 1 : 10;
366 REG_FLD_MOD(DSS_CONTROL, b, pos, pos); /* DSIx_CLK_SWITCH */
361 367
362 dss.dsi_clk_source[dsi_module] = clk_src; 368 dss.dsi_clk_source[dsi_module] = clk_src;
363} 369}
@@ -389,6 +395,7 @@ void dss_select_lcd_clk_source(enum omap_channel channel,
389 break; 395 break;
390 default: 396 default:
391 BUG(); 397 BUG();
398 return;
392 } 399 }
393 400
394 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12; 401 pos = channel == OMAP_DSS_CHANNEL_LCD ? 0 : 12;
@@ -706,7 +713,7 @@ static void dss_put_clocks(void)
706 clk_put(dss.dss_clk); 713 clk_put(dss.dss_clk);
707} 714}
708 715
709int dss_runtime_get(void) 716static int dss_runtime_get(void)
710{ 717{
711 int r; 718 int r;
712 719
@@ -717,14 +724,14 @@ int dss_runtime_get(void)
717 return r < 0 ? r : 0; 724 return r < 0 ? r : 0;
718} 725}
719 726
720void dss_runtime_put(void) 727static void dss_runtime_put(void)
721{ 728{
722 int r; 729 int r;
723 730
724 DSSDBG("dss_runtime_put\n"); 731 DSSDBG("dss_runtime_put\n");
725 732
726 r = pm_runtime_put_sync(&dss.pdev->dev); 733 r = pm_runtime_put_sync(&dss.pdev->dev);
727 WARN_ON(r < 0); 734 WARN_ON(r < 0 && r != -EBUSY);
728} 735}
729 736
730/* DEBUGFS */ 737/* DEBUGFS */
@@ -740,7 +747,7 @@ void dss_debug_dump_clocks(struct seq_file *s)
740#endif 747#endif
741 748
742/* DSS HW IP initialisation */ 749/* DSS HW IP initialisation */
743static int omap_dsshw_probe(struct platform_device *pdev) 750static int __init omap_dsshw_probe(struct platform_device *pdev)
744{ 751{
745 struct resource *dss_mem; 752 struct resource *dss_mem;
746 u32 rev; 753 u32 rev;
@@ -785,40 +792,24 @@ static int omap_dsshw_probe(struct platform_device *pdev)
785 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK; 792 dss.lcd_clk_source[0] = OMAP_DSS_CLK_SRC_FCK;
786 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK; 793 dss.lcd_clk_source[1] = OMAP_DSS_CLK_SRC_FCK;
787 794
788 r = dpi_init();
789 if (r) {
790 DSSERR("Failed to initialize DPI\n");
791 goto err_dpi;
792 }
793
794 r = sdi_init();
795 if (r) {
796 DSSERR("Failed to initialize SDI\n");
797 goto err_sdi;
798 }
799
800 rev = dss_read_reg(DSS_REVISION); 795 rev = dss_read_reg(DSS_REVISION);
801 printk(KERN_INFO "OMAP DSS rev %d.%d\n", 796 printk(KERN_INFO "OMAP DSS rev %d.%d\n",
802 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0)); 797 FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
803 798
804 dss_runtime_put(); 799 dss_runtime_put();
805 800
801 dss_debugfs_create_file("dss", dss_dump_regs);
802
806 return 0; 803 return 0;
807err_sdi: 804
808 dpi_exit();
809err_dpi:
810 dss_runtime_put();
811err_runtime_get: 805err_runtime_get:
812 pm_runtime_disable(&pdev->dev); 806 pm_runtime_disable(&pdev->dev);
813 dss_put_clocks(); 807 dss_put_clocks();
814 return r; 808 return r;
815} 809}
816 810
817static int omap_dsshw_remove(struct platform_device *pdev) 811static int __exit omap_dsshw_remove(struct platform_device *pdev)
818{ 812{
819 dpi_exit();
820 sdi_exit();
821
822 pm_runtime_disable(&pdev->dev); 813 pm_runtime_disable(&pdev->dev);
823 814
824 dss_put_clocks(); 815 dss_put_clocks();
@@ -829,11 +820,24 @@ static int omap_dsshw_remove(struct platform_device *pdev)
829static int dss_runtime_suspend(struct device *dev) 820static int dss_runtime_suspend(struct device *dev)
830{ 821{
831 dss_save_context(); 822 dss_save_context();
823 dss_set_min_bus_tput(dev, 0);
832 return 0; 824 return 0;
833} 825}
834 826
835static int dss_runtime_resume(struct device *dev) 827static int dss_runtime_resume(struct device *dev)
836{ 828{
829 int r;
830 /*
831 * Set an arbitrarily high tput request to ensure OPP100.
832 * What we should really do is to make a request to stay in OPP100,
833 * without any tput requirements, but that is not currently possible
834 * via the PM layer.
835 */
836
837 r = dss_set_min_bus_tput(dev, 1000000000);
838 if (r)
839 return r;
840
837 dss_restore_context(); 841 dss_restore_context();
838 return 0; 842 return 0;
839} 843}
@@ -844,8 +848,7 @@ static const struct dev_pm_ops dss_pm_ops = {
844}; 848};
845 849
846static struct platform_driver omap_dsshw_driver = { 850static struct platform_driver omap_dsshw_driver = {
847 .probe = omap_dsshw_probe, 851 .remove = __exit_p(omap_dsshw_remove),
848 .remove = omap_dsshw_remove,
849 .driver = { 852 .driver = {
850 .name = "omapdss_dss", 853 .name = "omapdss_dss",
851 .owner = THIS_MODULE, 854 .owner = THIS_MODULE,
@@ -853,12 +856,12 @@ static struct platform_driver omap_dsshw_driver = {
853 }, 856 },
854}; 857};
855 858
856int dss_init_platform_driver(void) 859int __init dss_init_platform_driver(void)
857{ 860{
858 return platform_driver_register(&omap_dsshw_driver); 861 return platform_driver_probe(&omap_dsshw_driver, omap_dsshw_probe);
859} 862}
860 863
861void dss_uninit_platform_driver(void) 864void dss_uninit_platform_driver(void)
862{ 865{
863 return platform_driver_unregister(&omap_dsshw_driver); 866 platform_driver_unregister(&omap_dsshw_driver);
864} 867}
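The dss.c hunk above also fixes dss_select_dsi_clk_source() so that the second DSI module writes its own DSIx_CLK_SWITCH bit (bit 10 of DSS_CONTROL) instead of always toggling DSI1's bit 1. A standalone sketch of the per-module field selection, taking the bit positions from the hunk rather than re-checking them against the TRM:

#include <stdint.h>
#include <stdbool.h>

/* Pick the DSIx_CLK_SWITCH position from the module id, as the patch does
 * with REG_FLD_MOD(DSS_CONTROL, b, pos, pos). */
static uint32_t set_dsi_clk_switch(uint32_t dss_control, int dsi_module,
				   bool use_dsi_pll)
{
	int pos = dsi_module == 0 ? 1 : 10;	/* DSI1 -> bit 1, DSI2 -> bit 10 */
	uint32_t b = use_dsi_pll ? 1 : 0;

	dss_control &= ~(UINT32_C(1) << pos);
	return dss_control | (b << pos);
}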
diff --git a/drivers/video/omap2/dss/dss.h b/drivers/video/omap2/dss/dss.h
index d4b3dff2ead3..dd1092ceaeef 100644
--- a/drivers/video/omap2/dss/dss.h
+++ b/drivers/video/omap2/dss/dss.h
@@ -150,9 +150,6 @@ struct dsi_clock_info {
150 u16 regm_dsi; /* OMAP3: REGM4 150 u16 regm_dsi; /* OMAP3: REGM4
151 * OMAP4: REGM5 */ 151 * OMAP4: REGM5 */
152 u16 lp_clk_div; 152 u16 lp_clk_div;
153
154 u8 highfreq;
155 bool use_sys_clk;
156}; 153};
157 154
158struct seq_file; 155struct seq_file;
@@ -162,6 +159,16 @@ struct platform_device;
162struct bus_type *dss_get_bus(void); 159struct bus_type *dss_get_bus(void);
163struct regulator *dss_get_vdds_dsi(void); 160struct regulator *dss_get_vdds_dsi(void);
164struct regulator *dss_get_vdds_sdi(void); 161struct regulator *dss_get_vdds_sdi(void);
162int dss_get_ctx_loss_count(struct device *dev);
163int dss_dsi_enable_pads(int dsi_id, unsigned lane_mask);
164void dss_dsi_disable_pads(int dsi_id, unsigned lane_mask);
165int dss_set_min_bus_tput(struct device *dev, unsigned long tput);
166int dss_debugfs_create_file(const char *name, void (*write)(struct seq_file *));
167
168int omap_dss_register_device(struct omap_dss_device *dssdev,
169 struct device *parent, int disp_num);
170void omap_dss_unregister_device(struct omap_dss_device *dssdev);
171void omap_dss_unregister_child_devices(struct device *parent);
165 172
166/* apply */ 173/* apply */
167void dss_apply_init(void); 174void dss_apply_init(void);
@@ -179,6 +186,9 @@ void dss_mgr_get_info(struct omap_overlay_manager *mgr,
179int dss_mgr_set_device(struct omap_overlay_manager *mgr, 186int dss_mgr_set_device(struct omap_overlay_manager *mgr,
180 struct omap_dss_device *dssdev); 187 struct omap_dss_device *dssdev);
181int dss_mgr_unset_device(struct omap_overlay_manager *mgr); 188int dss_mgr_unset_device(struct omap_overlay_manager *mgr);
189void dss_mgr_set_timings(struct omap_overlay_manager *mgr,
190 struct omap_video_timings *timings);
191const struct omap_video_timings *dss_mgr_get_timings(struct omap_overlay_manager *mgr);
182 192
183bool dss_ovl_is_enabled(struct omap_overlay *ovl); 193bool dss_ovl_is_enabled(struct omap_overlay *ovl);
184int dss_ovl_enable(struct omap_overlay *ovl); 194int dss_ovl_enable(struct omap_overlay *ovl);
@@ -208,9 +218,11 @@ int dss_init_overlay_managers(struct platform_device *pdev);
208void dss_uninit_overlay_managers(struct platform_device *pdev); 218void dss_uninit_overlay_managers(struct platform_device *pdev);
209int dss_mgr_simple_check(struct omap_overlay_manager *mgr, 219int dss_mgr_simple_check(struct omap_overlay_manager *mgr,
210 const struct omap_overlay_manager_info *info); 220 const struct omap_overlay_manager_info *info);
221int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
222 const struct omap_video_timings *timings);
211int dss_mgr_check(struct omap_overlay_manager *mgr, 223int dss_mgr_check(struct omap_overlay_manager *mgr,
212 struct omap_dss_device *dssdev,
213 struct omap_overlay_manager_info *info, 224 struct omap_overlay_manager_info *info,
225 const struct omap_video_timings *mgr_timings,
214 struct omap_overlay_info **overlay_infos); 226 struct omap_overlay_info **overlay_infos);
215 227
216/* overlay */ 228/* overlay */
@@ -220,22 +232,18 @@ void dss_overlay_setup_dispc_manager(struct omap_overlay_manager *mgr);
220void dss_recheck_connections(struct omap_dss_device *dssdev, bool force); 232void dss_recheck_connections(struct omap_dss_device *dssdev, bool force);
221int dss_ovl_simple_check(struct omap_overlay *ovl, 233int dss_ovl_simple_check(struct omap_overlay *ovl,
222 const struct omap_overlay_info *info); 234 const struct omap_overlay_info *info);
223int dss_ovl_check(struct omap_overlay *ovl, 235int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
224 struct omap_overlay_info *info, struct omap_dss_device *dssdev); 236 const struct omap_video_timings *mgr_timings);
225 237
226/* DSS */ 238/* DSS */
227int dss_init_platform_driver(void); 239int dss_init_platform_driver(void) __init;
228void dss_uninit_platform_driver(void); 240void dss_uninit_platform_driver(void);
229 241
230int dss_runtime_get(void);
231void dss_runtime_put(void);
232
233void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select); 242void dss_select_hdmi_venc_clk_source(enum dss_hdmi_venc_clk_source_select);
234enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void); 243enum dss_hdmi_venc_clk_source_select dss_get_hdmi_venc_clk_source(void);
235const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src); 244const char *dss_get_generic_clk_source_name(enum omap_dss_clk_source clk_src);
236void dss_dump_clocks(struct seq_file *s); 245void dss_dump_clocks(struct seq_file *s);
237 246
238void dss_dump_regs(struct seq_file *s);
239#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT) 247#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_OMAP2_DSS_DEBUG_SUPPORT)
240void dss_debug_dump_clocks(struct seq_file *s); 248void dss_debug_dump_clocks(struct seq_file *s);
241#endif 249#endif
@@ -265,19 +273,8 @@ int dss_calc_clock_div(bool is_tft, unsigned long req_pck,
265 struct dispc_clock_info *dispc_cinfo); 273 struct dispc_clock_info *dispc_cinfo);
266 274
267/* SDI */ 275/* SDI */
268#ifdef CONFIG_OMAP2_DSS_SDI 276int sdi_init_platform_driver(void) __init;
269int sdi_init(void); 277void sdi_uninit_platform_driver(void) __exit;
270void sdi_exit(void);
271int sdi_init_display(struct omap_dss_device *display);
272#else
273static inline int sdi_init(void)
274{
275 return 0;
276}
277static inline void sdi_exit(void)
278{
279}
280#endif
281 278
282/* DSI */ 279/* DSI */
283#ifdef CONFIG_OMAP2_DSS_DSI 280#ifdef CONFIG_OMAP2_DSS_DSI
@@ -285,19 +282,14 @@ static inline void sdi_exit(void)
285struct dentry; 282struct dentry;
286struct file_operations; 283struct file_operations;
287 284
288int dsi_init_platform_driver(void); 285int dsi_init_platform_driver(void) __init;
289void dsi_uninit_platform_driver(void); 286void dsi_uninit_platform_driver(void) __exit;
290 287
291int dsi_runtime_get(struct platform_device *dsidev); 288int dsi_runtime_get(struct platform_device *dsidev);
292void dsi_runtime_put(struct platform_device *dsidev); 289void dsi_runtime_put(struct platform_device *dsidev);
293 290
294void dsi_dump_clocks(struct seq_file *s); 291void dsi_dump_clocks(struct seq_file *s);
295void dsi_create_debugfs_files_irq(struct dentry *debugfs_dir,
296 const struct file_operations *debug_fops);
297void dsi_create_debugfs_files_reg(struct dentry *debugfs_dir,
298 const struct file_operations *debug_fops);
299 292
300int dsi_init_display(struct omap_dss_device *display);
301void dsi_irq_handler(void); 293void dsi_irq_handler(void);
302u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt); 294u8 dsi_get_pixel_size(enum omap_dss_dsi_pixel_format fmt);
303 295
@@ -314,13 +306,6 @@ void dsi_wait_pll_hsdiv_dispc_active(struct platform_device *dsidev);
314void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev); 306void dsi_wait_pll_hsdiv_dsi_active(struct platform_device *dsidev);
315struct platform_device *dsi_get_dsidev_from_id(int module); 307struct platform_device *dsi_get_dsidev_from_id(int module);
316#else 308#else
317static inline int dsi_init_platform_driver(void)
318{
319 return 0;
320}
321static inline void dsi_uninit_platform_driver(void)
322{
323}
324static inline int dsi_runtime_get(struct platform_device *dsidev) 309static inline int dsi_runtime_get(struct platform_device *dsidev)
325{ 310{
326 return 0; 311 return 0;
@@ -377,28 +362,14 @@ static inline struct platform_device *dsi_get_dsidev_from_id(int module)
377#endif 362#endif
378 363
379/* DPI */ 364/* DPI */
380#ifdef CONFIG_OMAP2_DSS_DPI 365int dpi_init_platform_driver(void) __init;
381int dpi_init(void); 366void dpi_uninit_platform_driver(void) __exit;
382void dpi_exit(void);
383int dpi_init_display(struct omap_dss_device *dssdev);
384#else
385static inline int dpi_init(void)
386{
387 return 0;
388}
389static inline void dpi_exit(void)
390{
391}
392#endif
393 367
394/* DISPC */ 368/* DISPC */
395int dispc_init_platform_driver(void); 369int dispc_init_platform_driver(void) __init;
396void dispc_uninit_platform_driver(void); 370void dispc_uninit_platform_driver(void) __exit;
397void dispc_dump_clocks(struct seq_file *s); 371void dispc_dump_clocks(struct seq_file *s);
398void dispc_dump_irqs(struct seq_file *s);
399void dispc_dump_regs(struct seq_file *s);
400void dispc_irq_handler(void); 372void dispc_irq_handler(void);
401void dispc_fake_vsync_irq(void);
402 373
403int dispc_runtime_get(void); 374int dispc_runtime_get(void);
404void dispc_runtime_put(void); 375void dispc_runtime_put(void);
@@ -409,12 +380,12 @@ void dispc_disable_sidle(void);
409void dispc_lcd_enable_signal_polarity(bool act_high); 380void dispc_lcd_enable_signal_polarity(bool act_high);
410void dispc_lcd_enable_signal(bool enable); 381void dispc_lcd_enable_signal(bool enable);
411void dispc_pck_free_enable(bool enable); 382void dispc_pck_free_enable(bool enable);
412void dispc_set_digit_size(u16 width, u16 height);
413void dispc_enable_fifomerge(bool enable); 383void dispc_enable_fifomerge(bool enable);
414void dispc_enable_gamma_table(bool enable); 384void dispc_enable_gamma_table(bool enable);
415void dispc_set_loadmode(enum omap_dss_load_mode mode); 385void dispc_set_loadmode(enum omap_dss_load_mode mode);
416 386
417bool dispc_lcd_timings_ok(struct omap_video_timings *timings); 387bool dispc_mgr_timings_ok(enum omap_channel channel,
388 const struct omap_video_timings *timings);
418unsigned long dispc_fclk_rate(void); 389unsigned long dispc_fclk_rate(void);
419void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck, 390void dispc_find_clk_divs(bool is_tft, unsigned long req_pck, unsigned long fck,
420 struct dispc_clock_info *cinfo); 391 struct dispc_clock_info *cinfo);
@@ -424,15 +395,16 @@ int dispc_calc_clock_rates(unsigned long dispc_fclk_rate,
424 395
425void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high); 396void dispc_ovl_set_fifo_threshold(enum omap_plane plane, u32 low, u32 high);
426void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane, 397void dispc_ovl_compute_fifo_thresholds(enum omap_plane plane,
427 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge); 398 u32 *fifo_low, u32 *fifo_high, bool use_fifomerge,
399 bool manual_update);
428int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi, 400int dispc_ovl_setup(enum omap_plane plane, struct omap_overlay_info *oi,
429 bool ilace, bool replication); 401 bool ilace, bool replication,
402 const struct omap_video_timings *mgr_timings);
430int dispc_ovl_enable(enum omap_plane plane, bool enable); 403int dispc_ovl_enable(enum omap_plane plane, bool enable);
431void dispc_ovl_set_channel_out(enum omap_plane plane, 404void dispc_ovl_set_channel_out(enum omap_plane plane,
432 enum omap_channel channel); 405 enum omap_channel channel);
433 406
434void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable); 407void dispc_mgr_enable_fifohandcheck(enum omap_channel channel, bool enable);
435void dispc_mgr_set_lcd_size(enum omap_channel channel, u16 width, u16 height);
436u32 dispc_mgr_get_vsync_irq(enum omap_channel channel); 408u32 dispc_mgr_get_vsync_irq(enum omap_channel channel);
437u32 dispc_mgr_get_framedone_irq(enum omap_channel channel); 409u32 dispc_mgr_get_framedone_irq(enum omap_channel channel);
438bool dispc_mgr_go_busy(enum omap_channel channel); 410bool dispc_mgr_go_busy(enum omap_channel channel);
@@ -445,12 +417,13 @@ void dispc_mgr_enable_stallmode(enum omap_channel channel, bool enable);
445void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines); 417void dispc_mgr_set_tft_data_lines(enum omap_channel channel, u8 data_lines);
446void dispc_mgr_set_lcd_display_type(enum omap_channel channel, 418void dispc_mgr_set_lcd_display_type(enum omap_channel channel,
447 enum omap_lcd_display_type type); 419 enum omap_lcd_display_type type);
448void dispc_mgr_set_lcd_timings(enum omap_channel channel, 420void dispc_mgr_set_timings(enum omap_channel channel,
449 struct omap_video_timings *timings); 421 struct omap_video_timings *timings);
450void dispc_mgr_set_pol_freq(enum omap_channel channel, 422void dispc_mgr_set_pol_freq(enum omap_channel channel,
451 enum omap_panel_config config, u8 acbi, u8 acb); 423 enum omap_panel_config config, u8 acbi, u8 acb);
452unsigned long dispc_mgr_lclk_rate(enum omap_channel channel); 424unsigned long dispc_mgr_lclk_rate(enum omap_channel channel);
453unsigned long dispc_mgr_pclk_rate(enum omap_channel channel); 425unsigned long dispc_mgr_pclk_rate(enum omap_channel channel);
426unsigned long dispc_core_clk_rate(void);
454int dispc_mgr_set_clock_div(enum omap_channel channel, 427int dispc_mgr_set_clock_div(enum omap_channel channel,
455 struct dispc_clock_info *cinfo); 428 struct dispc_clock_info *cinfo);
456int dispc_mgr_get_clock_div(enum omap_channel channel, 429int dispc_mgr_get_clock_div(enum omap_channel channel,
@@ -460,19 +433,10 @@ void dispc_mgr_setup(enum omap_channel channel,
460 433
461/* VENC */ 434/* VENC */
462#ifdef CONFIG_OMAP2_DSS_VENC 435#ifdef CONFIG_OMAP2_DSS_VENC
463int venc_init_platform_driver(void); 436int venc_init_platform_driver(void) __init;
464void venc_uninit_platform_driver(void); 437void venc_uninit_platform_driver(void) __exit;
465void venc_dump_regs(struct seq_file *s);
466int venc_init_display(struct omap_dss_device *display);
467unsigned long venc_get_pixel_clock(void); 438unsigned long venc_get_pixel_clock(void);
468#else 439#else
469static inline int venc_init_platform_driver(void)
470{
471 return 0;
472}
473static inline void venc_uninit_platform_driver(void)
474{
475}
476static inline unsigned long venc_get_pixel_clock(void) 440static inline unsigned long venc_get_pixel_clock(void)
477{ 441{
478 WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__); 442 WARN("%s: VENC not compiled in, returning pclk as 0\n", __func__);
@@ -482,23 +446,10 @@ static inline unsigned long venc_get_pixel_clock(void)
482 446
483/* HDMI */ 447/* HDMI */
484#ifdef CONFIG_OMAP4_DSS_HDMI 448#ifdef CONFIG_OMAP4_DSS_HDMI
485int hdmi_init_platform_driver(void); 449int hdmi_init_platform_driver(void) __init;
486void hdmi_uninit_platform_driver(void); 450void hdmi_uninit_platform_driver(void) __exit;
487int hdmi_init_display(struct omap_dss_device *dssdev);
488unsigned long hdmi_get_pixel_clock(void); 451unsigned long hdmi_get_pixel_clock(void);
489void hdmi_dump_regs(struct seq_file *s);
490#else 452#else
491static inline int hdmi_init_display(struct omap_dss_device *dssdev)
492{
493 return 0;
494}
495static inline int hdmi_init_platform_driver(void)
496{
497 return 0;
498}
499static inline void hdmi_uninit_platform_driver(void)
500{
501}
502static inline unsigned long hdmi_get_pixel_clock(void) 453static inline unsigned long hdmi_get_pixel_clock(void)
503{ 454{
504 WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__); 455 WARN("%s: HDMI not compiled in, returning pclk as 0\n", __func__);
@@ -514,22 +465,18 @@ int omapdss_hdmi_read_edid(u8 *buf, int len);
514bool omapdss_hdmi_detect(void); 465bool omapdss_hdmi_detect(void);
515int hdmi_panel_init(void); 466int hdmi_panel_init(void);
516void hdmi_panel_exit(void); 467void hdmi_panel_exit(void);
468#ifdef CONFIG_OMAP4_DSS_HDMI_AUDIO
469int hdmi_audio_enable(void);
470void hdmi_audio_disable(void);
471int hdmi_audio_start(void);
472void hdmi_audio_stop(void);
473bool hdmi_mode_has_audio(void);
474int hdmi_audio_config(struct omap_dss_audio *audio);
475#endif
517 476
518/* RFBI */ 477/* RFBI */
519#ifdef CONFIG_OMAP2_DSS_RFBI 478int rfbi_init_platform_driver(void) __init;
520int rfbi_init_platform_driver(void); 479void rfbi_uninit_platform_driver(void) __exit;
521void rfbi_uninit_platform_driver(void);
522void rfbi_dump_regs(struct seq_file *s);
523int rfbi_init_display(struct omap_dss_device *display);
524#else
525static inline int rfbi_init_platform_driver(void)
526{
527 return 0;
528}
529static inline void rfbi_uninit_platform_driver(void)
530{
531}
532#endif
533 480
534 481
535#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS 482#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
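The dss.h hunks above drop the per-output #ifdef stub pairs (sdi_init()/sdi_exit(), dpi_init()/dpi_exit(), and the DSI, VENC, HDMI and RFBI platform-driver stubs) and instead declare every output's init/uninit entry point unconditionally, tagged __init or __exit. A minimal sketch of how such __init-tagged registration calls compose at DSS start-up; the helper name and the exact ordering are illustrative assumptions, the real sequence lives in core.c, which is not part of these hunks:

/*
 * Illustrative only: chaining the __init-tagged entry points declared
 * in dss.h. Functions marked __init are discarded after boot, so they
 * may only be called from initialisation code such as this.
 */
static int __init dss_output_drivers_init(void)
{
	int r;

	r = dispc_init_platform_driver();
	if (r)
		return r;

	r = dpi_init_platform_driver();
	if (r)
		return r;	/* real code would also unwind dispc here */

	return hdmi_init_platform_driver();
}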
diff --git a/drivers/video/omap2/dss/dss_features.c b/drivers/video/omap2/dss/dss_features.c
index ce14aa6dd672..938709724f0c 100644
--- a/drivers/video/omap2/dss/dss_features.c
+++ b/drivers/video/omap2/dss/dss_features.c
@@ -52,6 +52,8 @@ struct omap_dss_features {
52 const char * const *clksrc_names; 52 const char * const *clksrc_names;
53 const struct dss_param_range *dss_params; 53 const struct dss_param_range *dss_params;
54 54
55 const enum omap_dss_rotation_type supported_rotation_types;
56
55 const u32 buffer_size_unit; 57 const u32 buffer_size_unit;
56 const u32 burst_size_unit; 58 const u32 burst_size_unit;
57}; 59};
@@ -311,6 +313,8 @@ static const struct dss_param_range omap2_dss_param_range[] = {
311 * scaler cannot scale a image with width more than 768. 313 * scaler cannot scale a image with width more than 768.
312 */ 314 */
313 [FEAT_PARAM_LINEWIDTH] = { 1, 768 }, 315 [FEAT_PARAM_LINEWIDTH] = { 1, 768 },
316 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
317 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
314}; 318};
315 319
316static const struct dss_param_range omap3_dss_param_range[] = { 320static const struct dss_param_range omap3_dss_param_range[] = {
@@ -324,6 +328,8 @@ static const struct dss_param_range omap3_dss_param_range[] = {
324 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1}, 328 [FEAT_PARAM_DSIPLL_LPDIV] = { 1, (1 << 13) - 1},
325 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 329 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
326 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 }, 330 [FEAT_PARAM_LINEWIDTH] = { 1, 1024 },
331 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
332 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
327}; 333};
328 334
329static const struct dss_param_range omap4_dss_param_range[] = { 335static const struct dss_param_range omap4_dss_param_range[] = {
@@ -337,6 +343,8 @@ static const struct dss_param_range omap4_dss_param_range[] = {
337 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 }, 343 [FEAT_PARAM_DSIPLL_LPDIV] = { 0, (1 << 13) - 1 },
338 [FEAT_PARAM_DOWNSCALE] = { 1, 4 }, 344 [FEAT_PARAM_DOWNSCALE] = { 1, 4 },
339 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 }, 345 [FEAT_PARAM_LINEWIDTH] = { 1, 2048 },
346 [FEAT_PARAM_MGR_WIDTH] = { 1, 2048 },
347 [FEAT_PARAM_MGR_HEIGHT] = { 1, 2048 },
340}; 348};
341 349
342static const enum dss_feat_id omap2_dss_feat_list[] = { 350static const enum dss_feat_id omap2_dss_feat_list[] = {
@@ -399,6 +407,7 @@ static const enum dss_feat_id omap4430_es1_0_dss_feat_list[] = {
399 FEAT_FIR_COEF_V, 407 FEAT_FIR_COEF_V,
400 FEAT_ALPHA_FREE_ZORDER, 408 FEAT_ALPHA_FREE_ZORDER,
401 FEAT_FIFO_MERGE, 409 FEAT_FIFO_MERGE,
410 FEAT_BURST_2D,
402}; 411};
403 412
404static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = { 413static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
@@ -416,6 +425,7 @@ static const enum dss_feat_id omap4430_es2_0_1_2_dss_feat_list[] = {
416 FEAT_FIR_COEF_V, 425 FEAT_FIR_COEF_V,
417 FEAT_ALPHA_FREE_ZORDER, 426 FEAT_ALPHA_FREE_ZORDER,
418 FEAT_FIFO_MERGE, 427 FEAT_FIFO_MERGE,
428 FEAT_BURST_2D,
419}; 429};
420 430
421static const enum dss_feat_id omap4_dss_feat_list[] = { 431static const enum dss_feat_id omap4_dss_feat_list[] = {
@@ -434,6 +444,7 @@ static const enum dss_feat_id omap4_dss_feat_list[] = {
434 FEAT_FIR_COEF_V, 444 FEAT_FIR_COEF_V,
435 FEAT_ALPHA_FREE_ZORDER, 445 FEAT_ALPHA_FREE_ZORDER,
436 FEAT_FIFO_MERGE, 446 FEAT_FIFO_MERGE,
447 FEAT_BURST_2D,
437}; 448};
438 449
439/* OMAP2 DSS Features */ 450/* OMAP2 DSS Features */
@@ -451,6 +462,7 @@ static const struct omap_dss_features omap2_dss_features = {
451 .overlay_caps = omap2_dss_overlay_caps, 462 .overlay_caps = omap2_dss_overlay_caps,
452 .clksrc_names = omap2_dss_clk_source_names, 463 .clksrc_names = omap2_dss_clk_source_names,
453 .dss_params = omap2_dss_param_range, 464 .dss_params = omap2_dss_param_range,
465 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
454 .buffer_size_unit = 1, 466 .buffer_size_unit = 1,
455 .burst_size_unit = 8, 467 .burst_size_unit = 8,
456}; 468};
@@ -470,6 +482,7 @@ static const struct omap_dss_features omap3430_dss_features = {
470 .overlay_caps = omap3430_dss_overlay_caps, 482 .overlay_caps = omap3430_dss_overlay_caps,
471 .clksrc_names = omap3_dss_clk_source_names, 483 .clksrc_names = omap3_dss_clk_source_names,
472 .dss_params = omap3_dss_param_range, 484 .dss_params = omap3_dss_param_range,
485 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
473 .buffer_size_unit = 1, 486 .buffer_size_unit = 1,
474 .burst_size_unit = 8, 487 .burst_size_unit = 8,
475}; 488};
@@ -488,6 +501,7 @@ static const struct omap_dss_features omap3630_dss_features = {
488 .overlay_caps = omap3630_dss_overlay_caps, 501 .overlay_caps = omap3630_dss_overlay_caps,
489 .clksrc_names = omap3_dss_clk_source_names, 502 .clksrc_names = omap3_dss_clk_source_names,
490 .dss_params = omap3_dss_param_range, 503 .dss_params = omap3_dss_param_range,
504 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_VRFB,
491 .buffer_size_unit = 1, 505 .buffer_size_unit = 1,
492 .burst_size_unit = 8, 506 .burst_size_unit = 8,
493}; 507};
@@ -508,6 +522,7 @@ static const struct omap_dss_features omap4430_es1_0_dss_features = {
508 .overlay_caps = omap4_dss_overlay_caps, 522 .overlay_caps = omap4_dss_overlay_caps,
509 .clksrc_names = omap4_dss_clk_source_names, 523 .clksrc_names = omap4_dss_clk_source_names,
510 .dss_params = omap4_dss_param_range, 524 .dss_params = omap4_dss_param_range,
525 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
511 .buffer_size_unit = 16, 526 .buffer_size_unit = 16,
512 .burst_size_unit = 16, 527 .burst_size_unit = 16,
513}; 528};
@@ -527,6 +542,7 @@ static const struct omap_dss_features omap4430_es2_0_1_2_dss_features = {
527 .overlay_caps = omap4_dss_overlay_caps, 542 .overlay_caps = omap4_dss_overlay_caps,
528 .clksrc_names = omap4_dss_clk_source_names, 543 .clksrc_names = omap4_dss_clk_source_names,
529 .dss_params = omap4_dss_param_range, 544 .dss_params = omap4_dss_param_range,
545 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
530 .buffer_size_unit = 16, 546 .buffer_size_unit = 16,
531 .burst_size_unit = 16, 547 .burst_size_unit = 16,
532}; 548};
@@ -546,6 +562,7 @@ static const struct omap_dss_features omap4_dss_features = {
546 .overlay_caps = omap4_dss_overlay_caps, 562 .overlay_caps = omap4_dss_overlay_caps,
547 .clksrc_names = omap4_dss_clk_source_names, 563 .clksrc_names = omap4_dss_clk_source_names,
548 .dss_params = omap4_dss_param_range, 564 .dss_params = omap4_dss_param_range,
565 .supported_rotation_types = OMAP_DSS_ROT_DMA | OMAP_DSS_ROT_TILER,
549 .buffer_size_unit = 16, 566 .buffer_size_unit = 16,
550 .burst_size_unit = 16, 567 .burst_size_unit = 16,
551}; 568};
@@ -562,13 +579,17 @@ static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
562 .pll_enable = ti_hdmi_4xxx_pll_enable, 579 .pll_enable = ti_hdmi_4xxx_pll_enable,
563 .pll_disable = ti_hdmi_4xxx_pll_disable, 580 .pll_disable = ti_hdmi_4xxx_pll_disable,
564 .video_enable = ti_hdmi_4xxx_wp_video_start, 581 .video_enable = ti_hdmi_4xxx_wp_video_start,
582 .video_disable = ti_hdmi_4xxx_wp_video_stop,
565 .dump_wrapper = ti_hdmi_4xxx_wp_dump, 583 .dump_wrapper = ti_hdmi_4xxx_wp_dump,
566 .dump_core = ti_hdmi_4xxx_core_dump, 584 .dump_core = ti_hdmi_4xxx_core_dump,
567 .dump_pll = ti_hdmi_4xxx_pll_dump, 585 .dump_pll = ti_hdmi_4xxx_pll_dump,
568 .dump_phy = ti_hdmi_4xxx_phy_dump, 586 .dump_phy = ti_hdmi_4xxx_phy_dump,
569#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 587#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
570 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
571 .audio_enable = ti_hdmi_4xxx_wp_audio_enable, 588 .audio_enable = ti_hdmi_4xxx_wp_audio_enable,
589 .audio_disable = ti_hdmi_4xxx_wp_audio_disable,
590 .audio_start = ti_hdmi_4xxx_audio_start,
591 .audio_stop = ti_hdmi_4xxx_audio_stop,
592 .audio_config = ti_hdmi_4xxx_audio_config,
572#endif 593#endif
573 594
574}; 595};
@@ -662,6 +683,11 @@ void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end)
662 *end = omap_current_dss_features->reg_fields[id].end; 683 *end = omap_current_dss_features->reg_fields[id].end;
663} 684}
664 685
686bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type)
687{
688 return omap_current_dss_features->supported_rotation_types & rot_type;
689}
690
665void dss_features_init(void) 691void dss_features_init(void)
666{ 692{
667 if (cpu_is_omap24xx()) 693 if (cpu_is_omap24xx())
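dss_features.c now captures more per-SoC capability data: the FEAT_BURST_2D flag in the OMAP4 feature lists, the FEAT_PARAM_MGR_WIDTH/HEIGHT ranges, and a supported_rotation_types bitmask queried through the new dss_feat_rotation_type_supported() helper. A short sketch of how a feature bit such as FEAT_BURST_2D is typically consumed; the register name and write helper below are invented for illustration, only the dss_has_feature() query is taken from this driver:

/* Sketch: gate an optional hardware path on a per-SoC feature bit. */
static void dispc_setup_2d_burst(void)
{
	if (!dss_has_feature(FEAT_BURST_2D))
		return;

	/* REG_FIELD_BURST_2D and dispc_write_bit() are hypothetical names */
	dispc_write_bit(REG_FIELD_BURST_2D, 1);
}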
diff --git a/drivers/video/omap2/dss/dss_features.h b/drivers/video/omap2/dss/dss_features.h
index c332e7ddfce1..bdf469f080e7 100644
--- a/drivers/video/omap2/dss/dss_features.h
+++ b/drivers/video/omap2/dss/dss_features.h
@@ -62,6 +62,7 @@ enum dss_feat_id {
62 FEAT_FIFO_MERGE, 62 FEAT_FIFO_MERGE,
63 /* An unknown HW bug causing the normal FIFO thresholds not to work */ 63 /* An unknown HW bug causing the normal FIFO thresholds not to work */
64 FEAT_OMAP3_DSI_FIFO_BUG, 64 FEAT_OMAP3_DSI_FIFO_BUG,
65 FEAT_BURST_2D,
65}; 66};
66 67
67/* DSS register field id */ 68/* DSS register field id */
@@ -91,6 +92,8 @@ enum dss_range_param {
91 FEAT_PARAM_DSIPLL_LPDIV, 92 FEAT_PARAM_DSIPLL_LPDIV,
92 FEAT_PARAM_DOWNSCALE, 93 FEAT_PARAM_DOWNSCALE,
93 FEAT_PARAM_LINEWIDTH, 94 FEAT_PARAM_LINEWIDTH,
95 FEAT_PARAM_MGR_WIDTH,
96 FEAT_PARAM_MGR_HEIGHT,
94}; 97};
95 98
96/* DSS Feature Functions */ 99/* DSS Feature Functions */
@@ -108,6 +111,8 @@ const char *dss_feat_get_clk_source_name(enum omap_dss_clk_source id);
108u32 dss_feat_get_buffer_size_unit(void); /* in bytes */ 111u32 dss_feat_get_buffer_size_unit(void); /* in bytes */
109u32 dss_feat_get_burst_size_unit(void); /* in bytes */ 112u32 dss_feat_get_burst_size_unit(void); /* in bytes */
110 113
114bool dss_feat_rotation_type_supported(enum omap_dss_rotation_type rot_type);
115
111bool dss_has_feature(enum dss_feat_id id); 116bool dss_has_feature(enum dss_feat_id id);
112void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end); 117void dss_feat_get_reg_field(enum dss_feat_reg_field id, u8 *start, u8 *end);
113void dss_features_init(void); 118void dss_features_init(void);
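The new FEAT_PARAM_MGR_WIDTH and FEAT_PARAM_MGR_HEIGHT entries bound the active area an overlay manager may be programmed with on each SoC generation. A plausible use in timings validation, assuming a dss_feat_get_param_max() range accessor provided by dss_features.h (it is not shown in this hunk); the real dispc_mgr_timings_ok() may check more than this:

/* Sketch only: reject manager timings that exceed the per-SoC limits. */
static bool mgr_size_ok(const struct omap_video_timings *t)
{
	return t->x_res <= dss_feat_get_param_max(FEAT_PARAM_MGR_WIDTH) &&
	       t->y_res <= dss_feat_get_param_max(FEAT_PARAM_MGR_HEIGHT);
}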
diff --git a/drivers/video/omap2/dss/hdmi.c b/drivers/video/omap2/dss/hdmi.c
index c4b4f6950a92..8195c7166d20 100644
--- a/drivers/video/omap2/dss/hdmi.c
+++ b/drivers/video/omap2/dss/hdmi.c
@@ -33,12 +33,6 @@
33#include <linux/pm_runtime.h> 33#include <linux/pm_runtime.h>
34#include <linux/clk.h> 34#include <linux/clk.h>
35#include <video/omapdss.h> 35#include <video/omapdss.h>
36#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
37 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
38#include <sound/soc.h>
39#include <sound/pcm_params.h>
40#include "ti_hdmi_4xxx_ip.h"
41#endif
42 36
43#include "ti_hdmi.h" 37#include "ti_hdmi.h"
44#include "dss.h" 38#include "dss.h"
@@ -63,7 +57,6 @@
63 57
64static struct { 58static struct {
65 struct mutex lock; 59 struct mutex lock;
66 struct omap_display_platform_data *pdata;
67 struct platform_device *pdev; 60 struct platform_device *pdev;
68 struct hdmi_ip_data ip_data; 61 struct hdmi_ip_data ip_data;
69 62
@@ -130,25 +123,12 @@ static int hdmi_runtime_get(void)
130 123
131 DSSDBG("hdmi_runtime_get\n"); 124 DSSDBG("hdmi_runtime_get\n");
132 125
133 /*
134 * HACK: Add dss_runtime_get() to ensure DSS clock domain is enabled.
135 * This should be removed later.
136 */
137 r = dss_runtime_get();
138 if (r < 0)
139 goto err_get_dss;
140
141 r = pm_runtime_get_sync(&hdmi.pdev->dev); 126 r = pm_runtime_get_sync(&hdmi.pdev->dev);
142 WARN_ON(r < 0); 127 WARN_ON(r < 0);
143 if (r < 0) 128 if (r < 0)
144 goto err_get_hdmi; 129 return r;
145 130
146 return 0; 131 return 0;
147
148err_get_hdmi:
149 dss_runtime_put();
150err_get_dss:
151 return r;
152} 132}
153 133
154static void hdmi_runtime_put(void) 134static void hdmi_runtime_put(void)
@@ -159,15 +139,9 @@ static void hdmi_runtime_put(void)
159 139
160 r = pm_runtime_put_sync(&hdmi.pdev->dev); 140 r = pm_runtime_put_sync(&hdmi.pdev->dev);
161 WARN_ON(r < 0); 141 WARN_ON(r < 0);
162
163 /*
164 * HACK: This is added to complement the dss_runtime_get() call in
165 * hdmi_runtime_get(). This should be removed later.
166 */
167 dss_runtime_put();
168} 142}
169 143
170int hdmi_init_display(struct omap_dss_device *dssdev) 144static int __init hdmi_init_display(struct omap_dss_device *dssdev)
171{ 145{
172 DSSDBG("init_display\n"); 146 DSSDBG("init_display\n");
173 147
@@ -344,7 +318,7 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
344 318
345 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data); 319 hdmi_compute_pll(dssdev, phy, &hdmi.ip_data.pll_data);
346 320
347 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 321 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
348 322
349 /* config the PLL and PHY hdmi_set_pll_pwrfirst */ 323 /* config the PLL and PHY hdmi_set_pll_pwrfirst */
350 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data); 324 r = hdmi.ip_data.ops->pll_enable(&hdmi.ip_data);
@@ -376,10 +350,11 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
376 dispc_enable_gamma_table(0); 350 dispc_enable_gamma_table(0);
377 351
378 /* tv size */ 352 /* tv size */
379 dispc_set_digit_size(dssdev->panel.timings.x_res, 353 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
380 dssdev->panel.timings.y_res);
381 354
382 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 1); 355 r = hdmi.ip_data.ops->video_enable(&hdmi.ip_data);
356 if (r)
357 goto err_vid_enable;
383 358
384 r = dss_mgr_enable(dssdev->manager); 359 r = dss_mgr_enable(dssdev->manager);
385 if (r) 360 if (r)
@@ -388,7 +363,8 @@ static int hdmi_power_on(struct omap_dss_device *dssdev)
388 return 0; 363 return 0;
389 364
390err_mgr_enable: 365err_mgr_enable:
391 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 366 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
367err_vid_enable:
392 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 368 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
393 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 369 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
394err: 370err:
@@ -400,7 +376,7 @@ static void hdmi_power_off(struct omap_dss_device *dssdev)
400{ 376{
401 dss_mgr_disable(dssdev->manager); 377 dss_mgr_disable(dssdev->manager);
402 378
403 hdmi.ip_data.ops->video_enable(&hdmi.ip_data, 0); 379 hdmi.ip_data.ops->video_disable(&hdmi.ip_data);
404 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data); 380 hdmi.ip_data.ops->phy_disable(&hdmi.ip_data);
405 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data); 381 hdmi.ip_data.ops->pll_disable(&hdmi.ip_data);
406 hdmi_runtime_put(); 382 hdmi_runtime_put();
@@ -436,10 +412,12 @@ void omapdss_hdmi_display_set_timing(struct omap_dss_device *dssdev)
436 r = hdmi_power_on(dssdev); 412 r = hdmi_power_on(dssdev);
437 if (r) 413 if (r)
438 DSSERR("failed to power on device\n"); 414 DSSERR("failed to power on device\n");
415 } else {
416 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
439 } 417 }
440} 418}
441 419
442void hdmi_dump_regs(struct seq_file *s) 420static void hdmi_dump_regs(struct seq_file *s)
443{ 421{
444 mutex_lock(&hdmi.lock); 422 mutex_lock(&hdmi.lock);
445 423
@@ -555,248 +533,201 @@ void omapdss_hdmi_display_disable(struct omap_dss_device *dssdev)
555 mutex_unlock(&hdmi.lock); 533 mutex_unlock(&hdmi.lock);
556} 534}
557 535
558#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 536static int hdmi_get_clocks(struct platform_device *pdev)
559 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
560
561static int hdmi_audio_trigger(struct snd_pcm_substream *substream, int cmd,
562 struct snd_soc_dai *dai)
563{ 537{
564 struct snd_soc_pcm_runtime *rtd = substream->private_data; 538 struct clk *clk;
565 struct snd_soc_codec *codec = rtd->codec;
566 struct platform_device *pdev = to_platform_device(codec->dev);
567 struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
568 int err = 0;
569 539
570 if (!(ip_data->ops) && !(ip_data->ops->audio_enable)) { 540 clk = clk_get(&pdev->dev, "sys_clk");
571 dev_err(&pdev->dev, "Cannot enable/disable audio\n"); 541 if (IS_ERR(clk)) {
572 return -ENODEV; 542 DSSERR("can't get sys_clk\n");
543 return PTR_ERR(clk);
573 } 544 }
574 545
575 switch (cmd) { 546 hdmi.sys_clk = clk;
576 case SNDRV_PCM_TRIGGER_START: 547
577 case SNDRV_PCM_TRIGGER_RESUME: 548 return 0;
578 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: 549}
579 ip_data->ops->audio_enable(ip_data, true); 550
580 break; 551static void hdmi_put_clocks(void)
581 case SNDRV_PCM_TRIGGER_STOP: 552{
582 case SNDRV_PCM_TRIGGER_SUSPEND: 553 if (hdmi.sys_clk)
583 case SNDRV_PCM_TRIGGER_PAUSE_PUSH: 554 clk_put(hdmi.sys_clk);
584 ip_data->ops->audio_enable(ip_data, false); 555}
585 break; 556
586 default: 557#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
587 err = -EINVAL; 558int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts)
588 } 559{
589 return err; 560 u32 deep_color;
590} 561 bool deep_color_correct = false;
591 562 u32 pclk = hdmi.ip_data.cfg.timings.pixel_clock;
592static int hdmi_audio_hw_params(struct snd_pcm_substream *substream, 563
593 struct snd_pcm_hw_params *params, 564 if (n == NULL || cts == NULL)
594 struct snd_soc_dai *dai)
595{
596 struct snd_soc_pcm_runtime *rtd = substream->private_data;
597 struct snd_soc_codec *codec = rtd->codec;
598 struct hdmi_ip_data *ip_data = snd_soc_codec_get_drvdata(codec);
599 struct hdmi_audio_format audio_format;
600 struct hdmi_audio_dma audio_dma;
601 struct hdmi_core_audio_config core_cfg;
602 struct hdmi_core_infoframe_audio aud_if_cfg;
603 int err, n, cts;
604 enum hdmi_core_audio_sample_freq sample_freq;
605
606 switch (params_format(params)) {
607 case SNDRV_PCM_FORMAT_S16_LE:
608 core_cfg.i2s_cfg.word_max_length =
609 HDMI_AUDIO_I2S_MAX_WORD_20BITS;
610 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_16_BITS;
611 core_cfg.i2s_cfg.in_length_bits =
612 HDMI_AUDIO_I2S_INPUT_LENGTH_16;
613 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
614 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
615 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
616 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
617 audio_dma.transfer_size = 0x10;
618 break;
619 case SNDRV_PCM_FORMAT_S24_LE:
620 core_cfg.i2s_cfg.word_max_length =
621 HDMI_AUDIO_I2S_MAX_WORD_24BITS;
622 core_cfg.i2s_cfg.word_length = HDMI_AUDIO_I2S_CHST_WORD_24_BITS;
623 core_cfg.i2s_cfg.in_length_bits =
624 HDMI_AUDIO_I2S_INPUT_LENGTH_24;
625 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
626 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
627 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
628 core_cfg.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
629 audio_dma.transfer_size = 0x20;
630 break;
631 default:
632 return -EINVAL; 565 return -EINVAL;
633 }
634 566
635 switch (params_rate(params)) { 567 /* TODO: When implemented, query deep color mode here. */
568 deep_color = 100;
569
570 /*
571 * When using deep color, the default N value (as in the HDMI
572 * specification) yields to an non-integer CTS. Hence, we
573 * modify it while keeping the restrictions described in
574 * section 7.2.1 of the HDMI 1.4a specification.
575 */
576 switch (sample_freq) {
636 case 32000: 577 case 32000:
637 sample_freq = HDMI_AUDIO_FS_32000; 578 case 48000:
579 case 96000:
580 case 192000:
581 if (deep_color == 125)
582 if (pclk == 27027 || pclk == 74250)
583 deep_color_correct = true;
584 if (deep_color == 150)
585 if (pclk == 27027)
586 deep_color_correct = true;
638 break; 587 break;
639 case 44100: 588 case 44100:
640 sample_freq = HDMI_AUDIO_FS_44100; 589 case 88200:
641 break; 590 case 176400:
642 case 48000: 591 if (deep_color == 125)
643 sample_freq = HDMI_AUDIO_FS_48000; 592 if (pclk == 27027)
593 deep_color_correct = true;
644 break; 594 break;
645 default: 595 default:
646 return -EINVAL; 596 return -EINVAL;
647 } 597 }
648 598
649 err = hdmi_config_audio_acr(ip_data, params_rate(params), &n, &cts); 599 if (deep_color_correct) {
650 if (err < 0) 600 switch (sample_freq) {
651 return err; 601 case 32000:
652 602 *n = 8192;
653 /* Audio wrapper config */ 603 break;
654 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL; 604 case 44100:
655 audio_format.active_chnnls_msk = 0x03; 605 *n = 12544;
656 audio_format.type = HDMI_AUDIO_TYPE_LPCM; 606 break;
657 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST; 607 case 48000:
658 /* Disable start/stop signals of IEC 60958 blocks */ 608 *n = 8192;
659 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_OFF; 609 break;
610 case 88200:
611 *n = 25088;
612 break;
613 case 96000:
614 *n = 16384;
615 break;
616 case 176400:
617 *n = 50176;
618 break;
619 case 192000:
620 *n = 32768;
621 break;
622 default:
623 return -EINVAL;
624 }
625 } else {
626 switch (sample_freq) {
627 case 32000:
628 *n = 4096;
629 break;
630 case 44100:
631 *n = 6272;
632 break;
633 case 48000:
634 *n = 6144;
635 break;
636 case 88200:
637 *n = 12544;
638 break;
639 case 96000:
640 *n = 12288;
641 break;
642 case 176400:
643 *n = 25088;
644 break;
645 case 192000:
646 *n = 24576;
647 break;
648 default:
649 return -EINVAL;
650 }
651 }
652 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */
653 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10);
660 654
661 audio_dma.block_size = 0xC0; 655 return 0;
662 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA; 656}
663 audio_dma.fifo_threshold = 0x20; /* in number of samples */
664 657
665 hdmi_wp_audio_config_dma(ip_data, &audio_dma); 658int hdmi_audio_enable(void)
666 hdmi_wp_audio_config_format(ip_data, &audio_format); 659{
660 DSSDBG("audio_enable\n");
667 661
668 /* 662 return hdmi.ip_data.ops->audio_enable(&hdmi.ip_data);
669 * I2S config 663}
670 */
671 core_cfg.i2s_cfg.en_high_bitrate_aud = false;
672 /* Only used with high bitrate audio */
673 core_cfg.i2s_cfg.cbit_order = false;
674 /* Serial data and word select should change on sck rising edge */
675 core_cfg.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
676 core_cfg.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
677 /* Set I2S word select polarity */
678 core_cfg.i2s_cfg.ws_polarity = HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT;
679 core_cfg.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
680 /* Set serial data to word select shift. See Phillips spec. */
681 core_cfg.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
682 /* Enable one of the four available serial data channels */
683 core_cfg.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
684
685 /* Core audio config */
686 core_cfg.freq_sample = sample_freq;
687 core_cfg.n = n;
688 core_cfg.cts = cts;
689 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
690 core_cfg.aud_par_busclk = 0;
691 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
692 core_cfg.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
693 } else {
694 core_cfg.aud_par_busclk = (((128 * 31) - 1) << 8);
695 core_cfg.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
696 core_cfg.use_mclk = true;
697 }
698 664
699 if (core_cfg.use_mclk) 665void hdmi_audio_disable(void)
700 core_cfg.mclk_mode = HDMI_AUDIO_MCLK_128FS; 666{
701 core_cfg.layout = HDMI_AUDIO_LAYOUT_2CH; 667 DSSDBG("audio_disable\n");
702 core_cfg.en_spdif = false;
703 /* Use sample frequency from channel status word */
704 core_cfg.fs_override = true;
705 /* Enable ACR packets */
706 core_cfg.en_acr_pkt = true;
707 /* Disable direct streaming digital audio */
708 core_cfg.en_dsd_audio = false;
709 /* Use parallel audio interface */
710 core_cfg.en_parallel_aud_input = true;
711
712 hdmi_core_audio_config(ip_data, &core_cfg);
713 668
714 /* 669 hdmi.ip_data.ops->audio_disable(&hdmi.ip_data);
715 * Configure packet
716 * info frame audio see doc CEA861-D page 74
717 */
718 aud_if_cfg.db1_coding_type = HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM;
719 aud_if_cfg.db1_channel_count = 2;
720 aud_if_cfg.db2_sample_freq = HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM;
721 aud_if_cfg.db2_sample_size = HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM;
722 aud_if_cfg.db4_channel_alloc = 0x00;
723 aud_if_cfg.db5_downmix_inh = false;
724 aud_if_cfg.db5_lsv = 0;
725
726 hdmi_core_audio_infoframe_config(ip_data, &aud_if_cfg);
727 return 0;
728} 670}
729 671
730static int hdmi_audio_startup(struct snd_pcm_substream *substream, 672int hdmi_audio_start(void)
731 struct snd_soc_dai *dai)
732{ 673{
733 if (!hdmi.ip_data.cfg.cm.mode) { 674 DSSDBG("audio_start\n");
734 pr_err("Current video settings do not support audio.\n"); 675
735 return -EIO; 676 return hdmi.ip_data.ops->audio_start(&hdmi.ip_data);
736 }
737 return 0;
738} 677}
739 678
740static int hdmi_audio_codec_probe(struct snd_soc_codec *codec) 679void hdmi_audio_stop(void)
741{ 680{
742 struct hdmi_ip_data *priv = &hdmi.ip_data; 681 DSSDBG("audio_stop\n");
743 682
744 snd_soc_codec_set_drvdata(codec, priv); 683 hdmi.ip_data.ops->audio_stop(&hdmi.ip_data);
745 return 0;
746} 684}
747 685
748static struct snd_soc_codec_driver hdmi_audio_codec_drv = { 686bool hdmi_mode_has_audio(void)
749 .probe = hdmi_audio_codec_probe, 687{
750}; 688 if (hdmi.ip_data.cfg.cm.mode == HDMI_HDMI)
689 return true;
690 else
691 return false;
692}
751 693
752static struct snd_soc_dai_ops hdmi_audio_codec_ops = { 694int hdmi_audio_config(struct omap_dss_audio *audio)
753 .hw_params = hdmi_audio_hw_params, 695{
754 .trigger = hdmi_audio_trigger, 696 return hdmi.ip_data.ops->audio_config(&hdmi.ip_data, audio);
755 .startup = hdmi_audio_startup, 697}
756};
757 698
758static struct snd_soc_dai_driver hdmi_codec_dai_drv = {
759 .name = "hdmi-audio-codec",
760 .playback = {
761 .channels_min = 2,
762 .channels_max = 2,
763 .rates = SNDRV_PCM_RATE_32000 |
764 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
765 .formats = SNDRV_PCM_FMTBIT_S16_LE |
766 SNDRV_PCM_FMTBIT_S24_LE,
767 },
768 .ops = &hdmi_audio_codec_ops,
769};
770#endif 699#endif
771 700
772static int hdmi_get_clocks(struct platform_device *pdev) 701static void __init hdmi_probe_pdata(struct platform_device *pdev)
773{ 702{
774 struct clk *clk; 703 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
704 int r, i;
775 705
776 clk = clk_get(&pdev->dev, "sys_clk"); 706 for (i = 0; i < pdata->num_devices; ++i) {
777 if (IS_ERR(clk)) { 707 struct omap_dss_device *dssdev = pdata->devices[i];
778 DSSERR("can't get sys_clk\n");
779 return PTR_ERR(clk);
780 }
781 708
782 hdmi.sys_clk = clk; 709 if (dssdev->type != OMAP_DISPLAY_TYPE_HDMI)
710 continue;
783 711
784 return 0; 712 r = hdmi_init_display(dssdev);
785} 713 if (r) {
714 DSSERR("device %s init failed: %d\n", dssdev->name, r);
715 continue;
716 }
786 717
787static void hdmi_put_clocks(void) 718 r = omap_dss_register_device(dssdev, &pdev->dev, i);
788{ 719 if (r)
789 if (hdmi.sys_clk) 720 DSSERR("device %s register failed: %d\n",
790 clk_put(hdmi.sys_clk); 721 dssdev->name, r);
722 }
791} 723}
792 724
793/* HDMI HW IP initialisation */ 725/* HDMI HW IP initialisation */
794static int omapdss_hdmihw_probe(struct platform_device *pdev) 726static int __init omapdss_hdmihw_probe(struct platform_device *pdev)
795{ 727{
796 struct resource *hdmi_mem; 728 struct resource *hdmi_mem;
797 int r; 729 int r;
798 730
799 hdmi.pdata = pdev->dev.platform_data;
800 hdmi.pdev = pdev; 731 hdmi.pdev = pdev;
801 732
802 mutex_init(&hdmi.lock); 733 mutex_init(&hdmi.lock);
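The hdmi_compute_acr() code added in this hunk selects the HDMI Audio Clock Regeneration N value per sample rate and then derives CTS from the closing formula. A worked example, assuming no deep color (deep_color = 100) and the 1080p pixel clock of 148500 kHz:

	sample_freq = 48000 Hz  ->  *n = 6144
	*cts = pclk * (*n / 128) * deep_color / (sample_freq / 10)
	     = 148500 * (6144 / 128) * 100 / 4800
	     = 148500 * 48 * 100 / 4800
	     = 148500

so CTS comes out equal to the pixel clock in kHz, the expected N/CTS pair for 48 kHz audio at 148.5 MHz.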
@@ -830,28 +761,18 @@ static int omapdss_hdmihw_probe(struct platform_device *pdev)
830 761
831 hdmi_panel_init(); 762 hdmi_panel_init();
832 763
833#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 764 dss_debugfs_create_file("hdmi", hdmi_dump_regs);
834 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 765
766 hdmi_probe_pdata(pdev);
835 767
836 /* Register ASoC codec DAI */
837 r = snd_soc_register_codec(&pdev->dev, &hdmi_audio_codec_drv,
838 &hdmi_codec_dai_drv, 1);
839 if (r) {
840 DSSERR("can't register ASoC HDMI audio codec\n");
841 return r;
842 }
843#endif
844 return 0; 768 return 0;
845} 769}
846 770
847static int omapdss_hdmihw_remove(struct platform_device *pdev) 771static int __exit omapdss_hdmihw_remove(struct platform_device *pdev)
848{ 772{
849 hdmi_panel_exit(); 773 omap_dss_unregister_child_devices(&pdev->dev);
850 774
851#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 775 hdmi_panel_exit();
852 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
853 snd_soc_unregister_codec(&pdev->dev);
854#endif
855 776
856 pm_runtime_disable(&pdev->dev); 777 pm_runtime_disable(&pdev->dev);
857 778
@@ -867,7 +788,6 @@ static int hdmi_runtime_suspend(struct device *dev)
867 clk_disable(hdmi.sys_clk); 788 clk_disable(hdmi.sys_clk);
868 789
869 dispc_runtime_put(); 790 dispc_runtime_put();
870 dss_runtime_put();
871 791
872 return 0; 792 return 0;
873} 793}
@@ -876,23 +796,13 @@ static int hdmi_runtime_resume(struct device *dev)
876{ 796{
877 int r; 797 int r;
878 798
879 r = dss_runtime_get();
880 if (r < 0)
881 goto err_get_dss;
882
883 r = dispc_runtime_get(); 799 r = dispc_runtime_get();
884 if (r < 0) 800 if (r < 0)
885 goto err_get_dispc; 801 return r;
886
887 802
888 clk_enable(hdmi.sys_clk); 803 clk_enable(hdmi.sys_clk);
889 804
890 return 0; 805 return 0;
891
892err_get_dispc:
893 dss_runtime_put();
894err_get_dss:
895 return r;
896} 806}
897 807
898static const struct dev_pm_ops hdmi_pm_ops = { 808static const struct dev_pm_ops hdmi_pm_ops = {
@@ -901,8 +811,7 @@ static const struct dev_pm_ops hdmi_pm_ops = {
901}; 811};
902 812
903static struct platform_driver omapdss_hdmihw_driver = { 813static struct platform_driver omapdss_hdmihw_driver = {
904 .probe = omapdss_hdmihw_probe, 814 .remove = __exit_p(omapdss_hdmihw_remove),
905 .remove = omapdss_hdmihw_remove,
906 .driver = { 815 .driver = {
907 .name = "omapdss_hdmi", 816 .name = "omapdss_hdmi",
908 .owner = THIS_MODULE, 817 .owner = THIS_MODULE,
@@ -910,12 +819,12 @@ static struct platform_driver omapdss_hdmihw_driver = {
910 }, 819 },
911}; 820};
912 821
913int hdmi_init_platform_driver(void) 822int __init hdmi_init_platform_driver(void)
914{ 823{
915 return platform_driver_register(&omapdss_hdmihw_driver); 824 return platform_driver_probe(&omapdss_hdmihw_driver, omapdss_hdmihw_probe);
916} 825}
917 826
918void hdmi_uninit_platform_driver(void) 827void __exit hdmi_uninit_platform_driver(void)
919{ 828{
920 return platform_driver_unregister(&omapdss_hdmihw_driver); 829 platform_driver_unregister(&omapdss_hdmihw_driver);
921} 830}
diff --git a/drivers/video/omap2/dss/hdmi_panel.c b/drivers/video/omap2/dss/hdmi_panel.c
index 533d5dc634d2..1179e3c4b1c7 100644
--- a/drivers/video/omap2/dss/hdmi_panel.c
+++ b/drivers/video/omap2/dss/hdmi_panel.c
@@ -30,7 +30,12 @@
30#include "dss.h" 30#include "dss.h"
31 31
32static struct { 32static struct {
33 struct mutex hdmi_lock; 33 /* This protects the panel ops, mainly when accessing the HDMI IP. */
34 struct mutex lock;
35#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
36 /* This protects the audio ops, specifically. */
37 spinlock_t audio_lock;
38#endif
34} hdmi; 39} hdmi;
35 40
36 41
@@ -54,12 +59,168 @@ static void hdmi_panel_remove(struct omap_dss_device *dssdev)
54 59
55} 60}
56 61
62#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
63static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
64{
65 unsigned long flags;
66 int r;
67
68 mutex_lock(&hdmi.lock);
69 spin_lock_irqsave(&hdmi.audio_lock, flags);
70
71 /* enable audio only if the display is active and supports audio */
72 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
73 !hdmi_mode_has_audio()) {
74 DSSERR("audio not supported or display is off\n");
75 r = -EPERM;
76 goto err;
77 }
78
79 r = hdmi_audio_enable();
80
81 if (!r)
82 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
83
84err:
85 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
86 mutex_unlock(&hdmi.lock);
87 return r;
88}
89
90static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
91{
92 unsigned long flags;
93
94 spin_lock_irqsave(&hdmi.audio_lock, flags);
95
96 hdmi_audio_disable();
97
98 dssdev->audio_state = OMAP_DSS_AUDIO_DISABLED;
99
100 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
101}
102
103static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
104{
105 unsigned long flags;
106 int r;
107
108 spin_lock_irqsave(&hdmi.audio_lock, flags);
109 /*
110 * No need to check the panel state. It was checked when trasitioning
111 * to AUDIO_ENABLED.
112 */
113 if (dssdev->audio_state != OMAP_DSS_AUDIO_ENABLED) {
114 DSSERR("audio start from invalid state\n");
115 r = -EPERM;
116 goto err;
117 }
118
119 r = hdmi_audio_start();
120
121 if (!r)
122 dssdev->audio_state = OMAP_DSS_AUDIO_PLAYING;
123
124err:
125 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
126 return r;
127}
128
129static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
130{
131 unsigned long flags;
132
133 spin_lock_irqsave(&hdmi.audio_lock, flags);
134
135 hdmi_audio_stop();
136 dssdev->audio_state = OMAP_DSS_AUDIO_ENABLED;
137
138 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
139}
140
141static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
142{
143 bool r = false;
144
145 mutex_lock(&hdmi.lock);
146
147 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
148 goto err;
149
150 if (!hdmi_mode_has_audio())
151 goto err;
152
153 r = true;
154err:
155 mutex_unlock(&hdmi.lock);
156 return r;
157}
158
159static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
160 struct omap_dss_audio *audio)
161{
162 unsigned long flags;
163 int r;
164
165 mutex_lock(&hdmi.lock);
166 spin_lock_irqsave(&hdmi.audio_lock, flags);
167
168 /* config audio only if the display is active and supports audio */
169 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE ||
170 !hdmi_mode_has_audio()) {
171 DSSERR("audio not supported or display is off\n");
172 r = -EPERM;
173 goto err;
174 }
175
176 r = hdmi_audio_config(audio);
177
178 if (!r)
179 dssdev->audio_state = OMAP_DSS_AUDIO_CONFIGURED;
180
181err:
182 spin_unlock_irqrestore(&hdmi.audio_lock, flags);
183 mutex_unlock(&hdmi.lock);
184 return r;
185}
186
187#else
188static int hdmi_panel_audio_enable(struct omap_dss_device *dssdev)
189{
190 return -EPERM;
191}
192
193static void hdmi_panel_audio_disable(struct omap_dss_device *dssdev)
194{
195}
196
197static int hdmi_panel_audio_start(struct omap_dss_device *dssdev)
198{
199 return -EPERM;
200}
201
202static void hdmi_panel_audio_stop(struct omap_dss_device *dssdev)
203{
204}
205
206static bool hdmi_panel_audio_supported(struct omap_dss_device *dssdev)
207{
208 return false;
209}
210
211static int hdmi_panel_audio_config(struct omap_dss_device *dssdev,
212 struct omap_dss_audio *audio)
213{
214 return -EPERM;
215}
216#endif
217
57static int hdmi_panel_enable(struct omap_dss_device *dssdev) 218static int hdmi_panel_enable(struct omap_dss_device *dssdev)
58{ 219{
59 int r = 0; 220 int r = 0;
60 DSSDBG("ENTER hdmi_panel_enable\n"); 221 DSSDBG("ENTER hdmi_panel_enable\n");
61 222
62 mutex_lock(&hdmi.hdmi_lock); 223 mutex_lock(&hdmi.lock);
63 224
64 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { 225 if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) {
65 r = -EINVAL; 226 r = -EINVAL;
@@ -75,40 +236,52 @@ static int hdmi_panel_enable(struct omap_dss_device *dssdev)
75 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 236 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
76 237
77err: 238err:
78 mutex_unlock(&hdmi.hdmi_lock); 239 mutex_unlock(&hdmi.lock);
79 240
80 return r; 241 return r;
81} 242}
82 243
83static void hdmi_panel_disable(struct omap_dss_device *dssdev) 244static void hdmi_panel_disable(struct omap_dss_device *dssdev)
84{ 245{
85 mutex_lock(&hdmi.hdmi_lock); 246 mutex_lock(&hdmi.lock);
86 247
87 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) 248 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
249 /*
250 * TODO: notify audio users that the display was disabled. For
251 * now, disable audio locally to not break our audio state
252 * machine.
253 */
254 hdmi_panel_audio_disable(dssdev);
88 omapdss_hdmi_display_disable(dssdev); 255 omapdss_hdmi_display_disable(dssdev);
256 }
89 257
90 dssdev->state = OMAP_DSS_DISPLAY_DISABLED; 258 dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
91 259
92 mutex_unlock(&hdmi.hdmi_lock); 260 mutex_unlock(&hdmi.lock);
93} 261}
94 262
95static int hdmi_panel_suspend(struct omap_dss_device *dssdev) 263static int hdmi_panel_suspend(struct omap_dss_device *dssdev)
96{ 264{
97 int r = 0; 265 int r = 0;
98 266
99 mutex_lock(&hdmi.hdmi_lock); 267 mutex_lock(&hdmi.lock);
100 268
101 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 269 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
102 r = -EINVAL; 270 r = -EINVAL;
103 goto err; 271 goto err;
104 } 272 }
105 273
106 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED; 274 /*
275 * TODO: notify audio users that the display was suspended. For now,
276 * disable audio locally to not break our audio state machine.
277 */
278 hdmi_panel_audio_disable(dssdev);
107 279
280 dssdev->state = OMAP_DSS_DISPLAY_SUSPENDED;
108 omapdss_hdmi_display_disable(dssdev); 281 omapdss_hdmi_display_disable(dssdev);
109 282
110err: 283err:
111 mutex_unlock(&hdmi.hdmi_lock); 284 mutex_unlock(&hdmi.lock);
112 285
113 return r; 286 return r;
114} 287}
@@ -117,7 +290,7 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
117{ 290{
118 int r = 0; 291 int r = 0;
119 292
120 mutex_lock(&hdmi.hdmi_lock); 293 mutex_lock(&hdmi.lock);
121 294
122 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) { 295 if (dssdev->state != OMAP_DSS_DISPLAY_SUSPENDED) {
123 r = -EINVAL; 296 r = -EINVAL;
@@ -129,11 +302,12 @@ static int hdmi_panel_resume(struct omap_dss_device *dssdev)
129 DSSERR("failed to power on\n"); 302 DSSERR("failed to power on\n");
130 goto err; 303 goto err;
131 } 304 }
305 /* TODO: notify audio users that the panel resumed. */
132 306
133 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; 307 dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
134 308
135err: 309err:
136 mutex_unlock(&hdmi.hdmi_lock); 310 mutex_unlock(&hdmi.lock);
137 311
138 return r; 312 return r;
139} 313}
@@ -141,11 +315,11 @@ err:
141static void hdmi_get_timings(struct omap_dss_device *dssdev, 315static void hdmi_get_timings(struct omap_dss_device *dssdev,
142 struct omap_video_timings *timings) 316 struct omap_video_timings *timings)
143{ 317{
144 mutex_lock(&hdmi.hdmi_lock); 318 mutex_lock(&hdmi.lock);
145 319
146 *timings = dssdev->panel.timings; 320 *timings = dssdev->panel.timings;
147 321
148 mutex_unlock(&hdmi.hdmi_lock); 322 mutex_unlock(&hdmi.lock);
149} 323}
150 324
151static void hdmi_set_timings(struct omap_dss_device *dssdev, 325static void hdmi_set_timings(struct omap_dss_device *dssdev,
@@ -153,12 +327,18 @@ static void hdmi_set_timings(struct omap_dss_device *dssdev,
153{ 327{
154 DSSDBG("hdmi_set_timings\n"); 328 DSSDBG("hdmi_set_timings\n");
155 329
156 mutex_lock(&hdmi.hdmi_lock); 330 mutex_lock(&hdmi.lock);
331
332 /*
333 * TODO: notify audio users that there was a timings change. For
334 * now, disable audio locally to not break our audio state machine.
335 */
336 hdmi_panel_audio_disable(dssdev);
157 337
158 dssdev->panel.timings = *timings; 338 dssdev->panel.timings = *timings;
159 omapdss_hdmi_display_set_timing(dssdev); 339 omapdss_hdmi_display_set_timing(dssdev);
160 340
161 mutex_unlock(&hdmi.hdmi_lock); 341 mutex_unlock(&hdmi.lock);
162} 342}
163 343
164static int hdmi_check_timings(struct omap_dss_device *dssdev, 344static int hdmi_check_timings(struct omap_dss_device *dssdev,
@@ -168,11 +348,11 @@ static int hdmi_check_timings(struct omap_dss_device *dssdev,
168 348
169 DSSDBG("hdmi_check_timings\n"); 349 DSSDBG("hdmi_check_timings\n");
170 350
171 mutex_lock(&hdmi.hdmi_lock); 351 mutex_lock(&hdmi.lock);
172 352
173 r = omapdss_hdmi_display_check_timing(dssdev, timings); 353 r = omapdss_hdmi_display_check_timing(dssdev, timings);
174 354
175 mutex_unlock(&hdmi.hdmi_lock); 355 mutex_unlock(&hdmi.lock);
176 return r; 356 return r;
177} 357}
178 358
@@ -180,7 +360,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
180{ 360{
181 int r; 361 int r;
182 362
183 mutex_lock(&hdmi.hdmi_lock); 363 mutex_lock(&hdmi.lock);
184 364
185 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 365 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
186 r = omapdss_hdmi_display_enable(dssdev); 366 r = omapdss_hdmi_display_enable(dssdev);
@@ -194,7 +374,7 @@ static int hdmi_read_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
194 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) 374 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
195 omapdss_hdmi_display_disable(dssdev); 375 omapdss_hdmi_display_disable(dssdev);
196err: 376err:
197 mutex_unlock(&hdmi.hdmi_lock); 377 mutex_unlock(&hdmi.lock);
198 378
199 return r; 379 return r;
200} 380}
@@ -203,7 +383,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
203{ 383{
204 int r; 384 int r;
205 385
206 mutex_lock(&hdmi.hdmi_lock); 386 mutex_lock(&hdmi.lock);
207 387
208 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) { 388 if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE) {
209 r = omapdss_hdmi_display_enable(dssdev); 389 r = omapdss_hdmi_display_enable(dssdev);
@@ -217,7 +397,7 @@ static bool hdmi_detect(struct omap_dss_device *dssdev)
217 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) 397 dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED)
218 omapdss_hdmi_display_disable(dssdev); 398 omapdss_hdmi_display_disable(dssdev);
219err: 399err:
220 mutex_unlock(&hdmi.hdmi_lock); 400 mutex_unlock(&hdmi.lock);
221 401
222 return r; 402 return r;
223} 403}
@@ -234,6 +414,12 @@ static struct omap_dss_driver hdmi_driver = {
234 .check_timings = hdmi_check_timings, 414 .check_timings = hdmi_check_timings,
235 .read_edid = hdmi_read_edid, 415 .read_edid = hdmi_read_edid,
236 .detect = hdmi_detect, 416 .detect = hdmi_detect,
417 .audio_enable = hdmi_panel_audio_enable,
418 .audio_disable = hdmi_panel_audio_disable,
419 .audio_start = hdmi_panel_audio_start,
420 .audio_stop = hdmi_panel_audio_stop,
421 .audio_supported = hdmi_panel_audio_supported,
422 .audio_config = hdmi_panel_audio_config,
237 .driver = { 423 .driver = {
238 .name = "hdmi_panel", 424 .name = "hdmi_panel",
239 .owner = THIS_MODULE, 425 .owner = THIS_MODULE,
@@ -242,7 +428,11 @@ static struct omap_dss_driver hdmi_driver = {
242 428
243int hdmi_panel_init(void) 429int hdmi_panel_init(void)
244{ 430{
245 mutex_init(&hdmi.hdmi_lock); 431 mutex_init(&hdmi.lock);
432
433#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
434 spin_lock_init(&hdmi.audio_lock);
435#endif
246 436
247 omap_dss_register_driver(&hdmi_driver); 437 omap_dss_register_driver(&hdmi_driver);
248 438
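With these ops wired into hdmi_driver, an audio user (for example an ASoC machine or codec driver) reaches the HDMI audio hardware only through the omap_dss_driver callbacks. A minimal, illustrative calling sequence; the helper name and the way the omap_dss_audio structure gets filled in are assumptions, not part of this patch:

/* Sketch: expected call order for the new audio panel ops. */
static int start_hdmi_audio(struct omap_dss_device *dssdev,
			    struct omap_dss_audio *audio)
{
	int r;

	if (!dssdev->driver->audio_supported(dssdev))
		return -ENODEV;

	r = dssdev->driver->audio_config(dssdev, audio);
	if (r)
		return r;

	r = dssdev->driver->audio_enable(dssdev);
	if (r)
		return r;

	return dssdev->driver->audio_start(dssdev);	/* stop/disable mirror this */
}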
diff --git a/drivers/video/omap2/dss/manager.c b/drivers/video/omap2/dss/manager.c
index e7364603f6a1..0cbcde4c688a 100644
--- a/drivers/video/omap2/dss/manager.c
+++ b/drivers/video/omap2/dss/manager.c
@@ -654,9 +654,20 @@ static int dss_mgr_check_zorder(struct omap_overlay_manager *mgr,
654 return 0; 654 return 0;
655} 655}
656 656
657int dss_mgr_check_timings(struct omap_overlay_manager *mgr,
658 const struct omap_video_timings *timings)
659{
660 if (!dispc_mgr_timings_ok(mgr->id, timings)) {
661 DSSERR("check_manager: invalid timings\n");
662 return -EINVAL;
663 }
664
665 return 0;
666}
667
657int dss_mgr_check(struct omap_overlay_manager *mgr, 668int dss_mgr_check(struct omap_overlay_manager *mgr,
658 struct omap_dss_device *dssdev,
659 struct omap_overlay_manager_info *info, 669 struct omap_overlay_manager_info *info,
670 const struct omap_video_timings *mgr_timings,
660 struct omap_overlay_info **overlay_infos) 671 struct omap_overlay_info **overlay_infos)
661{ 672{
662 struct omap_overlay *ovl; 673 struct omap_overlay *ovl;
@@ -668,6 +679,10 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
668 return r; 679 return r;
669 } 680 }
670 681
682 r = dss_mgr_check_timings(mgr, mgr_timings);
683 if (r)
684 return r;
685
671 list_for_each_entry(ovl, &mgr->overlays, list) { 686 list_for_each_entry(ovl, &mgr->overlays, list) {
672 struct omap_overlay_info *oi; 687 struct omap_overlay_info *oi;
673 int r; 688 int r;
@@ -677,7 +692,7 @@ int dss_mgr_check(struct omap_overlay_manager *mgr,
677 if (oi == NULL) 692 if (oi == NULL)
678 continue; 693 continue;
679 694
680 r = dss_ovl_check(ovl, oi, dssdev); 695 r = dss_ovl_check(ovl, oi, mgr_timings);
681 if (r) 696 if (r)
682 return r; 697 return r;
683 } 698 }
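dss_mgr_check() now takes the manager's own video timings instead of a display device and, through the new dss_mgr_check_timings(), rejects configurations whose timings the DISPC channel cannot generate before any overlay is inspected. A short sketch of a caller; the surrounding variables come from the caller's context (apply.c in the real driver) and are assumptions here:

/* Sketch only: validate a pending configuration against manager timings. */
static int try_apply_mgr(struct omap_overlay_manager *mgr,
			 struct omap_overlay_manager_info *info,
			 const struct omap_video_timings *timings,
			 struct omap_overlay_info **ovl_infos)
{
	int r;

	r = dss_mgr_check(mgr, info, timings, ovl_infos);
	if (r)
		return r;	/* bad timings or a rejected overlay setup */

	/* ...program the hardware only after the check passes... */
	return 0;
}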
diff --git a/drivers/video/omap2/dss/overlay.c b/drivers/video/omap2/dss/overlay.c
index 6e821810deec..b0ba60f88dd2 100644
--- a/drivers/video/omap2/dss/overlay.c
+++ b/drivers/video/omap2/dss/overlay.c
@@ -628,19 +628,23 @@ int dss_ovl_simple_check(struct omap_overlay *ovl,
628 return -EINVAL; 628 return -EINVAL;
629 } 629 }
630 630
631 if (dss_feat_rotation_type_supported(info->rotation_type) == 0) {
632 DSSERR("check_overlay: rotation type %d not supported\n",
633 info->rotation_type);
634 return -EINVAL;
635 }
636
631 return 0; 637 return 0;
632} 638}
633 639
634int dss_ovl_check(struct omap_overlay *ovl, 640int dss_ovl_check(struct omap_overlay *ovl, struct omap_overlay_info *info,
635 struct omap_overlay_info *info, struct omap_dss_device *dssdev) 641 const struct omap_video_timings *mgr_timings)
636{ 642{
637 u16 outw, outh; 643 u16 outw, outh;
638 u16 dw, dh; 644 u16 dw, dh;
639 645
640 if (dssdev == NULL) 646 dw = mgr_timings->x_res;
641 return 0; 647 dh = mgr_timings->y_res;
642
643 dssdev->driver->get_resolution(dssdev, &dw, &dh);
644 648
645 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) { 649 if ((ovl->caps & OMAP_DSS_OVL_CAP_SCALE) == 0) {
646 outw = info->width; 650 outw = info->width;
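dss_ovl_simple_check() now also validates the requested rotation type against the per-SoC mask added to dss_features.c, and dss_ovl_check() sizes the overlay against the manager timings rather than asking the display for its resolution. A small illustration of the first check, using the rotation types listed earlier in this patch; the overlay setup is hypothetical and the remaining info fields are omitted for brevity:

/* Sketch: VRFB rotation is absent from OMAP4's supported mask
 * (DMA | TILER), so the rotation-type check above rejects it. */
static int check_vrfb_rotation(struct omap_overlay *ovl)
{
	struct omap_overlay_info info = {
		.rotation_type = OMAP_DSS_ROT_VRFB,
		/* other fields left at defaults in this sketch */
	};

	return dss_ovl_simple_check(ovl, &info);
}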
diff --git a/drivers/video/omap2/dss/rfbi.c b/drivers/video/omap2/dss/rfbi.c
index 788a0ef6323a..3d8c206e90e5 100644
--- a/drivers/video/omap2/dss/rfbi.c
+++ b/drivers/video/omap2/dss/rfbi.c
@@ -304,13 +304,23 @@ static void rfbi_transfer_area(struct omap_dss_device *dssdev, u16 width,
304 u16 height, void (*callback)(void *data), void *data) 304 u16 height, void (*callback)(void *data), void *data)
305{ 305{
306 u32 l; 306 u32 l;
307 struct omap_video_timings timings = {
308 .hsw = 1,
309 .hfp = 1,
310 .hbp = 1,
311 .vsw = 1,
312 .vfp = 0,
313 .vbp = 0,
314 .x_res = width,
315 .y_res = height,
316 };
307 317
308 /*BUG_ON(callback == 0);*/ 318 /*BUG_ON(callback == 0);*/
309 BUG_ON(rfbi.framedone_callback != NULL); 319 BUG_ON(rfbi.framedone_callback != NULL);
310 320
311 DSSDBG("rfbi_transfer_area %dx%d\n", width, height); 321 DSSDBG("rfbi_transfer_area %dx%d\n", width, height);
312 322
313 dispc_mgr_set_lcd_size(dssdev->manager->id, width, height); 323 dss_mgr_set_timings(dssdev->manager, &timings);
314 324
315 dispc_mgr_enable(dssdev->manager->id, true); 325 dispc_mgr_enable(dssdev->manager->id, true);
316 326
@@ -766,6 +776,16 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
766 u16 *x, u16 *y, u16 *w, u16 *h) 776 u16 *x, u16 *y, u16 *w, u16 *h)
767{ 777{
768 u16 dw, dh; 778 u16 dw, dh;
779 struct omap_video_timings timings = {
780 .hsw = 1,
781 .hfp = 1,
782 .hbp = 1,
783 .vsw = 1,
784 .vfp = 0,
785 .vbp = 0,
786 .x_res = *w,
787 .y_res = *h,
788 };
769 789
770 dssdev->driver->get_resolution(dssdev, &dw, &dh); 790 dssdev->driver->get_resolution(dssdev, &dw, &dh);
771 791
@@ -784,7 +804,7 @@ int omap_rfbi_prepare_update(struct omap_dss_device *dssdev,
784 if (*w == 0 || *h == 0) 804 if (*w == 0 || *h == 0)
785 return -EINVAL; 805 return -EINVAL;
786 806
787 dispc_mgr_set_lcd_size(dssdev->manager->id, *w, *h); 807 dss_mgr_set_timings(dssdev->manager, &timings);
788 808
789 return 0; 809 return 0;
790} 810}
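Both RFBI update paths above now synthesize a minimal omap_video_timings (single-pixel sync and horizontal porches, no vertical porches) sized to the update region and hand it to dss_mgr_set_timings(), where the old code poked dispc_mgr_set_lcd_size() directly. The pattern is open-coded twice in this patch; a purely illustrative helper capturing it:

/* Sketch: minimal timings for a manual-update (command mode) transfer;
 * only the active area changes from update to update. */
static void rfbi_fill_update_timings(struct omap_video_timings *t,
				     u16 width, u16 height)
{
	*t = (struct omap_video_timings) {
		.hsw = 1, .hfp = 1, .hbp = 1,
		.vsw = 1, .vfp = 0, .vbp = 0,
		.x_res = width,
		.y_res = height,
	};
}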
@@ -799,7 +819,7 @@ int omap_rfbi_update(struct omap_dss_device *dssdev,
799} 819}
800EXPORT_SYMBOL(omap_rfbi_update); 820EXPORT_SYMBOL(omap_rfbi_update);
801 821
802void rfbi_dump_regs(struct seq_file *s) 822static void rfbi_dump_regs(struct seq_file *s)
803{ 823{
804#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r)) 824#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, rfbi_read_reg(r))
805 825
@@ -900,15 +920,39 @@ void omapdss_rfbi_display_disable(struct omap_dss_device *dssdev)
900} 920}
901EXPORT_SYMBOL(omapdss_rfbi_display_disable); 921EXPORT_SYMBOL(omapdss_rfbi_display_disable);
902 922
903int rfbi_init_display(struct omap_dss_device *dssdev) 923static int __init rfbi_init_display(struct omap_dss_device *dssdev)
904{ 924{
905 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev; 925 rfbi.dssdev[dssdev->phy.rfbi.channel] = dssdev;
906 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE; 926 dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE;
907 return 0; 927 return 0;
908} 928}
909 929
930static void __init rfbi_probe_pdata(struct platform_device *pdev)
931{
932 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
933 int i, r;
934
935 for (i = 0; i < pdata->num_devices; ++i) {
936 struct omap_dss_device *dssdev = pdata->devices[i];
937
938 if (dssdev->type != OMAP_DISPLAY_TYPE_DBI)
939 continue;
940
941 r = rfbi_init_display(dssdev);
942 if (r) {
943 DSSERR("device %s init failed: %d\n", dssdev->name, r);
944 continue;
945 }
946
947 r = omap_dss_register_device(dssdev, &pdev->dev, i);
948 if (r)
949 DSSERR("device %s register failed: %d\n",
950 dssdev->name, r);
951 }
952}
953
910/* RFBI HW IP initialisation */ 954/* RFBI HW IP initialisation */
911static int omap_rfbihw_probe(struct platform_device *pdev) 955static int __init omap_rfbihw_probe(struct platform_device *pdev)
912{ 956{
913 u32 rev; 957 u32 rev;
914 struct resource *rfbi_mem; 958 struct resource *rfbi_mem;
@@ -956,6 +1000,10 @@ static int omap_rfbihw_probe(struct platform_device *pdev)
956 1000
957 rfbi_runtime_put(); 1001 rfbi_runtime_put();
958 1002
1003 dss_debugfs_create_file("rfbi", rfbi_dump_regs);
1004
1005 rfbi_probe_pdata(pdev);
1006
959 return 0; 1007 return 0;
960 1008
961err_runtime_get: 1009err_runtime_get:
@@ -963,8 +1011,9 @@ err_runtime_get:
963 return r; 1011 return r;
964} 1012}
965 1013
966static int omap_rfbihw_remove(struct platform_device *pdev) 1014static int __exit omap_rfbihw_remove(struct platform_device *pdev)
967{ 1015{
1016 omap_dss_unregister_child_devices(&pdev->dev);
968 pm_runtime_disable(&pdev->dev); 1017 pm_runtime_disable(&pdev->dev);
969 return 0; 1018 return 0;
970} 1019}
@@ -972,7 +1021,6 @@ static int omap_rfbihw_remove(struct platform_device *pdev)
972static int rfbi_runtime_suspend(struct device *dev) 1021static int rfbi_runtime_suspend(struct device *dev)
973{ 1022{
974 dispc_runtime_put(); 1023 dispc_runtime_put();
975 dss_runtime_put();
976 1024
977 return 0; 1025 return 0;
978} 1026}
@@ -981,20 +1029,11 @@ static int rfbi_runtime_resume(struct device *dev)
981{ 1029{
982 int r; 1030 int r;
983 1031
984 r = dss_runtime_get();
985 if (r < 0)
986 goto err_get_dss;
987
988 r = dispc_runtime_get(); 1032 r = dispc_runtime_get();
989 if (r < 0) 1033 if (r < 0)
990 goto err_get_dispc; 1034 return r;
991 1035
992 return 0; 1036 return 0;
993
994err_get_dispc:
995 dss_runtime_put();
996err_get_dss:
997 return r;
998} 1037}
999 1038
1000static const struct dev_pm_ops rfbi_pm_ops = { 1039static const struct dev_pm_ops rfbi_pm_ops = {
@@ -1003,8 +1042,7 @@ static const struct dev_pm_ops rfbi_pm_ops = {
1003}; 1042};
1004 1043
1005static struct platform_driver omap_rfbihw_driver = { 1044static struct platform_driver omap_rfbihw_driver = {
1006 .probe = omap_rfbihw_probe, 1045 .remove = __exit_p(omap_rfbihw_remove),
1007 .remove = omap_rfbihw_remove,
1008 .driver = { 1046 .driver = {
1009 .name = "omapdss_rfbi", 1047 .name = "omapdss_rfbi",
1010 .owner = THIS_MODULE, 1048 .owner = THIS_MODULE,
@@ -1012,12 +1050,12 @@ static struct platform_driver omap_rfbihw_driver = {
1012 }, 1050 },
1013}; 1051};
1014 1052
1015int rfbi_init_platform_driver(void) 1053int __init rfbi_init_platform_driver(void)
1016{ 1054{
1017 return platform_driver_register(&omap_rfbihw_driver); 1055 return platform_driver_probe(&omap_rfbihw_driver, omap_rfbihw_probe);
1018} 1056}
1019 1057
1020void rfbi_uninit_platform_driver(void) 1058void __exit rfbi_uninit_platform_driver(void)
1021{ 1059{
1022 return platform_driver_unregister(&omap_rfbihw_driver); 1060 platform_driver_unregister(&omap_rfbihw_driver);
1023} 1061}
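
The RFBI conversion above is one instance of a pattern applied to every DSS encoder driver in this patch set: the probe path is marked __init, registration moves from platform_driver_register() to platform_driver_probe() (so no .probe pointer is kept in the driver struct), panel devices from the board's platform data are registered as children of the encoder's platform device, and remove unregisters those children. A minimal sketch of that pattern, with the hypothetical names mydrv and MY_DISPLAY_TYPE standing in for the per-encoder specifics:

	static int __init mydrv_probe(struct platform_device *pdev)
	{
		struct omap_dss_board_info *pdata = pdev->dev.platform_data;
		int i, r;

		for (i = 0; i < pdata->num_devices; ++i) {
			struct omap_dss_device *dssdev = pdata->devices[i];

			if (dssdev->type != MY_DISPLAY_TYPE)	/* hypothetical type */
				continue;

			/* per-device init would go here, then register as a child */
			r = omap_dss_register_device(dssdev, &pdev->dev, i);
			if (r)
				DSSERR("device %s register failed: %d\n",
						dssdev->name, r);
		}

		return 0;
	}

	static int __exit mydrv_remove(struct platform_device *pdev)
	{
		omap_dss_unregister_child_devices(&pdev->dev);
		return 0;
	}

	static struct platform_driver mydrv_driver = {
		/* no .probe: platform_driver_probe() supplies it, allowing __init */
		.remove	= __exit_p(mydrv_remove),
		.driver	= {
			.name	= "omapdss_mydrv",	/* hypothetical name */
			.owner	= THIS_MODULE,
		},
	};

	int __init mydrv_init_platform_driver(void)
	{
		return platform_driver_probe(&mydrv_driver, mydrv_probe);
	}

The point of platform_driver_probe() here is that the probe function, and everything only it calls, can live in the __init section and be discarded after boot, since the device is known to exist at registration time.
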
diff --git a/drivers/video/omap2/dss/sdi.c b/drivers/video/omap2/dss/sdi.c
index 8266ca0d666b..3a43dc2a9b46 100644
--- a/drivers/video/omap2/dss/sdi.c
+++ b/drivers/video/omap2/dss/sdi.c
@@ -24,6 +24,7 @@
24#include <linux/err.h> 24#include <linux/err.h>
25#include <linux/regulator/consumer.h> 25#include <linux/regulator/consumer.h>
26#include <linux/export.h> 26#include <linux/export.h>
27#include <linux/platform_device.h>
27 28
28#include <video/omapdss.h> 29#include <video/omapdss.h>
29#include "dss.h" 30#include "dss.h"
@@ -71,10 +72,6 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
71 if (r) 72 if (r)
72 goto err_reg_enable; 73 goto err_reg_enable;
73 74
74 r = dss_runtime_get();
75 if (r)
76 goto err_get_dss;
77
78 r = dispc_runtime_get(); 75 r = dispc_runtime_get();
79 if (r) 76 if (r)
80 goto err_get_dispc; 77 goto err_get_dispc;
@@ -107,7 +104,7 @@ int omapdss_sdi_display_enable(struct omap_dss_device *dssdev)
107 } 104 }
108 105
109 106
110 dispc_mgr_set_lcd_timings(dssdev->manager->id, t); 107 dss_mgr_set_timings(dssdev->manager, t);
111 108
112 r = dss_set_clock_div(&dss_cinfo); 109 r = dss_set_clock_div(&dss_cinfo);
113 if (r) 110 if (r)
@@ -137,8 +134,6 @@ err_set_dss_clock_div:
137err_calc_clock_div: 134err_calc_clock_div:
138 dispc_runtime_put(); 135 dispc_runtime_put();
139err_get_dispc: 136err_get_dispc:
140 dss_runtime_put();
141err_get_dss:
142 regulator_disable(sdi.vdds_sdi_reg); 137 regulator_disable(sdi.vdds_sdi_reg);
143err_reg_enable: 138err_reg_enable:
144 omap_dss_stop_device(dssdev); 139 omap_dss_stop_device(dssdev);
@@ -154,7 +149,6 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
154 dss_sdi_disable(); 149 dss_sdi_disable();
155 150
156 dispc_runtime_put(); 151 dispc_runtime_put();
157 dss_runtime_put();
158 152
159 regulator_disable(sdi.vdds_sdi_reg); 153 regulator_disable(sdi.vdds_sdi_reg);
160 154
@@ -162,7 +156,7 @@ void omapdss_sdi_display_disable(struct omap_dss_device *dssdev)
162} 156}
163EXPORT_SYMBOL(omapdss_sdi_display_disable); 157EXPORT_SYMBOL(omapdss_sdi_display_disable);
164 158
165int sdi_init_display(struct omap_dss_device *dssdev) 159static int __init sdi_init_display(struct omap_dss_device *dssdev)
166{ 160{
167 DSSDBG("SDI init\n"); 161 DSSDBG("SDI init\n");
168 162
@@ -182,11 +176,58 @@ int sdi_init_display(struct omap_dss_device *dssdev)
182 return 0; 176 return 0;
183} 177}
184 178
185int sdi_init(void) 179static void __init sdi_probe_pdata(struct platform_device *pdev)
180{
181 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
182 int i, r;
183
184 for (i = 0; i < pdata->num_devices; ++i) {
185 struct omap_dss_device *dssdev = pdata->devices[i];
186
187 if (dssdev->type != OMAP_DISPLAY_TYPE_SDI)
188 continue;
189
190 r = sdi_init_display(dssdev);
191 if (r) {
192 DSSERR("device %s init failed: %d\n", dssdev->name, r);
193 continue;
194 }
195
196 r = omap_dss_register_device(dssdev, &pdev->dev, i);
197 if (r)
198 DSSERR("device %s register failed: %d\n",
199 dssdev->name, r);
200 }
201}
202
203static int __init omap_sdi_probe(struct platform_device *pdev)
186{ 204{
205 sdi_probe_pdata(pdev);
206
207 return 0;
208}
209
210static int __exit omap_sdi_remove(struct platform_device *pdev)
211{
212 omap_dss_unregister_child_devices(&pdev->dev);
213
187 return 0; 214 return 0;
188} 215}
189 216
190void sdi_exit(void) 217static struct platform_driver omap_sdi_driver = {
218 .remove = __exit_p(omap_sdi_remove),
219 .driver = {
220 .name = "omapdss_sdi",
221 .owner = THIS_MODULE,
222 },
223};
224
225int __init sdi_init_platform_driver(void)
226{
227 return platform_driver_probe(&omap_sdi_driver, omap_sdi_probe);
228}
229
230void __exit sdi_uninit_platform_driver(void)
191{ 231{
232 platform_driver_unregister(&omap_sdi_driver);
192} 233}
diff --git a/drivers/video/omap2/dss/ti_hdmi.h b/drivers/video/omap2/dss/ti_hdmi.h
index 1f58b84d6901..e734cb444bc7 100644
--- a/drivers/video/omap2/dss/ti_hdmi.h
+++ b/drivers/video/omap2/dss/ti_hdmi.h
@@ -96,7 +96,9 @@ struct ti_hdmi_ip_ops {
96 96
97 void (*pll_disable)(struct hdmi_ip_data *ip_data); 97 void (*pll_disable)(struct hdmi_ip_data *ip_data);
98 98
99 void (*video_enable)(struct hdmi_ip_data *ip_data, bool start); 99 int (*video_enable)(struct hdmi_ip_data *ip_data);
100
101 void (*video_disable)(struct hdmi_ip_data *ip_data);
100 102
101 void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s); 103 void (*dump_wrapper)(struct hdmi_ip_data *ip_data, struct seq_file *s);
102 104
@@ -106,9 +108,17 @@ struct ti_hdmi_ip_ops {
106 108
107 void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s); 109 void (*dump_phy)(struct hdmi_ip_data *ip_data, struct seq_file *s);
108 110
109#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 111#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
110 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 112 int (*audio_enable)(struct hdmi_ip_data *ip_data);
111 void (*audio_enable)(struct hdmi_ip_data *ip_data, bool start); 113
114 void (*audio_disable)(struct hdmi_ip_data *ip_data);
115
116 int (*audio_start)(struct hdmi_ip_data *ip_data);
117
118 void (*audio_stop)(struct hdmi_ip_data *ip_data);
119
120 int (*audio_config)(struct hdmi_ip_data *ip_data,
121 struct omap_dss_audio *audio);
112#endif 122#endif
113 123
114}; 124};
@@ -173,7 +183,8 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data);
173void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data); 183void ti_hdmi_4xxx_phy_disable(struct hdmi_ip_data *ip_data);
174int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len); 184int ti_hdmi_4xxx_read_edid(struct hdmi_ip_data *ip_data, u8 *edid, int len);
175bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data); 185bool ti_hdmi_4xxx_detect(struct hdmi_ip_data *ip_data);
176void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start); 186int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data);
187void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data);
177int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data); 188int ti_hdmi_4xxx_pll_enable(struct hdmi_ip_data *ip_data);
178void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data); 189void ti_hdmi_4xxx_pll_disable(struct hdmi_ip_data *ip_data);
179void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data); 190void ti_hdmi_4xxx_basic_configure(struct hdmi_ip_data *ip_data);
@@ -181,8 +192,13 @@ void ti_hdmi_4xxx_wp_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
181void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 192void ti_hdmi_4xxx_pll_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
182void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 193void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
183void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s); 194void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s);
184#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 195#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
185 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 196int hdmi_compute_acr(u32 sample_freq, u32 *n, u32 *cts);
186void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable); 197int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data);
198void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data);
199int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data);
200void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data);
201int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
202 struct omap_dss_audio *audio);
187#endif 203#endif
188#endif 204#endif
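
The ti_hdmi.h hunks split the single video_enable(ip_data, start) hook into separate enable/disable callbacks and replace the ALSA-Kconfig-conditional audio hook with a full set (enable/disable/start/stop/config) guarded by CONFIG_OMAP4_DSS_HDMI_AUDIO. A sketch of how the OMAP4 ops table would be wired against the new prototypes; the initializer itself lives in hdmi.c, which is outside this excerpt, so the table name is shown only for illustration:

	static const struct ti_hdmi_ip_ops omap4_hdmi_functions = {
		/* ...PLL, PHY and other video callbacks elided... */
		.video_enable	= ti_hdmi_4xxx_wp_video_start,
		.video_disable	= ti_hdmi_4xxx_wp_video_stop,
	#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
		.audio_enable	= ti_hdmi_4xxx_wp_audio_enable,
		.audio_disable	= ti_hdmi_4xxx_wp_audio_disable,
		.audio_start	= ti_hdmi_4xxx_audio_start,
		.audio_stop	= ti_hdmi_4xxx_audio_stop,
		.audio_config	= ti_hdmi_4xxx_audio_config,
	#endif
	};
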
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
index bfe6fe65c8be..4dae1b291079 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.c
@@ -29,9 +29,14 @@
29#include <linux/string.h> 29#include <linux/string.h>
30#include <linux/seq_file.h> 30#include <linux/seq_file.h>
31#include <linux/gpio.h> 31#include <linux/gpio.h>
32#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
33#include <sound/asound.h>
34#include <sound/asoundef.h>
35#endif
32 36
33#include "ti_hdmi_4xxx_ip.h" 37#include "ti_hdmi_4xxx_ip.h"
34#include "dss.h" 38#include "dss.h"
39#include "dss_features.h"
35 40
36static inline void hdmi_write_reg(void __iomem *base_addr, 41static inline void hdmi_write_reg(void __iomem *base_addr,
37 const u16 idx, u32 val) 42 const u16 idx, u32 val)
@@ -298,9 +303,9 @@ int ti_hdmi_4xxx_phy_enable(struct hdmi_ip_data *ip_data)
298 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27); 303 REG_FLD_MOD(phy_base, HDMI_TXPHY_PAD_CFG_CTRL, 0x1, 27, 27);
299 304
300 r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio), 305 r = request_threaded_irq(gpio_to_irq(ip_data->hpd_gpio),
301 NULL, hpd_irq_handler, 306 NULL, hpd_irq_handler,
302 IRQF_DISABLED | IRQF_TRIGGER_RISING | 307 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
303 IRQF_TRIGGER_FALLING, "hpd", ip_data); 308 IRQF_ONESHOT, "hpd", ip_data);
304 if (r) { 309 if (r) {
305 DSSERR("HPD IRQ request failed\n"); 310 DSSERR("HPD IRQ request failed\n");
306 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF); 311 hdmi_set_phy_pwr(ip_data, HDMI_PHYPWRCMD_OFF);
@@ -699,9 +704,15 @@ static void hdmi_wp_init(struct omap_video_timings *timings,
699 704
700} 705}
701 706
702void ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data, bool start) 707int ti_hdmi_4xxx_wp_video_start(struct hdmi_ip_data *ip_data)
708{
709 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, true, 31, 31);
710 return 0;
711}
712
713void ti_hdmi_4xxx_wp_video_stop(struct hdmi_ip_data *ip_data)
703{ 714{
704 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, start, 31, 31); 715 REG_FLD_MOD(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, false, 31, 31);
705} 716}
706 717
707static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt, 718static void hdmi_wp_video_init_format(struct hdmi_video_format *video_fmt,
@@ -886,10 +897,12 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
886 897
887#define CORE_REG(i, name) name(i) 898#define CORE_REG(i, name) name(i)
888#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\ 899#define DUMPCORE(r) seq_printf(s, "%-35s %08x\n", #r,\
889 hdmi_read_reg(hdmi_pll_base(ip_data), r)) 900 hdmi_read_reg(hdmi_core_sys_base(ip_data), r))
890#define DUMPCOREAV(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \ 901#define DUMPCOREAV(r) seq_printf(s, "%-35s %08x\n", #r,\
902 hdmi_read_reg(hdmi_av_base(ip_data), r))
903#define DUMPCOREAV2(i, r) seq_printf(s, "%s[%d]%*s %08x\n", #r, i, \
891 (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \ 904 (i < 10) ? 32 - strlen(#r) : 31 - strlen(#r), " ", \
892 hdmi_read_reg(hdmi_pll_base(ip_data), CORE_REG(i, r))) 905 hdmi_read_reg(hdmi_av_base(ip_data), CORE_REG(i, r)))
893 906
894 DUMPCORE(HDMI_CORE_SYS_VND_IDL); 907 DUMPCORE(HDMI_CORE_SYS_VND_IDL);
895 DUMPCORE(HDMI_CORE_SYS_DEV_IDL); 908 DUMPCORE(HDMI_CORE_SYS_DEV_IDL);
@@ -898,6 +911,13 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
898 DUMPCORE(HDMI_CORE_SYS_SRST); 911 DUMPCORE(HDMI_CORE_SYS_SRST);
899 DUMPCORE(HDMI_CORE_CTRL1); 912 DUMPCORE(HDMI_CORE_CTRL1);
900 DUMPCORE(HDMI_CORE_SYS_SYS_STAT); 913 DUMPCORE(HDMI_CORE_SYS_SYS_STAT);
914 DUMPCORE(HDMI_CORE_SYS_DE_DLY);
915 DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
916 DUMPCORE(HDMI_CORE_SYS_DE_TOP);
917 DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
918 DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
919 DUMPCORE(HDMI_CORE_SYS_DE_LINL);
920 DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
901 DUMPCORE(HDMI_CORE_SYS_VID_ACEN); 921 DUMPCORE(HDMI_CORE_SYS_VID_ACEN);
902 DUMPCORE(HDMI_CORE_SYS_VID_MODE); 922 DUMPCORE(HDMI_CORE_SYS_VID_MODE);
903 DUMPCORE(HDMI_CORE_SYS_INTR_STATE); 923 DUMPCORE(HDMI_CORE_SYS_INTR_STATE);
@@ -907,102 +927,91 @@ void ti_hdmi_4xxx_core_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
907 DUMPCORE(HDMI_CORE_SYS_INTR4); 927 DUMPCORE(HDMI_CORE_SYS_INTR4);
908 DUMPCORE(HDMI_CORE_SYS_UMASK1); 928 DUMPCORE(HDMI_CORE_SYS_UMASK1);
909 DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL); 929 DUMPCORE(HDMI_CORE_SYS_TMDS_CTRL);
910 DUMPCORE(HDMI_CORE_SYS_DE_DLY);
911 DUMPCORE(HDMI_CORE_SYS_DE_CTRL);
912 DUMPCORE(HDMI_CORE_SYS_DE_TOP);
913 DUMPCORE(HDMI_CORE_SYS_DE_CNTL);
914 DUMPCORE(HDMI_CORE_SYS_DE_CNTH);
915 DUMPCORE(HDMI_CORE_SYS_DE_LINL);
916 DUMPCORE(HDMI_CORE_SYS_DE_LINH_1);
917 930
918 DUMPCORE(HDMI_CORE_DDC_CMD);
919 DUMPCORE(HDMI_CORE_DDC_STATUS);
920 DUMPCORE(HDMI_CORE_DDC_ADDR); 931 DUMPCORE(HDMI_CORE_DDC_ADDR);
932 DUMPCORE(HDMI_CORE_DDC_SEGM);
921 DUMPCORE(HDMI_CORE_DDC_OFFSET); 933 DUMPCORE(HDMI_CORE_DDC_OFFSET);
922 DUMPCORE(HDMI_CORE_DDC_COUNT1); 934 DUMPCORE(HDMI_CORE_DDC_COUNT1);
923 DUMPCORE(HDMI_CORE_DDC_COUNT2); 935 DUMPCORE(HDMI_CORE_DDC_COUNT2);
936 DUMPCORE(HDMI_CORE_DDC_STATUS);
937 DUMPCORE(HDMI_CORE_DDC_CMD);
924 DUMPCORE(HDMI_CORE_DDC_DATA); 938 DUMPCORE(HDMI_CORE_DDC_DATA);
925 DUMPCORE(HDMI_CORE_DDC_SEGM);
926 939
927 DUMPCORE(HDMI_CORE_AV_HDMI_CTRL); 940 DUMPCOREAV(HDMI_CORE_AV_ACR_CTRL);
928 DUMPCORE(HDMI_CORE_AV_DPD); 941 DUMPCOREAV(HDMI_CORE_AV_FREQ_SVAL);
929 DUMPCORE(HDMI_CORE_AV_PB_CTRL1); 942 DUMPCOREAV(HDMI_CORE_AV_N_SVAL1);
930 DUMPCORE(HDMI_CORE_AV_PB_CTRL2); 943 DUMPCOREAV(HDMI_CORE_AV_N_SVAL2);
931 DUMPCORE(HDMI_CORE_AV_AVI_TYPE); 944 DUMPCOREAV(HDMI_CORE_AV_N_SVAL3);
932 DUMPCORE(HDMI_CORE_AV_AVI_VERS); 945 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL1);
933 DUMPCORE(HDMI_CORE_AV_AVI_LEN); 946 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL2);
934 DUMPCORE(HDMI_CORE_AV_AVI_CHSUM); 947 DUMPCOREAV(HDMI_CORE_AV_CTS_SVAL3);
948 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL1);
949 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL2);
950 DUMPCOREAV(HDMI_CORE_AV_CTS_HVAL3);
951 DUMPCOREAV(HDMI_CORE_AV_AUD_MODE);
952 DUMPCOREAV(HDMI_CORE_AV_SPDIF_CTRL);
953 DUMPCOREAV(HDMI_CORE_AV_HW_SPDIF_FS);
954 DUMPCOREAV(HDMI_CORE_AV_SWAP_I2S);
955 DUMPCOREAV(HDMI_CORE_AV_SPDIF_ERTH);
956 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_MAP);
957 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_CTRL);
958 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST0);
959 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST1);
960 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST2);
961 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST4);
962 DUMPCOREAV(HDMI_CORE_AV_I2S_CHST5);
963 DUMPCOREAV(HDMI_CORE_AV_ASRC);
964 DUMPCOREAV(HDMI_CORE_AV_I2S_IN_LEN);
965 DUMPCOREAV(HDMI_CORE_AV_HDMI_CTRL);
966 DUMPCOREAV(HDMI_CORE_AV_AUDO_TXSTAT);
967 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
968 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
969 DUMPCOREAV(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
970 DUMPCOREAV(HDMI_CORE_AV_TEST_TXCTRL);
971 DUMPCOREAV(HDMI_CORE_AV_DPD);
972 DUMPCOREAV(HDMI_CORE_AV_PB_CTRL1);
973 DUMPCOREAV(HDMI_CORE_AV_PB_CTRL2);
974 DUMPCOREAV(HDMI_CORE_AV_AVI_TYPE);
975 DUMPCOREAV(HDMI_CORE_AV_AVI_VERS);
976 DUMPCOREAV(HDMI_CORE_AV_AVI_LEN);
977 DUMPCOREAV(HDMI_CORE_AV_AVI_CHSUM);
935 978
936 for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++) 979 for (i = 0; i < HDMI_CORE_AV_AVI_DBYTE_NELEMS; i++)
937 DUMPCOREAV(i, HDMI_CORE_AV_AVI_DBYTE); 980 DUMPCOREAV2(i, HDMI_CORE_AV_AVI_DBYTE);
981
982 DUMPCOREAV(HDMI_CORE_AV_SPD_TYPE);
983 DUMPCOREAV(HDMI_CORE_AV_SPD_VERS);
984 DUMPCOREAV(HDMI_CORE_AV_SPD_LEN);
985 DUMPCOREAV(HDMI_CORE_AV_SPD_CHSUM);
938 986
939 for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++) 987 for (i = 0; i < HDMI_CORE_AV_SPD_DBYTE_NELEMS; i++)
940 DUMPCOREAV(i, HDMI_CORE_AV_SPD_DBYTE); 988 DUMPCOREAV2(i, HDMI_CORE_AV_SPD_DBYTE);
989
990 DUMPCOREAV(HDMI_CORE_AV_AUDIO_TYPE);
991 DUMPCOREAV(HDMI_CORE_AV_AUDIO_VERS);
992 DUMPCOREAV(HDMI_CORE_AV_AUDIO_LEN);
993 DUMPCOREAV(HDMI_CORE_AV_AUDIO_CHSUM);
941 994
942 for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++) 995 for (i = 0; i < HDMI_CORE_AV_AUD_DBYTE_NELEMS; i++)
943 DUMPCOREAV(i, HDMI_CORE_AV_AUD_DBYTE); 996 DUMPCOREAV2(i, HDMI_CORE_AV_AUD_DBYTE);
997
998 DUMPCOREAV(HDMI_CORE_AV_MPEG_TYPE);
999 DUMPCOREAV(HDMI_CORE_AV_MPEG_VERS);
1000 DUMPCOREAV(HDMI_CORE_AV_MPEG_LEN);
1001 DUMPCOREAV(HDMI_CORE_AV_MPEG_CHSUM);
944 1002
945 for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++) 1003 for (i = 0; i < HDMI_CORE_AV_MPEG_DBYTE_NELEMS; i++)
946 DUMPCOREAV(i, HDMI_CORE_AV_MPEG_DBYTE); 1004 DUMPCOREAV2(i, HDMI_CORE_AV_MPEG_DBYTE);
947 1005
948 for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++) 1006 for (i = 0; i < HDMI_CORE_AV_GEN_DBYTE_NELEMS; i++)
949 DUMPCOREAV(i, HDMI_CORE_AV_GEN_DBYTE); 1007 DUMPCOREAV2(i, HDMI_CORE_AV_GEN_DBYTE);
1008
1009 DUMPCOREAV(HDMI_CORE_AV_CP_BYTE1);
950 1010
951 for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++) 1011 for (i = 0; i < HDMI_CORE_AV_GEN2_DBYTE_NELEMS; i++)
952 DUMPCOREAV(i, HDMI_CORE_AV_GEN2_DBYTE); 1012 DUMPCOREAV2(i, HDMI_CORE_AV_GEN2_DBYTE);
953 1013
954 DUMPCORE(HDMI_CORE_AV_ACR_CTRL); 1014 DUMPCOREAV(HDMI_CORE_AV_CEC_ADDR_ID);
955 DUMPCORE(HDMI_CORE_AV_FREQ_SVAL);
956 DUMPCORE(HDMI_CORE_AV_N_SVAL1);
957 DUMPCORE(HDMI_CORE_AV_N_SVAL2);
958 DUMPCORE(HDMI_CORE_AV_N_SVAL3);
959 DUMPCORE(HDMI_CORE_AV_CTS_SVAL1);
960 DUMPCORE(HDMI_CORE_AV_CTS_SVAL2);
961 DUMPCORE(HDMI_CORE_AV_CTS_SVAL3);
962 DUMPCORE(HDMI_CORE_AV_CTS_HVAL1);
963 DUMPCORE(HDMI_CORE_AV_CTS_HVAL2);
964 DUMPCORE(HDMI_CORE_AV_CTS_HVAL3);
965 DUMPCORE(HDMI_CORE_AV_AUD_MODE);
966 DUMPCORE(HDMI_CORE_AV_SPDIF_CTRL);
967 DUMPCORE(HDMI_CORE_AV_HW_SPDIF_FS);
968 DUMPCORE(HDMI_CORE_AV_SWAP_I2S);
969 DUMPCORE(HDMI_CORE_AV_SPDIF_ERTH);
970 DUMPCORE(HDMI_CORE_AV_I2S_IN_MAP);
971 DUMPCORE(HDMI_CORE_AV_I2S_IN_CTRL);
972 DUMPCORE(HDMI_CORE_AV_I2S_CHST0);
973 DUMPCORE(HDMI_CORE_AV_I2S_CHST1);
974 DUMPCORE(HDMI_CORE_AV_I2S_CHST2);
975 DUMPCORE(HDMI_CORE_AV_I2S_CHST4);
976 DUMPCORE(HDMI_CORE_AV_I2S_CHST5);
977 DUMPCORE(HDMI_CORE_AV_ASRC);
978 DUMPCORE(HDMI_CORE_AV_I2S_IN_LEN);
979 DUMPCORE(HDMI_CORE_AV_HDMI_CTRL);
980 DUMPCORE(HDMI_CORE_AV_AUDO_TXSTAT);
981 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_1);
982 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_2);
983 DUMPCORE(HDMI_CORE_AV_AUD_PAR_BUSCLK_3);
984 DUMPCORE(HDMI_CORE_AV_TEST_TXCTRL);
985 DUMPCORE(HDMI_CORE_AV_DPD);
986 DUMPCORE(HDMI_CORE_AV_PB_CTRL1);
987 DUMPCORE(HDMI_CORE_AV_PB_CTRL2);
988 DUMPCORE(HDMI_CORE_AV_AVI_TYPE);
989 DUMPCORE(HDMI_CORE_AV_AVI_VERS);
990 DUMPCORE(HDMI_CORE_AV_AVI_LEN);
991 DUMPCORE(HDMI_CORE_AV_AVI_CHSUM);
992 DUMPCORE(HDMI_CORE_AV_SPD_TYPE);
993 DUMPCORE(HDMI_CORE_AV_SPD_VERS);
994 DUMPCORE(HDMI_CORE_AV_SPD_LEN);
995 DUMPCORE(HDMI_CORE_AV_SPD_CHSUM);
996 DUMPCORE(HDMI_CORE_AV_AUDIO_TYPE);
997 DUMPCORE(HDMI_CORE_AV_AUDIO_VERS);
998 DUMPCORE(HDMI_CORE_AV_AUDIO_LEN);
999 DUMPCORE(HDMI_CORE_AV_AUDIO_CHSUM);
1000 DUMPCORE(HDMI_CORE_AV_MPEG_TYPE);
1001 DUMPCORE(HDMI_CORE_AV_MPEG_VERS);
1002 DUMPCORE(HDMI_CORE_AV_MPEG_LEN);
1003 DUMPCORE(HDMI_CORE_AV_MPEG_CHSUM);
1004 DUMPCORE(HDMI_CORE_AV_CP_BYTE1);
1005 DUMPCORE(HDMI_CORE_AV_CEC_ADDR_ID);
1006} 1015}
1007 1016
1008void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s) 1017void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
@@ -1016,9 +1025,8 @@ void ti_hdmi_4xxx_phy_dump(struct hdmi_ip_data *ip_data, struct seq_file *s)
1016 DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL); 1025 DUMPPHY(HDMI_TXPHY_PAD_CFG_CTRL);
1017} 1026}
1018 1027
1019#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \ 1028#if defined(CONFIG_OMAP4_DSS_HDMI_AUDIO)
1020 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE) 1029static void ti_hdmi_4xxx_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1021void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1022 struct hdmi_audio_format *aud_fmt) 1030 struct hdmi_audio_format *aud_fmt)
1023{ 1031{
1024 u32 r; 1032 u32 r;
@@ -1037,7 +1045,7 @@ void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
1037 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r); 1045 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CFG, r);
1038} 1046}
1039 1047
1040void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data, 1048static void ti_hdmi_4xxx_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
1041 struct hdmi_audio_dma *aud_dma) 1049 struct hdmi_audio_dma *aud_dma)
1042{ 1050{
1043 u32 r; 1051 u32 r;
@@ -1055,7 +1063,7 @@ void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
1055 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r); 1063 hdmi_write_reg(hdmi_wp_base(ip_data), HDMI_WP_AUDIO_CTRL, r);
1056} 1064}
1057 1065
1058void hdmi_core_audio_config(struct hdmi_ip_data *ip_data, 1066static void ti_hdmi_4xxx_core_audio_config(struct hdmi_ip_data *ip_data,
1059 struct hdmi_core_audio_config *cfg) 1067 struct hdmi_core_audio_config *cfg)
1060{ 1068{
1061 u32 r; 1069 u32 r;
@@ -1106,27 +1114,33 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
1106 REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL, 1114 REG_FLD_MOD(av_base, HDMI_CORE_AV_SPDIF_CTRL,
1107 cfg->fs_override, 1, 1); 1115 cfg->fs_override, 1, 1);
1108 1116
1109 /* I2S parameters */ 1117 /*
1110 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_CHST4, 1118 * Set IEC-60958-3 channel status word. It is passed to the IP
1111 cfg->freq_sample, 3, 0); 1119 * just as it is received. The user of the driver is responsible
1112 1120 * for its contents.
1121 */
1122 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST0,
1123 cfg->iec60958_cfg->status[0]);
1124 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST1,
1125 cfg->iec60958_cfg->status[1]);
1126 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST2,
1127 cfg->iec60958_cfg->status[2]);
1128 /* yes, this is correct: status[3] goes to CHST4 register */
1129 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST4,
1130 cfg->iec60958_cfg->status[3]);
1131 /* yes, this is correct: status[4] goes to CHST5 register */
1132 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5,
1133 cfg->iec60958_cfg->status[4]);
1134
1135 /* set I2S parameters */
1113 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL); 1136 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL);
1114 r = FLD_MOD(r, cfg->i2s_cfg.en_high_bitrate_aud, 7, 7);
1115 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6); 1137 r = FLD_MOD(r, cfg->i2s_cfg.sck_edge_mode, 6, 6);
1116 r = FLD_MOD(r, cfg->i2s_cfg.cbit_order, 5, 5);
1117 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4); 1138 r = FLD_MOD(r, cfg->i2s_cfg.vbit, 4, 4);
1118 r = FLD_MOD(r, cfg->i2s_cfg.ws_polarity, 3, 3);
1119 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2); 1139 r = FLD_MOD(r, cfg->i2s_cfg.justification, 2, 2);
1120 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1); 1140 r = FLD_MOD(r, cfg->i2s_cfg.direction, 1, 1);
1121 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0); 1141 r = FLD_MOD(r, cfg->i2s_cfg.shift, 0, 0);
1122 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r); 1142 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_CTRL, r);
1123 1143
1124 r = hdmi_read_reg(av_base, HDMI_CORE_AV_I2S_CHST5);
1125 r = FLD_MOD(r, cfg->freq_sample, 7, 4);
1126 r = FLD_MOD(r, cfg->i2s_cfg.word_length, 3, 1);
1127 r = FLD_MOD(r, cfg->i2s_cfg.word_max_length, 0, 0);
1128 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_CHST5, r);
1129
1130 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN, 1144 REG_FLD_MOD(av_base, HDMI_CORE_AV_I2S_IN_LEN,
1131 cfg->i2s_cfg.in_length_bits, 3, 0); 1145 cfg->i2s_cfg.in_length_bits, 3, 0);
1132 1146
@@ -1138,12 +1152,19 @@ void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
1138 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2); 1152 r = FLD_MOD(r, cfg->en_parallel_aud_input, 2, 2);
1139 r = FLD_MOD(r, cfg->en_spdif, 1, 1); 1153 r = FLD_MOD(r, cfg->en_spdif, 1, 1);
1140 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r); 1154 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_MODE, r);
1155
1156 /* Audio channel mappings */
1157 /* TODO: Make channel mapping dynamic. For now, map channels
1158 * in the ALSA order: FL/FR/RL/RR/C/LFE/SL/SR. Remapping is needed as
1159 * HDMI speaker order is different. See CEA-861 Section 6.6.2.
1160 */
1161 hdmi_write_reg(av_base, HDMI_CORE_AV_I2S_IN_MAP, 0x78);
1162 REG_FLD_MOD(av_base, HDMI_CORE_AV_SWAP_I2S, 1, 5, 5);
1141} 1163}
1142 1164
1143void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data, 1165static void ti_hdmi_4xxx_core_audio_infoframe_cfg(struct hdmi_ip_data *ip_data,
1144 struct hdmi_core_infoframe_audio *info_aud) 1166 struct snd_cea_861_aud_if *info_aud)
1145{ 1167{
1146 u8 val;
1147 u8 sum = 0, checksum = 0; 1168 u8 sum = 0, checksum = 0;
1148 void __iomem *av_base = hdmi_av_base(ip_data); 1169 void __iomem *av_base = hdmi_av_base(ip_data);
1149 1170
@@ -1157,24 +1178,23 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
1157 hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a); 1178 hdmi_write_reg(av_base, HDMI_CORE_AV_AUDIO_LEN, 0x0a);
1158 sum += 0x84 + 0x001 + 0x00a; 1179 sum += 0x84 + 0x001 + 0x00a;
1159 1180
1160 val = (info_aud->db1_coding_type << 4) 1181 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0),
1161 | (info_aud->db1_channel_count - 1); 1182 info_aud->db1_ct_cc);
1162 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(0), val); 1183 sum += info_aud->db1_ct_cc;
1163 sum += val;
1164 1184
1165 val = (info_aud->db2_sample_freq << 2) | info_aud->db2_sample_size; 1185 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1),
1166 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(1), val); 1186 info_aud->db2_sf_ss);
1167 sum += val; 1187 sum += info_aud->db2_sf_ss;
1168 1188
1169 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), 0x00); 1189 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(2), info_aud->db3);
1190 sum += info_aud->db3;
1170 1191
1171 val = info_aud->db4_channel_alloc; 1192 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), info_aud->db4_ca);
1172 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(3), val); 1193 sum += info_aud->db4_ca;
1173 sum += val;
1174 1194
1175 val = (info_aud->db5_downmix_inh << 7) | (info_aud->db5_lsv << 3); 1195 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4),
1176 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(4), val); 1196 info_aud->db5_dminh_lsv);
1177 sum += val; 1197 sum += info_aud->db5_dminh_lsv;
1178 1198
1179 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00); 1199 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(5), 0x00);
1180 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00); 1200 hdmi_write_reg(av_base, HDMI_CORE_AV_AUD_DBYTE(6), 0x00);
@@ -1192,70 +1212,212 @@ void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
1192 */ 1212 */
1193} 1213}
1194 1214
1195int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data, 1215int ti_hdmi_4xxx_audio_config(struct hdmi_ip_data *ip_data,
1196 u32 sample_freq, u32 *n, u32 *cts) 1216 struct omap_dss_audio *audio)
1197{ 1217{
1198 u32 r; 1218 struct hdmi_audio_format audio_format;
1199 u32 deep_color = 0; 1219 struct hdmi_audio_dma audio_dma;
1200 u32 pclk = ip_data->cfg.timings.pixel_clock; 1220 struct hdmi_core_audio_config core;
1201 1221 int err, n, cts, channel_count;
1202 if (n == NULL || cts == NULL) 1222 unsigned int fs_nr;
1223 bool word_length_16b = false;
1224
1225 if (!audio || !audio->iec || !audio->cea || !ip_data)
1203 return -EINVAL; 1226 return -EINVAL;
1227
1228 core.iec60958_cfg = audio->iec;
1204 /* 1229 /*
1205 * Obtain current deep color configuration. This needed 1230 * In the IEC-60958 status word, check if the audio sample word length
1206 * to calculate the TMDS clock based on the pixel clock. 1231 * is 16-bit as several optimizations can be performed in such case.
1207 */ 1232 */
1208 r = REG_GET(hdmi_wp_base(ip_data), HDMI_WP_VIDEO_CFG, 1, 0); 1233 if (!(audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24))
1209 switch (r) { 1234 if (audio->iec->status[4] & IEC958_AES4_CON_WORDLEN_20_16)
1210 case 1: /* No deep color selected */ 1235 word_length_16b = true;
1211 deep_color = 100; 1236
1237 /* I2S configuration. See Phillips' specification */
1238 if (word_length_16b)
1239 core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1240 else
1241 core.i2s_cfg.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1242 /*
 1243	 * The I2S input word length is twice the length given in the IEC-60958
1244 * status word. If the word size is greater than
1245 * 20 bits, increment by one.
1246 */
1247 core.i2s_cfg.in_length_bits = audio->iec->status[4]
1248 & IEC958_AES4_CON_WORDLEN;
1249 if (audio->iec->status[4] & IEC958_AES4_CON_MAX_WORDLEN_24)
1250 core.i2s_cfg.in_length_bits++;
1251 core.i2s_cfg.sck_edge_mode = HDMI_AUDIO_I2S_SCK_EDGE_RISING;
1252 core.i2s_cfg.vbit = HDMI_AUDIO_I2S_VBIT_FOR_PCM;
1253 core.i2s_cfg.direction = HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST;
1254 core.i2s_cfg.shift = HDMI_AUDIO_I2S_FIRST_BIT_SHIFT;
1255
1256 /* convert sample frequency to a number */
1257 switch (audio->iec->status[3] & IEC958_AES3_CON_FS) {
1258 case IEC958_AES3_CON_FS_32000:
1259 fs_nr = 32000;
1260 break;
1261 case IEC958_AES3_CON_FS_44100:
1262 fs_nr = 44100;
1263 break;
1264 case IEC958_AES3_CON_FS_48000:
1265 fs_nr = 48000;
1212 break; 1266 break;
1213 case 2: /* 10-bit deep color selected */ 1267 case IEC958_AES3_CON_FS_88200:
1214 deep_color = 125; 1268 fs_nr = 88200;
1215 break; 1269 break;
1216 case 3: /* 12-bit deep color selected */ 1270 case IEC958_AES3_CON_FS_96000:
1217 deep_color = 150; 1271 fs_nr = 96000;
1272 break;
1273 case IEC958_AES3_CON_FS_176400:
1274 fs_nr = 176400;
1275 break;
1276 case IEC958_AES3_CON_FS_192000:
1277 fs_nr = 192000;
1218 break; 1278 break;
1219 default: 1279 default:
1220 return -EINVAL; 1280 return -EINVAL;
1221 } 1281 }
1222 1282
1223 switch (sample_freq) { 1283 err = hdmi_compute_acr(fs_nr, &n, &cts);
1224 case 32000: 1284
1225 if ((deep_color == 125) && ((pclk == 54054) 1285 /* Audio clock regeneration settings */
1226 || (pclk == 74250))) 1286 core.n = n;
1227 *n = 8192; 1287 core.cts = cts;
1228 else 1288 if (dss_has_feature(FEAT_HDMI_CTS_SWMODE)) {
1229 *n = 4096; 1289 core.aud_par_busclk = 0;
1290 core.cts_mode = HDMI_AUDIO_CTS_MODE_SW;
1291 core.use_mclk = dss_has_feature(FEAT_HDMI_AUDIO_USE_MCLK);
1292 } else {
1293 core.aud_par_busclk = (((128 * 31) - 1) << 8);
1294 core.cts_mode = HDMI_AUDIO_CTS_MODE_HW;
1295 core.use_mclk = true;
1296 }
1297
1298 if (core.use_mclk)
1299 core.mclk_mode = HDMI_AUDIO_MCLK_128FS;
1300
1301 /* Audio channels settings */
1302 channel_count = (audio->cea->db1_ct_cc &
1303 CEA861_AUDIO_INFOFRAME_DB1CC) + 1;
1304
1305 switch (channel_count) {
1306 case 2:
1307 audio_format.active_chnnls_msk = 0x03;
1308 break;
1309 case 3:
1310 audio_format.active_chnnls_msk = 0x07;
1311 break;
1312 case 4:
1313 audio_format.active_chnnls_msk = 0x0f;
1314 break;
1315 case 5:
1316 audio_format.active_chnnls_msk = 0x1f;
1230 break; 1317 break;
1231 case 44100: 1318 case 6:
1232 *n = 6272; 1319 audio_format.active_chnnls_msk = 0x3f;
1233 break; 1320 break;
1234 case 48000: 1321 case 7:
1235 if ((deep_color == 125) && ((pclk == 54054) 1322 audio_format.active_chnnls_msk = 0x7f;
1236 || (pclk == 74250))) 1323 break;
1237 *n = 8192; 1324 case 8:
1238 else 1325 audio_format.active_chnnls_msk = 0xff;
1239 *n = 6144;
1240 break; 1326 break;
1241 default: 1327 default:
1242 *n = 0;
1243 return -EINVAL; 1328 return -EINVAL;
1244 } 1329 }
1245 1330
1246 /* Calculate CTS. See HDMI 1.3a or 1.4a specifications */ 1331 /*
1247 *cts = pclk * (*n / 128) * deep_color / (sample_freq / 10); 1332 * the HDMI IP needs to enable four stereo channels when transmitting
1333 * more than 2 audio channels
1334 */
1335 if (channel_count == 2) {
1336 audio_format.stereo_channels = HDMI_AUDIO_STEREO_ONECHANNEL;
1337 core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN;
1338 core.layout = HDMI_AUDIO_LAYOUT_2CH;
1339 } else {
1340 audio_format.stereo_channels = HDMI_AUDIO_STEREO_FOURCHANNELS;
1341 core.i2s_cfg.active_sds = HDMI_AUDIO_I2S_SD0_EN |
1342 HDMI_AUDIO_I2S_SD1_EN | HDMI_AUDIO_I2S_SD2_EN |
1343 HDMI_AUDIO_I2S_SD3_EN;
1344 core.layout = HDMI_AUDIO_LAYOUT_8CH;
1345 }
1346
1347 core.en_spdif = false;
1348 /* use sample frequency from channel status word */
1349 core.fs_override = true;
1350 /* enable ACR packets */
1351 core.en_acr_pkt = true;
1352 /* disable direct streaming digital audio */
1353 core.en_dsd_audio = false;
1354 /* use parallel audio interface */
1355 core.en_parallel_aud_input = true;
1356
1357 /* DMA settings */
1358 if (word_length_16b)
1359 audio_dma.transfer_size = 0x10;
1360 else
1361 audio_dma.transfer_size = 0x20;
1362 audio_dma.block_size = 0xC0;
1363 audio_dma.mode = HDMI_AUDIO_TRANSF_DMA;
1364 audio_dma.fifo_threshold = 0x20; /* in number of samples */
1365
1366 /* audio FIFO format settings */
1367 if (word_length_16b) {
1368 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_TWOSAMPLES;
1369 audio_format.sample_size = HDMI_AUDIO_SAMPLE_16BITS;
1370 audio_format.justification = HDMI_AUDIO_JUSTIFY_LEFT;
1371 } else {
1372 audio_format.samples_per_word = HDMI_AUDIO_ONEWORD_ONESAMPLE;
1373 audio_format.sample_size = HDMI_AUDIO_SAMPLE_24BITS;
1374 audio_format.justification = HDMI_AUDIO_JUSTIFY_RIGHT;
1375 }
1376 audio_format.type = HDMI_AUDIO_TYPE_LPCM;
1377 audio_format.sample_order = HDMI_AUDIO_SAMPLE_LEFT_FIRST;
1378 /* disable start/stop signals of IEC 60958 blocks */
1379 audio_format.en_sig_blk_strt_end = HDMI_AUDIO_BLOCK_SIG_STARTEND_ON;
1380
1381 /* configure DMA and audio FIFO format*/
1382 ti_hdmi_4xxx_wp_audio_config_dma(ip_data, &audio_dma);
1383 ti_hdmi_4xxx_wp_audio_config_format(ip_data, &audio_format);
1384
1385 /* configure the core*/
1386 ti_hdmi_4xxx_core_audio_config(ip_data, &core);
1387
1388 /* configure CEA 861 audio infoframe*/
1389 ti_hdmi_4xxx_core_audio_infoframe_cfg(ip_data, audio->cea);
1248 1390
1249 return 0; 1391 return 0;
1250} 1392}
1251 1393
1252void ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data, bool enable) 1394int ti_hdmi_4xxx_wp_audio_enable(struct hdmi_ip_data *ip_data)
1395{
1396 REG_FLD_MOD(hdmi_wp_base(ip_data),
1397 HDMI_WP_AUDIO_CTRL, true, 31, 31);
1398 return 0;
1399}
1400
1401void ti_hdmi_4xxx_wp_audio_disable(struct hdmi_ip_data *ip_data)
1402{
1403 REG_FLD_MOD(hdmi_wp_base(ip_data),
1404 HDMI_WP_AUDIO_CTRL, false, 31, 31);
1405}
1406
1407int ti_hdmi_4xxx_audio_start(struct hdmi_ip_data *ip_data)
1253{ 1408{
1254 REG_FLD_MOD(hdmi_av_base(ip_data), 1409 REG_FLD_MOD(hdmi_av_base(ip_data),
1255 HDMI_CORE_AV_AUD_MODE, enable, 0, 0); 1410 HDMI_CORE_AV_AUD_MODE, true, 0, 0);
1256 REG_FLD_MOD(hdmi_wp_base(ip_data), 1411 REG_FLD_MOD(hdmi_wp_base(ip_data),
1257 HDMI_WP_AUDIO_CTRL, enable, 31, 31); 1412 HDMI_WP_AUDIO_CTRL, true, 30, 30);
1413 return 0;
1414}
1415
1416void ti_hdmi_4xxx_audio_stop(struct hdmi_ip_data *ip_data)
1417{
1418 REG_FLD_MOD(hdmi_av_base(ip_data),
1419 HDMI_CORE_AV_AUD_MODE, false, 0, 0);
1258 REG_FLD_MOD(hdmi_wp_base(ip_data), 1420 REG_FLD_MOD(hdmi_wp_base(ip_data),
1259 HDMI_WP_AUDIO_CTRL, enable, 30, 30); 1421 HDMI_WP_AUDIO_CTRL, false, 30, 30);
1260} 1422}
1261#endif 1423#endif
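
The removed hdmi_config_audio_acr() table is replaced by a call to hdmi_compute_acr(), whose body is not part of this hunk. The relationship it has to satisfy (HDMI 1.4a audio clock regeneration) is visible in the deleted code: N takes the recommended value per sample rate and CTS = f_TMDS * N / (128 * fs). A hedged, stand-alone sketch of that computation — not the kernel's implementation — using the same recommended N values the old switch statement carried:

	/* Recommended ACR N values (HDMI 1.4a) for the common sample rates. */
	static int example_compute_acr(u32 fs, u32 tmds_khz, u32 *n, u32 *cts)
	{
		switch (fs) {
		case 32000:	*n = 4096;	break;
		case 44100:	*n = 6272;	break;
		case 48000:	*n = 6144;	break;
		case 88200:	*n = 6272 * 2;	break;
		case 96000:	*n = 6144 * 2;	break;
		case 176400:	*n = 6272 * 4;	break;
		case 192000:	*n = 6144 * 4;	break;
		default:	return -EINVAL;
		}

		/*
		 * CTS = f_TMDS * N / (128 * fs); fs / 10 is exact for all of the
		 * rates above, and the product stays within 32 bits for TMDS
		 * clocks below roughly 220 MHz.
		 */
		*cts = tmds_khz * (*n / 128) * 100 / (fs / 10);
		return 0;
	}

The deep-color scaling that the old code folded into the pixel clock would, under this scheme, already be reflected in the TMDS rate passed in.
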
diff --git a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
index a14d1a0e6e41..8366ae19e82e 100644
--- a/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
+++ b/drivers/video/omap2/dss/ti_hdmi_4xxx_ip.h
@@ -24,11 +24,6 @@
24#include <linux/string.h> 24#include <linux/string.h>
25#include <video/omapdss.h> 25#include <video/omapdss.h>
26#include "ti_hdmi.h" 26#include "ti_hdmi.h"
27#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
28 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
29#include <sound/soc.h>
30#include <sound/pcm_params.h>
31#endif
32 27
33/* HDMI Wrapper */ 28/* HDMI Wrapper */
34 29
@@ -57,6 +52,13 @@
57#define HDMI_CORE_SYS_SRST 0x14 52#define HDMI_CORE_SYS_SRST 0x14
58#define HDMI_CORE_CTRL1 0x20 53#define HDMI_CORE_CTRL1 0x20
59#define HDMI_CORE_SYS_SYS_STAT 0x24 54#define HDMI_CORE_SYS_SYS_STAT 0x24
55#define HDMI_CORE_SYS_DE_DLY 0xC8
56#define HDMI_CORE_SYS_DE_CTRL 0xCC
57#define HDMI_CORE_SYS_DE_TOP 0xD0
58#define HDMI_CORE_SYS_DE_CNTL 0xD8
59#define HDMI_CORE_SYS_DE_CNTH 0xDC
60#define HDMI_CORE_SYS_DE_LINL 0xE0
61#define HDMI_CORE_SYS_DE_LINH_1 0xE4
60#define HDMI_CORE_SYS_VID_ACEN 0x124 62#define HDMI_CORE_SYS_VID_ACEN 0x124
61#define HDMI_CORE_SYS_VID_MODE 0x128 63#define HDMI_CORE_SYS_VID_MODE 0x128
62#define HDMI_CORE_SYS_INTR_STATE 0x1C0 64#define HDMI_CORE_SYS_INTR_STATE 0x1C0
@@ -66,50 +68,24 @@
66#define HDMI_CORE_SYS_INTR4 0x1D0 68#define HDMI_CORE_SYS_INTR4 0x1D0
67#define HDMI_CORE_SYS_UMASK1 0x1D4 69#define HDMI_CORE_SYS_UMASK1 0x1D4
68#define HDMI_CORE_SYS_TMDS_CTRL 0x208 70#define HDMI_CORE_SYS_TMDS_CTRL 0x208
69#define HDMI_CORE_SYS_DE_DLY 0xC8 71
70#define HDMI_CORE_SYS_DE_CTRL 0xCC
71#define HDMI_CORE_SYS_DE_TOP 0xD0
72#define HDMI_CORE_SYS_DE_CNTL 0xD8
73#define HDMI_CORE_SYS_DE_CNTH 0xDC
74#define HDMI_CORE_SYS_DE_LINL 0xE0
75#define HDMI_CORE_SYS_DE_LINH_1 0xE4
76#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1 72#define HDMI_CORE_CTRL1_VEN_FOLLOWVSYNC 0x1
77#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1 73#define HDMI_CORE_CTRL1_HEN_FOLLOWHSYNC 0x1
78#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1 74#define HDMI_CORE_CTRL1_BSEL_24BITBUS 0x1
79#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1 75#define HDMI_CORE_CTRL1_EDGE_RISINGEDGE 0x1
80 76
81/* HDMI DDC E-DID */ 77/* HDMI DDC E-DID */
82#define HDMI_CORE_DDC_CMD 0x3CC
83#define HDMI_CORE_DDC_STATUS 0x3C8
84#define HDMI_CORE_DDC_ADDR 0x3B4 78#define HDMI_CORE_DDC_ADDR 0x3B4
79#define HDMI_CORE_DDC_SEGM 0x3B8
85#define HDMI_CORE_DDC_OFFSET 0x3BC 80#define HDMI_CORE_DDC_OFFSET 0x3BC
86#define HDMI_CORE_DDC_COUNT1 0x3C0 81#define HDMI_CORE_DDC_COUNT1 0x3C0
87#define HDMI_CORE_DDC_COUNT2 0x3C4 82#define HDMI_CORE_DDC_COUNT2 0x3C4
83#define HDMI_CORE_DDC_STATUS 0x3C8
84#define HDMI_CORE_DDC_CMD 0x3CC
88#define HDMI_CORE_DDC_DATA 0x3D0 85#define HDMI_CORE_DDC_DATA 0x3D0
89#define HDMI_CORE_DDC_SEGM 0x3B8
90 86
91/* HDMI IP Core Audio Video */ 87/* HDMI IP Core Audio Video */
92 88
93#define HDMI_CORE_AV_HDMI_CTRL 0xBC
94#define HDMI_CORE_AV_DPD 0xF4
95#define HDMI_CORE_AV_PB_CTRL1 0xF8
96#define HDMI_CORE_AV_PB_CTRL2 0xFC
97#define HDMI_CORE_AV_AVI_TYPE 0x100
98#define HDMI_CORE_AV_AVI_VERS 0x104
99#define HDMI_CORE_AV_AVI_LEN 0x108
100#define HDMI_CORE_AV_AVI_CHSUM 0x10C
101#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
102#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
103#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
104#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
105#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
106#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
107#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
108#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
109#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
110#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
111#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
112#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
113#define HDMI_CORE_AV_ACR_CTRL 0x4 89#define HDMI_CORE_AV_ACR_CTRL 0x4
114#define HDMI_CORE_AV_FREQ_SVAL 0x8 90#define HDMI_CORE_AV_FREQ_SVAL 0x8
115#define HDMI_CORE_AV_N_SVAL1 0xC 91#define HDMI_CORE_AV_N_SVAL1 0xC
@@ -148,25 +124,39 @@
148#define HDMI_CORE_AV_AVI_VERS 0x104 124#define HDMI_CORE_AV_AVI_VERS 0x104
149#define HDMI_CORE_AV_AVI_LEN 0x108 125#define HDMI_CORE_AV_AVI_LEN 0x108
150#define HDMI_CORE_AV_AVI_CHSUM 0x10C 126#define HDMI_CORE_AV_AVI_CHSUM 0x10C
127#define HDMI_CORE_AV_AVI_DBYTE(n) (n * 4 + 0x110)
151#define HDMI_CORE_AV_SPD_TYPE 0x180 128#define HDMI_CORE_AV_SPD_TYPE 0x180
152#define HDMI_CORE_AV_SPD_VERS 0x184 129#define HDMI_CORE_AV_SPD_VERS 0x184
153#define HDMI_CORE_AV_SPD_LEN 0x188 130#define HDMI_CORE_AV_SPD_LEN 0x188
154#define HDMI_CORE_AV_SPD_CHSUM 0x18C 131#define HDMI_CORE_AV_SPD_CHSUM 0x18C
132#define HDMI_CORE_AV_SPD_DBYTE(n) (n * 4 + 0x190)
155#define HDMI_CORE_AV_AUDIO_TYPE 0x200 133#define HDMI_CORE_AV_AUDIO_TYPE 0x200
156#define HDMI_CORE_AV_AUDIO_VERS 0x204 134#define HDMI_CORE_AV_AUDIO_VERS 0x204
157#define HDMI_CORE_AV_AUDIO_LEN 0x208 135#define HDMI_CORE_AV_AUDIO_LEN 0x208
158#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C 136#define HDMI_CORE_AV_AUDIO_CHSUM 0x20C
137#define HDMI_CORE_AV_AUD_DBYTE(n) (n * 4 + 0x210)
159#define HDMI_CORE_AV_MPEG_TYPE 0x280 138#define HDMI_CORE_AV_MPEG_TYPE 0x280
160#define HDMI_CORE_AV_MPEG_VERS 0x284 139#define HDMI_CORE_AV_MPEG_VERS 0x284
161#define HDMI_CORE_AV_MPEG_LEN 0x288 140#define HDMI_CORE_AV_MPEG_LEN 0x288
162#define HDMI_CORE_AV_MPEG_CHSUM 0x28C 141#define HDMI_CORE_AV_MPEG_CHSUM 0x28C
142#define HDMI_CORE_AV_MPEG_DBYTE(n) (n * 4 + 0x290)
143#define HDMI_CORE_AV_GEN_DBYTE(n) (n * 4 + 0x300)
163#define HDMI_CORE_AV_CP_BYTE1 0x37C 144#define HDMI_CORE_AV_CP_BYTE1 0x37C
145#define HDMI_CORE_AV_GEN2_DBYTE(n) (n * 4 + 0x380)
164#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC 146#define HDMI_CORE_AV_CEC_ADDR_ID 0x3FC
147
165#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4 148#define HDMI_CORE_AV_SPD_DBYTE_ELSIZE 0x4
166#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4 149#define HDMI_CORE_AV_GEN2_DBYTE_ELSIZE 0x4
167#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4 150#define HDMI_CORE_AV_MPEG_DBYTE_ELSIZE 0x4
168#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4 151#define HDMI_CORE_AV_GEN_DBYTE_ELSIZE 0x4
169 152
153#define HDMI_CORE_AV_AVI_DBYTE_NELEMS 15
154#define HDMI_CORE_AV_SPD_DBYTE_NELEMS 27
155#define HDMI_CORE_AV_AUD_DBYTE_NELEMS 10
156#define HDMI_CORE_AV_MPEG_DBYTE_NELEMS 27
157#define HDMI_CORE_AV_GEN_DBYTE_NELEMS 31
158#define HDMI_CORE_AV_GEN2_DBYTE_NELEMS 31
159
170/* PLL */ 160/* PLL */
171 161
172#define PLLCTRL_PLL_CONTROL 0x0 162#define PLLCTRL_PLL_CONTROL 0x0
@@ -284,35 +274,6 @@ enum hdmi_core_infoframe {
284 HDMI_INFOFRAME_AVI_DB5PR_8 = 7, 274 HDMI_INFOFRAME_AVI_DB5PR_8 = 7,
285 HDMI_INFOFRAME_AVI_DB5PR_9 = 8, 275 HDMI_INFOFRAME_AVI_DB5PR_9 = 8,
286 HDMI_INFOFRAME_AVI_DB5PR_10 = 9, 276 HDMI_INFOFRAME_AVI_DB5PR_10 = 9,
287 HDMI_INFOFRAME_AUDIO_DB1CT_FROM_STREAM = 0,
288 HDMI_INFOFRAME_AUDIO_DB1CT_IEC60958 = 1,
289 HDMI_INFOFRAME_AUDIO_DB1CT_AC3 = 2,
290 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG1 = 3,
291 HDMI_INFOFRAME_AUDIO_DB1CT_MP3 = 4,
292 HDMI_INFOFRAME_AUDIO_DB1CT_MPEG2_MULTICH = 5,
293 HDMI_INFOFRAME_AUDIO_DB1CT_AAC = 6,
294 HDMI_INFOFRAME_AUDIO_DB1CT_DTS = 7,
295 HDMI_INFOFRAME_AUDIO_DB1CT_ATRAC = 8,
296 HDMI_INFOFRAME_AUDIO_DB1CT_ONEBIT = 9,
297 HDMI_INFOFRAME_AUDIO_DB1CT_DOLBY_DIGITAL_PLUS = 10,
298 HDMI_INFOFRAME_AUDIO_DB1CT_DTS_HD = 11,
299 HDMI_INFOFRAME_AUDIO_DB1CT_MAT = 12,
300 HDMI_INFOFRAME_AUDIO_DB1CT_DST = 13,
301 HDMI_INFOFRAME_AUDIO_DB1CT_WMA_PRO = 14,
302 HDMI_INFOFRAME_AUDIO_DB2SF_FROM_STREAM = 0,
303 HDMI_INFOFRAME_AUDIO_DB2SF_32000 = 1,
304 HDMI_INFOFRAME_AUDIO_DB2SF_44100 = 2,
305 HDMI_INFOFRAME_AUDIO_DB2SF_48000 = 3,
306 HDMI_INFOFRAME_AUDIO_DB2SF_88200 = 4,
307 HDMI_INFOFRAME_AUDIO_DB2SF_96000 = 5,
308 HDMI_INFOFRAME_AUDIO_DB2SF_176400 = 6,
309 HDMI_INFOFRAME_AUDIO_DB2SF_192000 = 7,
310 HDMI_INFOFRAME_AUDIO_DB2SS_FROM_STREAM = 0,
311 HDMI_INFOFRAME_AUDIO_DB2SS_16BIT = 1,
312 HDMI_INFOFRAME_AUDIO_DB2SS_20BIT = 2,
313 HDMI_INFOFRAME_AUDIO_DB2SS_24BIT = 3,
314 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PERMITTED = 0,
315 HDMI_INFOFRAME_AUDIO_DB5_DM_INH_PROHIBITED = 1
316}; 277};
317 278
318enum hdmi_packing_mode { 279enum hdmi_packing_mode {
@@ -322,17 +283,6 @@ enum hdmi_packing_mode {
322 HDMI_PACK_ALREADYPACKED = 7 283 HDMI_PACK_ALREADYPACKED = 7
323}; 284};
324 285
325enum hdmi_core_audio_sample_freq {
326 HDMI_AUDIO_FS_32000 = 0x3,
327 HDMI_AUDIO_FS_44100 = 0x0,
328 HDMI_AUDIO_FS_48000 = 0x2,
329 HDMI_AUDIO_FS_88200 = 0x8,
330 HDMI_AUDIO_FS_96000 = 0xA,
331 HDMI_AUDIO_FS_176400 = 0xC,
332 HDMI_AUDIO_FS_192000 = 0xE,
333 HDMI_AUDIO_FS_NOT_INDICATED = 0x1
334};
335
336enum hdmi_core_audio_layout { 286enum hdmi_core_audio_layout {
337 HDMI_AUDIO_LAYOUT_2CH = 0, 287 HDMI_AUDIO_LAYOUT_2CH = 0,
338 HDMI_AUDIO_LAYOUT_8CH = 1 288 HDMI_AUDIO_LAYOUT_8CH = 1
@@ -387,37 +337,12 @@ enum hdmi_audio_blk_strt_end_sig {
387}; 337};
388 338
389enum hdmi_audio_i2s_config { 339enum hdmi_audio_i2s_config {
390 HDMI_AUDIO_I2S_WS_POLARITY_LOW_IS_LEFT = 0,
391 HDMI_AUDIO_I2S_WS_POLARIT_YLOW_IS_RIGHT = 1,
392 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0, 340 HDMI_AUDIO_I2S_MSB_SHIFTED_FIRST = 0,
393 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1, 341 HDMI_AUDIO_I2S_LSB_SHIFTED_FIRST = 1,
394 HDMI_AUDIO_I2S_MAX_WORD_20BITS = 0,
395 HDMI_AUDIO_I2S_MAX_WORD_24BITS = 1,
396 HDMI_AUDIO_I2S_CHST_WORD_NOT_SPECIFIED = 0,
397 HDMI_AUDIO_I2S_CHST_WORD_16_BITS = 1,
398 HDMI_AUDIO_I2S_CHST_WORD_17_BITS = 6,
399 HDMI_AUDIO_I2S_CHST_WORD_18_BITS = 2,
400 HDMI_AUDIO_I2S_CHST_WORD_19_BITS = 4,
401 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_20MAX = 5,
402 HDMI_AUDIO_I2S_CHST_WORD_20_BITS_24MAX = 1,
403 HDMI_AUDIO_I2S_CHST_WORD_21_BITS = 6,
404 HDMI_AUDIO_I2S_CHST_WORD_22_BITS = 2,
405 HDMI_AUDIO_I2S_CHST_WORD_23_BITS = 4,
406 HDMI_AUDIO_I2S_CHST_WORD_24_BITS = 5,
407 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0, 342 HDMI_AUDIO_I2S_SCK_EDGE_FALLING = 0,
408 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1, 343 HDMI_AUDIO_I2S_SCK_EDGE_RISING = 1,
409 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0, 344 HDMI_AUDIO_I2S_VBIT_FOR_PCM = 0,
410 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1, 345 HDMI_AUDIO_I2S_VBIT_FOR_COMPRESSED = 1,
411 HDMI_AUDIO_I2S_INPUT_LENGTH_NA = 0,
412 HDMI_AUDIO_I2S_INPUT_LENGTH_16 = 2,
413 HDMI_AUDIO_I2S_INPUT_LENGTH_17 = 12,
414 HDMI_AUDIO_I2S_INPUT_LENGTH_18 = 4,
415 HDMI_AUDIO_I2S_INPUT_LENGTH_19 = 8,
416 HDMI_AUDIO_I2S_INPUT_LENGTH_20 = 10,
417 HDMI_AUDIO_I2S_INPUT_LENGTH_21 = 13,
418 HDMI_AUDIO_I2S_INPUT_LENGTH_22 = 5,
419 HDMI_AUDIO_I2S_INPUT_LENGTH_23 = 9,
420 HDMI_AUDIO_I2S_INPUT_LENGTH_24 = 11,
421 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0, 346 HDMI_AUDIO_I2S_FIRST_BIT_SHIFT = 0,
422 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1, 347 HDMI_AUDIO_I2S_FIRST_BIT_NO_SHIFT = 1,
423 HDMI_AUDIO_I2S_SD0_EN = 1, 348 HDMI_AUDIO_I2S_SD0_EN = 1,
@@ -446,20 +371,6 @@ struct hdmi_core_video_config {
446 enum hdmi_core_tclkselclkmult tclk_sel_clkmult; 371 enum hdmi_core_tclkselclkmult tclk_sel_clkmult;
447}; 372};
448 373
449/*
450 * Refer to section 8.2 in HDMI 1.3 specification for
451 * details about infoframe databytes
452 */
453struct hdmi_core_infoframe_audio {
454 u8 db1_coding_type;
455 u8 db1_channel_count;
456 u8 db2_sample_freq;
457 u8 db2_sample_size;
458 u8 db4_channel_alloc;
459 bool db5_downmix_inh;
460 u8 db5_lsv; /* Level shift values for downmix */
461};
462
463struct hdmi_core_packet_enable_repeat { 374struct hdmi_core_packet_enable_repeat {
464 u32 audio_pkt; 375 u32 audio_pkt;
465 u32 audio_pkt_repeat; 376 u32 audio_pkt_repeat;
@@ -496,15 +407,10 @@ struct hdmi_audio_dma {
496}; 407};
497 408
498struct hdmi_core_audio_i2s_config { 409struct hdmi_core_audio_i2s_config {
499 u8 word_max_length;
500 u8 word_length;
501 u8 in_length_bits; 410 u8 in_length_bits;
502 u8 justification; 411 u8 justification;
503 u8 en_high_bitrate_aud;
504 u8 sck_edge_mode; 412 u8 sck_edge_mode;
505 u8 cbit_order;
506 u8 vbit; 413 u8 vbit;
507 u8 ws_polarity;
508 u8 direction; 414 u8 direction;
509 u8 shift; 415 u8 shift;
510 u8 active_sds; 416 u8 active_sds;
@@ -512,7 +418,7 @@ struct hdmi_core_audio_i2s_config {
512 418
513struct hdmi_core_audio_config { 419struct hdmi_core_audio_config {
514 struct hdmi_core_audio_i2s_config i2s_cfg; 420 struct hdmi_core_audio_i2s_config i2s_cfg;
515 enum hdmi_core_audio_sample_freq freq_sample; 421 struct snd_aes_iec958 *iec60958_cfg;
516 bool fs_override; 422 bool fs_override;
517 u32 n; 423 u32 n;
518 u32 cts; 424 u32 cts;
@@ -527,17 +433,4 @@ struct hdmi_core_audio_config {
527 bool en_spdif; 433 bool en_spdif;
528}; 434};
529 435
530#if defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) || \
531 defined(CONFIG_SND_OMAP_SOC_OMAP4_HDMI_MODULE)
532int hdmi_config_audio_acr(struct hdmi_ip_data *ip_data,
533 u32 sample_freq, u32 *n, u32 *cts);
534void hdmi_core_audio_infoframe_config(struct hdmi_ip_data *ip_data,
535 struct hdmi_core_infoframe_audio *info_aud);
536void hdmi_core_audio_config(struct hdmi_ip_data *ip_data,
537 struct hdmi_core_audio_config *cfg);
538void hdmi_wp_audio_config_dma(struct hdmi_ip_data *ip_data,
539 struct hdmi_audio_dma *aud_dma);
540void hdmi_wp_audio_config_format(struct hdmi_ip_data *ip_data,
541 struct hdmi_audio_format *aud_fmt);
542#endif
543#endif 436#endif
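
With the freq_sample and word-length enums gone, hdmi_core_audio_config now carries the raw IEC-60958 channel status (struct snd_aes_iec958), and the IP writes it out as received — status[0..2] to CHST0..CHST2 and, as the .c hunks note, status[3] and status[4] to CHST4 and CHST5. A hedged sketch of how a user of the new interface (the producer side is outside this patch) might fill that status word for 48 kHz, 24-bit consumer PCM, using the same ALSA asoundef.h bit definitions the driver decodes:

	#include <linux/string.h>
	#include <sound/asound.h>
	#include <sound/asoundef.h>

	static void example_fill_iec60958(struct snd_aes_iec958 *iec)
	{
		memset(iec->status, 0, sizeof(iec->status));

		/* consumer use, PCM, copyright not asserted */
		iec->status[0] = IEC958_AES0_CON_NOT_COPYRIGHT;
		/* sample frequency: 48 kHz */
		iec->status[3] = IEC958_AES3_CON_FS_48000;
		/* maximum word length 24 bits, sample word length 24 bits */
		iec->status[4] = IEC958_AES4_CON_MAX_WORDLEN_24 |
				 IEC958_AES4_CON_WORDLEN_24_20;
	}

With status[4] set this way, ti_hdmi_4xxx_audio_config() above selects right justification and a 24-bit I2S input length rather than the 16-bit fast path.
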
diff --git a/drivers/video/omap2/dss/venc.c b/drivers/video/omap2/dss/venc.c
index 9c3daf71750c..2b8973931ff4 100644
--- a/drivers/video/omap2/dss/venc.c
+++ b/drivers/video/omap2/dss/venc.c
@@ -415,6 +415,7 @@ static const struct venc_config *venc_timings_to_config(
415 return &venc_config_ntsc_trm; 415 return &venc_config_ntsc_trm;
416 416
417 BUG(); 417 BUG();
418 return NULL;
418} 419}
419 420
420static int venc_power_on(struct omap_dss_device *dssdev) 421static int venc_power_on(struct omap_dss_device *dssdev)
@@ -440,10 +441,11 @@ static int venc_power_on(struct omap_dss_device *dssdev)
440 441
441 venc_write_reg(VENC_OUTPUT_CONTROL, l); 442 venc_write_reg(VENC_OUTPUT_CONTROL, l);
442 443
443 dispc_set_digit_size(dssdev->panel.timings.x_res, 444 dss_mgr_set_timings(dssdev->manager, &dssdev->panel.timings);
444 dssdev->panel.timings.y_res/2);
445 445
446 regulator_enable(venc.vdda_dac_reg); 446 r = regulator_enable(venc.vdda_dac_reg);
447 if (r)
448 goto err;
447 449
448 if (dssdev->platform_enable) 450 if (dssdev->platform_enable)
449 dssdev->platform_enable(dssdev); 451 dssdev->platform_enable(dssdev);
@@ -485,16 +487,68 @@ unsigned long venc_get_pixel_clock(void)
485 return 13500000; 487 return 13500000;
486} 488}
487 489
490static ssize_t display_output_type_show(struct device *dev,
491 struct device_attribute *attr, char *buf)
492{
493 struct omap_dss_device *dssdev = to_dss_device(dev);
494 const char *ret;
495
496 switch (dssdev->phy.venc.type) {
497 case OMAP_DSS_VENC_TYPE_COMPOSITE:
498 ret = "composite";
499 break;
500 case OMAP_DSS_VENC_TYPE_SVIDEO:
501 ret = "svideo";
502 break;
503 default:
504 return -EINVAL;
505 }
506
507 return snprintf(buf, PAGE_SIZE, "%s\n", ret);
508}
509
510static ssize_t display_output_type_store(struct device *dev,
511 struct device_attribute *attr, const char *buf, size_t size)
512{
513 struct omap_dss_device *dssdev = to_dss_device(dev);
514 enum omap_dss_venc_type new_type;
515
516 if (sysfs_streq("composite", buf))
517 new_type = OMAP_DSS_VENC_TYPE_COMPOSITE;
518 else if (sysfs_streq("svideo", buf))
519 new_type = OMAP_DSS_VENC_TYPE_SVIDEO;
520 else
521 return -EINVAL;
522
523 mutex_lock(&venc.venc_lock);
524
525 if (dssdev->phy.venc.type != new_type) {
526 dssdev->phy.venc.type = new_type;
527 if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
528 venc_power_off(dssdev);
529 venc_power_on(dssdev);
530 }
531 }
532
533 mutex_unlock(&venc.venc_lock);
534
535 return size;
536}
537
538static DEVICE_ATTR(output_type, S_IRUGO | S_IWUSR,
539 display_output_type_show, display_output_type_store);
540
488/* driver */ 541/* driver */
489static int venc_panel_probe(struct omap_dss_device *dssdev) 542static int venc_panel_probe(struct omap_dss_device *dssdev)
490{ 543{
491 dssdev->panel.timings = omap_dss_pal_timings; 544 dssdev->panel.timings = omap_dss_pal_timings;
492 545
493 return 0; 546 return device_create_file(&dssdev->dev, &dev_attr_output_type);
494} 547}
495 548
496static void venc_panel_remove(struct omap_dss_device *dssdev) 549static void venc_panel_remove(struct omap_dss_device *dssdev)
497{ 550{
551 device_remove_file(&dssdev->dev, &dev_attr_output_type);
498} 552}
499 553
500static int venc_panel_enable(struct omap_dss_device *dssdev) 554static int venc_panel_enable(struct omap_dss_device *dssdev)
@@ -577,12 +631,6 @@ static int venc_panel_resume(struct omap_dss_device *dssdev)
577 return venc_panel_enable(dssdev); 631 return venc_panel_enable(dssdev);
578} 632}
579 633
580static void venc_get_timings(struct omap_dss_device *dssdev,
581 struct omap_video_timings *timings)
582{
583 *timings = dssdev->panel.timings;
584}
585
586static void venc_set_timings(struct omap_dss_device *dssdev, 634static void venc_set_timings(struct omap_dss_device *dssdev,
587 struct omap_video_timings *timings) 635 struct omap_video_timings *timings)
588{ 636{
@@ -597,6 +645,8 @@ static void venc_set_timings(struct omap_dss_device *dssdev,
597 /* turn the venc off and on to get new timings to use */ 645 /* turn the venc off and on to get new timings to use */
598 venc_panel_disable(dssdev); 646 venc_panel_disable(dssdev);
599 venc_panel_enable(dssdev); 647 venc_panel_enable(dssdev);
648 } else {
649 dss_mgr_set_timings(dssdev->manager, timings);
600 } 650 }
601} 651}
602 652
@@ -661,7 +711,6 @@ static struct omap_dss_driver venc_driver = {
661 .get_resolution = omapdss_default_get_resolution, 711 .get_resolution = omapdss_default_get_resolution,
662 .get_recommended_bpp = omapdss_default_get_recommended_bpp, 712 .get_recommended_bpp = omapdss_default_get_recommended_bpp,
663 713
664 .get_timings = venc_get_timings,
665 .set_timings = venc_set_timings, 714 .set_timings = venc_set_timings,
666 .check_timings = venc_check_timings, 715 .check_timings = venc_check_timings,
667 716
@@ -675,7 +724,7 @@ static struct omap_dss_driver venc_driver = {
675}; 724};
676/* driver end */ 725/* driver end */
677 726
678int venc_init_display(struct omap_dss_device *dssdev) 727static int __init venc_init_display(struct omap_dss_device *dssdev)
679{ 728{
680 DSSDBG("init_display\n"); 729 DSSDBG("init_display\n");
681 730
@@ -695,7 +744,7 @@ int venc_init_display(struct omap_dss_device *dssdev)
695 return 0; 744 return 0;
696} 745}
697 746
698void venc_dump_regs(struct seq_file *s) 747static void venc_dump_regs(struct seq_file *s)
699{ 748{
700#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r)) 749#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))
701 750
@@ -779,8 +828,32 @@ static void venc_put_clocks(void)
779 clk_put(venc.tv_dac_clk); 828 clk_put(venc.tv_dac_clk);
780} 829}
781 830
831static void __init venc_probe_pdata(struct platform_device *pdev)
832{
833 struct omap_dss_board_info *pdata = pdev->dev.platform_data;
834 int r, i;
835
836 for (i = 0; i < pdata->num_devices; ++i) {
837 struct omap_dss_device *dssdev = pdata->devices[i];
838
839 if (dssdev->type != OMAP_DISPLAY_TYPE_VENC)
840 continue;
841
842 r = venc_init_display(dssdev);
843 if (r) {
844 DSSERR("device %s init failed: %d\n", dssdev->name, r);
845 continue;
846 }
847
848 r = omap_dss_register_device(dssdev, &pdev->dev, i);
849 if (r)
850 DSSERR("device %s register failed: %d\n",
851 dssdev->name, r);
852 }
853}
854
782/* VENC HW IP initialisation */ 855/* VENC HW IP initialisation */
783static int omap_venchw_probe(struct platform_device *pdev) 856static int __init omap_venchw_probe(struct platform_device *pdev)
784{ 857{
785 u8 rev_id; 858 u8 rev_id;
786 struct resource *venc_mem; 859 struct resource *venc_mem;
@@ -824,6 +897,10 @@ static int omap_venchw_probe(struct platform_device *pdev)
824 if (r) 897 if (r)
825 goto err_reg_panel_driver; 898 goto err_reg_panel_driver;
826 899
900 dss_debugfs_create_file("venc", venc_dump_regs);
901
902 venc_probe_pdata(pdev);
903
827 return 0; 904 return 0;
828 905
829err_reg_panel_driver: 906err_reg_panel_driver:
@@ -833,12 +910,15 @@ err_runtime_get:
833 return r; 910 return r;
834} 911}
835 912
836static int omap_venchw_remove(struct platform_device *pdev) 913static int __exit omap_venchw_remove(struct platform_device *pdev)
837{ 914{
915 omap_dss_unregister_child_devices(&pdev->dev);
916
838 if (venc.vdda_dac_reg != NULL) { 917 if (venc.vdda_dac_reg != NULL) {
839 regulator_put(venc.vdda_dac_reg); 918 regulator_put(venc.vdda_dac_reg);
840 venc.vdda_dac_reg = NULL; 919 venc.vdda_dac_reg = NULL;
841 } 920 }
921
842 omap_dss_unregister_driver(&venc_driver); 922 omap_dss_unregister_driver(&venc_driver);
843 923
844 pm_runtime_disable(&pdev->dev); 924 pm_runtime_disable(&pdev->dev);
@@ -853,7 +933,6 @@ static int venc_runtime_suspend(struct device *dev)
853 clk_disable(venc.tv_dac_clk); 933 clk_disable(venc.tv_dac_clk);
854 934
855 dispc_runtime_put(); 935 dispc_runtime_put();
856 dss_runtime_put();
857 936
858 return 0; 937 return 0;
859} 938}
@@ -862,23 +941,14 @@ static int venc_runtime_resume(struct device *dev)
862{ 941{
863 int r; 942 int r;
864 943
865 r = dss_runtime_get();
866 if (r < 0)
867 goto err_get_dss;
868
869 r = dispc_runtime_get(); 944 r = dispc_runtime_get();
870 if (r < 0) 945 if (r < 0)
871 goto err_get_dispc; 946 return r;
872 947
873 if (venc.tv_dac_clk) 948 if (venc.tv_dac_clk)
874 clk_enable(venc.tv_dac_clk); 949 clk_enable(venc.tv_dac_clk);
875 950
876 return 0; 951 return 0;
877
878err_get_dispc:
879 dss_runtime_put();
880err_get_dss:
881 return r;
882} 952}
883 953
884static const struct dev_pm_ops venc_pm_ops = { 954static const struct dev_pm_ops venc_pm_ops = {
@@ -887,8 +957,7 @@ static const struct dev_pm_ops venc_pm_ops = {
887}; 957};
888 958
889static struct platform_driver omap_venchw_driver = { 959static struct platform_driver omap_venchw_driver = {
890 .probe = omap_venchw_probe, 960 .remove = __exit_p(omap_venchw_remove),
891 .remove = omap_venchw_remove,
892 .driver = { 961 .driver = {
893 .name = "omapdss_venc", 962 .name = "omapdss_venc",
894 .owner = THIS_MODULE, 963 .owner = THIS_MODULE,
@@ -896,18 +965,18 @@ static struct platform_driver omap_venchw_driver = {
896 }, 965 },
897}; 966};
898 967
899int venc_init_platform_driver(void) 968int __init venc_init_platform_driver(void)
900{ 969{
901 if (cpu_is_omap44xx()) 970 if (cpu_is_omap44xx())
902 return 0; 971 return 0;
903 972
904 return platform_driver_register(&omap_venchw_driver); 973 return platform_driver_probe(&omap_venchw_driver, omap_venchw_probe);
905} 974}
906 975
907void venc_uninit_platform_driver(void) 976void __exit venc_uninit_platform_driver(void)
908{ 977{
909 if (cpu_is_omap44xx()) 978 if (cpu_is_omap44xx())
910 return; 979 return;
911 980
912 return platform_driver_unregister(&omap_venchw_driver); 981 platform_driver_unregister(&omap_venchw_driver);
913} 982}
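
The venc.c hunks above add an output_type sysfs attribute and hook its creation and removal into the panel probe/remove callbacks. For reference, the general shape of that pattern is roughly the sketch below; struct mydev, its mode field and the use of dev_get_drvdata() are illustrative assumptions, not names from the patch.

#include <linux/device.h>
#include <linux/sysfs.h>

struct mydev {
        int mode;                               /* hypothetical device state */
};

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct mydev *md = dev_get_drvdata(dev);

        return snprintf(buf, PAGE_SIZE, "%s\n", md->mode ? "fast" : "slow");
}

static ssize_t mode_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t size)
{
        struct mydev *md = dev_get_drvdata(dev);

        if (sysfs_streq("fast", buf))
                md->mode = 1;
        else if (sysfs_streq("slow", buf))
                md->mode = 0;
        else
                return -EINVAL;

        return size;                            /* whole write consumed */
}

static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, mode_show, mode_store);

/* probe:  err = device_create_file(dev, &dev_attr_mode); */
/* remove: device_remove_file(dev, &dev_attr_mode);       */

Creating the file in probe and removing it in remove, as the patch does, ties the attribute's lifetime to the bound driver rather than to the device.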
diff --git a/drivers/video/omap2/omapfb/omapfb-ioctl.c b/drivers/video/omap2/omapfb/omapfb-ioctl.c
index 6a09ef87e14f..c6cf372d22c5 100644
--- a/drivers/video/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/omap2/omapfb/omapfb-ioctl.c
@@ -70,7 +70,7 @@ static int omapfb_setup_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
70 70
71 DBG("omapfb_setup_plane\n"); 71 DBG("omapfb_setup_plane\n");
72 72
73 if (ofbi->num_overlays != 1) { 73 if (ofbi->num_overlays == 0) {
74 r = -EINVAL; 74 r = -EINVAL;
75 goto out; 75 goto out;
76 } 76 }
@@ -185,7 +185,7 @@ static int omapfb_query_plane(struct fb_info *fbi, struct omapfb_plane_info *pi)
185{ 185{
186 struct omapfb_info *ofbi = FB2OFB(fbi); 186 struct omapfb_info *ofbi = FB2OFB(fbi);
187 187
188 if (ofbi->num_overlays != 1) { 188 if (ofbi->num_overlays == 0) {
189 memset(pi, 0, sizeof(*pi)); 189 memset(pi, 0, sizeof(*pi));
190 } else { 190 } else {
191 struct omap_overlay *ovl; 191 struct omap_overlay *ovl;
@@ -225,6 +225,9 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
225 down_write_nested(&rg->lock, rg->id); 225 down_write_nested(&rg->lock, rg->id);
226 atomic_inc(&rg->lock_count); 226 atomic_inc(&rg->lock_count);
227 227
228 if (rg->size == size && rg->type == mi->type)
229 goto out;
230
228 if (atomic_read(&rg->map_count)) { 231 if (atomic_read(&rg->map_count)) {
229 r = -EBUSY; 232 r = -EBUSY;
230 goto out; 233 goto out;
@@ -247,12 +250,10 @@ static int omapfb_setup_mem(struct fb_info *fbi, struct omapfb_mem_info *mi)
247 } 250 }
248 } 251 }
249 252
250 if (rg->size != size || rg->type != mi->type) { 253 r = omapfb_realloc_fbmem(fbi, size, mi->type);
251 r = omapfb_realloc_fbmem(fbi, size, mi->type); 254 if (r) {
252 if (r) { 255 dev_err(fbdev->dev, "realloc fbmem failed\n");
253 dev_err(fbdev->dev, "realloc fbmem failed\n"); 256 goto out;
254 goto out;
255 }
256 } 257 }
257 258
258 out: 259 out:
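
The omapfb_setup_mem change above treats a request for the current size and type as a successful no-op before the -EBUSY map_count check, which in turn lets the later reallocation run unconditionally. Schematically (struct region, its fields and realloc_region() are placeholders, not the driver's own names):

static int resize_region(struct region *rg, size_t size, int type)
{
        int r = 0;

        if (rg->size == size && rg->type == type)
                goto out;               /* nothing to change, not an error */

        if (atomic_read(&rg->map_count)) {
                r = -EBUSY;             /* still mmapped by userspace */
                goto out;
        }

        r = realloc_region(rg, size, type);
out:
        return r;
}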
diff --git a/drivers/video/omap2/omapfb/omapfb-main.c b/drivers/video/omap2/omapfb/omapfb-main.c
index b00db4068d21..3450ea0966c9 100644
--- a/drivers/video/omap2/omapfb/omapfb-main.c
+++ b/drivers/video/omap2/omapfb/omapfb-main.c
@@ -179,6 +179,7 @@ static unsigned omapfb_get_vrfb_offset(const struct omapfb_info *ofbi, int rot)
179 break; 179 break;
180 default: 180 default:
181 BUG(); 181 BUG();
182 return 0;
182 } 183 }
183 184
184 offset *= vrfb->bytespp; 185 offset *= vrfb->bytespp;
@@ -1502,7 +1503,7 @@ static int omapfb_parse_vram_param(const char *param, int max_entries,
1502 1503
1503 fbnum = simple_strtoul(p, &p, 10); 1504 fbnum = simple_strtoul(p, &p, 10);
1504 1505
1505 if (p == param) 1506 if (p == start)
1506 return -EINVAL; 1507 return -EINVAL;
1507 1508
1508 if (*p != ':') 1509 if (*p != ':')
@@ -2307,7 +2308,7 @@ static int omapfb_init_display(struct omapfb2_device *fbdev,
2307 return 0; 2308 return 0;
2308} 2309}
2309 2310
2310static int omapfb_probe(struct platform_device *pdev) 2311static int __init omapfb_probe(struct platform_device *pdev)
2311{ 2312{
2312 struct omapfb2_device *fbdev = NULL; 2313 struct omapfb2_device *fbdev = NULL;
2313 int r = 0; 2314 int r = 0;
@@ -2448,7 +2449,7 @@ err0:
2448 return r; 2449 return r;
2449} 2450}
2450 2451
2451static int omapfb_remove(struct platform_device *pdev) 2452static int __exit omapfb_remove(struct platform_device *pdev)
2452{ 2453{
2453 struct omapfb2_device *fbdev = platform_get_drvdata(pdev); 2454 struct omapfb2_device *fbdev = platform_get_drvdata(pdev);
2454 2455
@@ -2462,8 +2463,7 @@ static int omapfb_remove(struct platform_device *pdev)
2462} 2463}
2463 2464
2464static struct platform_driver omapfb_driver = { 2465static struct platform_driver omapfb_driver = {
2465 .probe = omapfb_probe, 2466 .remove = __exit_p(omapfb_remove),
2466 .remove = omapfb_remove,
2467 .driver = { 2467 .driver = {
2468 .name = "omapfb", 2468 .name = "omapfb",
2469 .owner = THIS_MODULE, 2469 .owner = THIS_MODULE,
@@ -2474,7 +2474,7 @@ static int __init omapfb_init(void)
2474{ 2474{
2475 DBG("omapfb_init\n"); 2475 DBG("omapfb_init\n");
2476 2476
2477 if (platform_driver_register(&omapfb_driver)) { 2477 if (platform_driver_probe(&omapfb_driver, omapfb_probe)) {
2478 printk(KERN_ERR "failed to register omapfb driver\n"); 2478 printk(KERN_ERR "failed to register omapfb driver\n");
2479 return -ENODEV; 2479 return -ENODEV;
2480 } 2480 }
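
omapdss_venc above and omapfb here make the same conversion: the probe function becomes __init, the .probe member is dropped from the platform_driver, and registration goes through platform_driver_probe(), so the probe path can be discarded from memory once boot-time initialisation is done. A stripped-down sketch with placeholder names:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>

static int __init foo_probe(struct platform_device *pdev)
{
        return 0;                       /* one-shot setup */
}

static int __exit foo_remove(struct platform_device *pdev)
{
        return 0;
}

static struct platform_driver foo_driver = {
        /* no .probe: platform_driver_probe() supplies it */
        .remove = __exit_p(foo_remove),
        .driver = {
                .name   = "foo",
                .owner  = THIS_MODULE,
        },
};

static int __init foo_init(void)
{
        return platform_driver_probe(&foo_driver, foo_probe);
}
module_init(foo_init);

Note that platform_driver_probe() only succeeds when the matching platform device is already registered at init time, and it never retries probing later, which is why it suits SoC display blocks like these.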
diff --git a/drivers/video/omap2/omapfb/omapfb.h b/drivers/video/omap2/omapfb/omapfb.h
index c0bdc9b54ecf..30361a09aecd 100644
--- a/drivers/video/omap2/omapfb/omapfb.h
+++ b/drivers/video/omap2/omapfb/omapfb.h
@@ -166,6 +166,7 @@ static inline struct omapfb_display_data *get_display_data(
166 166
167 /* This should never happen */ 167 /* This should never happen */
168 BUG(); 168 BUG();
169 return NULL;
169} 170}
170 171
171static inline void omapfb_lock(struct omapfb2_device *fbdev) 172static inline void omapfb_lock(struct omapfb2_device *fbdev)
diff --git a/drivers/video/omap2/vrfb.c b/drivers/video/omap2/vrfb.c
index 4e5b960c32c8..7e990220ad2a 100644
--- a/drivers/video/omap2/vrfb.c
+++ b/drivers/video/omap2/vrfb.c
@@ -179,8 +179,10 @@ void omap_vrfb_setup(struct vrfb *vrfb, unsigned long paddr,
179 pixel_size_exp = 2; 179 pixel_size_exp = 2;
180 else if (bytespp == 2) 180 else if (bytespp == 2)
181 pixel_size_exp = 1; 181 pixel_size_exp = 1;
182 else 182 else {
183 BUG(); 183 BUG();
184 return;
185 }
184 186
185 vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp; 187 vrfb_width = ALIGN(width * bytespp, VRFB_PAGE_WIDTH) / bytespp;
186 vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT); 188 vrfb_height = ALIGN(height, VRFB_PAGE_HEIGHT);
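
The omapfb.h, omapfb-main.c and vrfb.c hunks each add an explicit return after BUG(). BUG() is conceptually unreachable, but with CONFIG_BUG disabled it can compile to nothing, so the extra return keeps execution defined and silences "control reaches end of non-void function" warnings. In the same spirit, with a hypothetical helper:

#include <linux/bug.h>

static int bytespp_to_shift(int bytespp)
{
        switch (bytespp) {
        case 4:
                return 2;
        case 2:
                return 1;
        default:
                BUG();          /* unsupported pixel size */
                return 0;       /* only reached if BUG() is compiled out */
        }
}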
diff --git a/drivers/video/pxa3xx-gcu.c b/drivers/video/pxa3xx-gcu.c
index 1d71c08a818f..0b4ae0cebeda 100644
--- a/drivers/video/pxa3xx-gcu.c
+++ b/drivers/video/pxa3xx-gcu.c
@@ -316,12 +316,9 @@ pxa3xx_gcu_wait_idle(struct pxa3xx_gcu_priv *priv)
316 ret = wait_event_interruptible_timeout(priv->wait_idle, 316 ret = wait_event_interruptible_timeout(priv->wait_idle,
317 !priv->shared->hw_running, HZ*4); 317 !priv->shared->hw_running, HZ*4);
318 318
319 if (ret < 0) 319 if (ret != 0)
320 break; 320 break;
321 321
322 if (ret > 0)
323 continue;
324
325 if (gc_readl(priv, REG_GCRBEXHR) == rbexhr && 322 if (gc_readl(priv, REG_GCRBEXHR) == rbexhr &&
326 priv->shared->num_interrupts == num) { 323 priv->shared->num_interrupts == num) {
327 QERROR("TIMEOUT"); 324 QERROR("TIMEOUT");
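
The pxa3xx-gcu change collapses the wait loop's bookkeeping around wait_event_interruptible_timeout(), which returns a negative value when interrupted by a signal, 0 on timeout with the condition still false, and the remaining jiffies (> 0) when the condition became true. "ret != 0" therefore means "stop retrying", and only ret == 0 is the timeout case worth diagnosing. For illustration, with a hypothetical wait queue and completion flag:

        long ret;

        ret = wait_event_interruptible_timeout(my_waitq, my_done, HZ * 4);
        if (ret < 0)
                return ret;             /* signal: typically -ERESTARTSYS */
        if (ret == 0)
                return -ETIMEDOUT;      /* condition never became true */
        /* ret > 0: condition is true, ret is the time left in jiffies */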
diff --git a/drivers/video/s3c-fb.c b/drivers/video/s3c-fb.c
index f3105160bf98..ea7b661e7229 100644
--- a/drivers/video/s3c-fb.c
+++ b/drivers/video/s3c-fb.c
@@ -47,7 +47,7 @@
47#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE 47#ifdef CONFIG_FB_S3C_DEBUG_REGWRITE
48#undef writel 48#undef writel
49#define writel(v, r) do { \ 49#define writel(v, r) do { \
50 printk(KERN_DEBUG "%s: %08x => %p\n", __func__, (unsigned int)v, r); \ 50 pr_debug("%s: %08x => %p\n", __func__, (unsigned int)v, r); \
51 __raw_writel(v, r); \ 51 __raw_writel(v, r); \
52} while (0) 52} while (0)
53#endif /* FB_S3C_DEBUG_REGWRITE */ 53#endif /* FB_S3C_DEBUG_REGWRITE */
@@ -361,7 +361,7 @@ static int s3c_fb_calc_pixclk(struct s3c_fb *sfb, unsigned int pixclk)
361 result = (unsigned int)tmp / 1000; 361 result = (unsigned int)tmp / 1000;
362 362
363 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n", 363 dev_dbg(sfb->dev, "pixclk=%u, clk=%lu, div=%d (%lu)\n",
364 pixclk, clk, result, clk / result); 364 pixclk, clk, result, result ? clk / result : clk);
365 365
366 return result; 366 return result;
367} 367}
@@ -495,7 +495,6 @@ static int s3c_fb_set_par(struct fb_info *info)
495 u32 alpha = 0; 495 u32 alpha = 0;
496 u32 data; 496 u32 data;
497 u32 pagewidth; 497 u32 pagewidth;
498 int clkdiv;
499 498
500 dev_dbg(sfb->dev, "setting framebuffer parameters\n"); 499 dev_dbg(sfb->dev, "setting framebuffer parameters\n");
501 500
@@ -532,48 +531,9 @@ static int s3c_fb_set_par(struct fb_info *info)
532 /* disable the window whilst we update it */ 531 /* disable the window whilst we update it */
533 writel(0, regs + WINCON(win_no)); 532 writel(0, regs + WINCON(win_no));
534 533
535 /* use platform specified window as the basis for the lcd timings */ 534 if (!sfb->output_on)
536
537 if (win_no == sfb->pdata->default_win) {
538 clkdiv = s3c_fb_calc_pixclk(sfb, var->pixclock);
539
540 data = sfb->pdata->vidcon0;
541 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
542
543 if (clkdiv > 1)
544 data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
545 else
546 data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
547
548 /* write the timing data to the panel */
549
550 if (sfb->variant.is_2443)
551 data |= (1 << 5);
552
553 writel(data, regs + VIDCON0);
554
555 s3c_fb_enable(sfb, 1); 535 s3c_fb_enable(sfb, 1);
556 536
557 data = VIDTCON0_VBPD(var->upper_margin - 1) |
558 VIDTCON0_VFPD(var->lower_margin - 1) |
559 VIDTCON0_VSPW(var->vsync_len - 1);
560
561 writel(data, regs + sfb->variant.vidtcon);
562
563 data = VIDTCON1_HBPD(var->left_margin - 1) |
564 VIDTCON1_HFPD(var->right_margin - 1) |
565 VIDTCON1_HSPW(var->hsync_len - 1);
566
567 /* VIDTCON1 */
568 writel(data, regs + sfb->variant.vidtcon + 4);
569
570 data = VIDTCON2_LINEVAL(var->yres - 1) |
571 VIDTCON2_HOZVAL(var->xres - 1) |
572 VIDTCON2_LINEVAL_E(var->yres - 1) |
573 VIDTCON2_HOZVAL_E(var->xres - 1);
574 writel(data, regs + sfb->variant.vidtcon + 8);
575 }
576
577 /* write the buffer address */ 537 /* write the buffer address */
578 538
579 /* start and end registers stride is 8 */ 539 /* start and end registers stride is 8 */
@@ -839,6 +799,7 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
839 struct s3c_fb *sfb = win->parent; 799 struct s3c_fb *sfb = win->parent;
840 unsigned int index = win->index; 800 unsigned int index = win->index;
841 u32 wincon; 801 u32 wincon;
802 u32 output_on = sfb->output_on;
842 803
843 dev_dbg(sfb->dev, "blank mode %d\n", blank_mode); 804 dev_dbg(sfb->dev, "blank mode %d\n", blank_mode);
844 805
@@ -877,34 +838,18 @@ static int s3c_fb_blank(int blank_mode, struct fb_info *info)
877 838
878 shadow_protect_win(win, 1); 839 shadow_protect_win(win, 1);
879 writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4)); 840 writel(wincon, sfb->regs + sfb->variant.wincon + (index * 4));
880 shadow_protect_win(win, 0);
881 841
882 /* Check the enabled state to see if we need to be running the 842 /* Check the enabled state to see if we need to be running the
883 * main LCD interface, as if there are no active windows then 843 * main LCD interface, as if there are no active windows then
884 * it is highly likely that we also do not need to output 844 * it is highly likely that we also do not need to output
885 * anything. 845 * anything.
886 */ 846 */
887 847 s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
888 /* We could do something like the following code, but the current 848 shadow_protect_win(win, 0);
889 * system of using framebuffer events means that we cannot make
890 * the distinction between just window 0 being inactive and all
891 * the windows being down.
892 *
893 * s3c_fb_enable(sfb, sfb->enabled ? 1 : 0);
894 */
895
896 /* we're stuck with this until we can do something about overriding
897 * the power control using the blanking event for a single fb.
898 */
899 if (index == sfb->pdata->default_win) {
900 shadow_protect_win(win, 1);
901 s3c_fb_enable(sfb, blank_mode != FB_BLANK_POWERDOWN ? 1 : 0);
902 shadow_protect_win(win, 0);
903 }
904 849
905 pm_runtime_put_sync(sfb->dev); 850 pm_runtime_put_sync(sfb->dev);
906 851
907 return 0; 852 return output_on == sfb->output_on;
908} 853}
909 854
910/** 855/**
@@ -1111,7 +1056,7 @@ static struct fb_ops s3c_fb_ops = {
1111 * 1056 *
1112 * Calculate the pixel clock when none has been given through platform data. 1057 * Calculate the pixel clock when none has been given through platform data.
1113 */ 1058 */
1114static void __devinit s3c_fb_missing_pixclock(struct fb_videomode *mode) 1059static void s3c_fb_missing_pixclock(struct fb_videomode *mode)
1115{ 1060{
1116 u64 pixclk = 1000000000000ULL; 1061 u64 pixclk = 1000000000000ULL;
1117 u32 div; 1062 u32 div;
@@ -1144,11 +1089,11 @@ static int __devinit s3c_fb_alloc_memory(struct s3c_fb *sfb,
1144 1089
1145 dev_dbg(sfb->dev, "allocating memory for display\n"); 1090 dev_dbg(sfb->dev, "allocating memory for display\n");
1146 1091
1147 real_size = windata->win_mode.xres * windata->win_mode.yres; 1092 real_size = windata->xres * windata->yres;
1148 virt_size = windata->virtual_x * windata->virtual_y; 1093 virt_size = windata->virtual_x * windata->virtual_y;
1149 1094
1150 dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n", 1095 dev_dbg(sfb->dev, "real_size=%u (%u.%u), virt_size=%u (%u.%u)\n",
1151 real_size, windata->win_mode.xres, windata->win_mode.yres, 1096 real_size, windata->xres, windata->yres,
1152 virt_size, windata->virtual_x, windata->virtual_y); 1097 virt_size, windata->virtual_x, windata->virtual_y);
1153 1098
1154 size = (real_size > virt_size) ? real_size : virt_size; 1099 size = (real_size > virt_size) ? real_size : virt_size;
@@ -1230,7 +1175,7 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1230 struct s3c_fb_win **res) 1175 struct s3c_fb_win **res)
1231{ 1176{
1232 struct fb_var_screeninfo *var; 1177 struct fb_var_screeninfo *var;
1233 struct fb_videomode *initmode; 1178 struct fb_videomode initmode;
1234 struct s3c_fb_pd_win *windata; 1179 struct s3c_fb_pd_win *windata;
1235 struct s3c_fb_win *win; 1180 struct s3c_fb_win *win;
1236 struct fb_info *fbinfo; 1181 struct fb_info *fbinfo;
@@ -1251,11 +1196,11 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1251 } 1196 }
1252 1197
1253 windata = sfb->pdata->win[win_no]; 1198 windata = sfb->pdata->win[win_no];
1254 initmode = &windata->win_mode; 1199 initmode = *sfb->pdata->vtiming;
1255 1200
1256 WARN_ON(windata->max_bpp == 0); 1201 WARN_ON(windata->max_bpp == 0);
1257 WARN_ON(windata->win_mode.xres == 0); 1202 WARN_ON(windata->xres == 0);
1258 WARN_ON(windata->win_mode.yres == 0); 1203 WARN_ON(windata->yres == 0);
1259 1204
1260 win = fbinfo->par; 1205 win = fbinfo->par;
1261 *res = win; 1206 *res = win;
@@ -1294,7 +1239,9 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1294 } 1239 }
1295 1240
1296 /* setup the initial video mode from the window */ 1241 /* setup the initial video mode from the window */
1297 fb_videomode_to_var(&fbinfo->var, initmode); 1242 initmode.xres = windata->xres;
1243 initmode.yres = windata->yres;
1244 fb_videomode_to_var(&fbinfo->var, &initmode);
1298 1245
1299 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS; 1246 fbinfo->fix.type = FB_TYPE_PACKED_PIXELS;
1300 fbinfo->fix.accel = FB_ACCEL_NONE; 1247 fbinfo->fix.accel = FB_ACCEL_NONE;
@@ -1339,6 +1286,53 @@ static int __devinit s3c_fb_probe_win(struct s3c_fb *sfb, unsigned int win_no,
1339} 1286}
1340 1287
1341/** 1288/**
1289 * s3c_fb_set_rgb_timing() - set video timing for rgb interface.
1290 * @sfb: The base resources for the hardware.
1291 *
1292 * Set horizontal and vertical lcd rgb interface timing.
1293 */
1294static void s3c_fb_set_rgb_timing(struct s3c_fb *sfb)
1295{
1296 struct fb_videomode *vmode = sfb->pdata->vtiming;
1297 void __iomem *regs = sfb->regs;
1298 int clkdiv;
1299 u32 data;
1300
1301 if (!vmode->pixclock)
1302 s3c_fb_missing_pixclock(vmode);
1303
1304 clkdiv = s3c_fb_calc_pixclk(sfb, vmode->pixclock);
1305
1306 data = sfb->pdata->vidcon0;
1307 data &= ~(VIDCON0_CLKVAL_F_MASK | VIDCON0_CLKDIR);
1308
1309 if (clkdiv > 1)
1310 data |= VIDCON0_CLKVAL_F(clkdiv-1) | VIDCON0_CLKDIR;
1311 else
1312 data &= ~VIDCON0_CLKDIR; /* 1:1 clock */
1313
1314 if (sfb->variant.is_2443)
1315 data |= (1 << 5);
1316 writel(data, regs + VIDCON0);
1317
1318 data = VIDTCON0_VBPD(vmode->upper_margin - 1) |
1319 VIDTCON0_VFPD(vmode->lower_margin - 1) |
1320 VIDTCON0_VSPW(vmode->vsync_len - 1);
1321 writel(data, regs + sfb->variant.vidtcon);
1322
1323 data = VIDTCON1_HBPD(vmode->left_margin - 1) |
1324 VIDTCON1_HFPD(vmode->right_margin - 1) |
1325 VIDTCON1_HSPW(vmode->hsync_len - 1);
1326 writel(data, regs + sfb->variant.vidtcon + 4);
1327
1328 data = VIDTCON2_LINEVAL(vmode->yres - 1) |
1329 VIDTCON2_HOZVAL(vmode->xres - 1) |
1330 VIDTCON2_LINEVAL_E(vmode->yres - 1) |
1331 VIDTCON2_HOZVAL_E(vmode->xres - 1);
1332 writel(data, regs + sfb->variant.vidtcon + 8);
1333}
1334
1335/**
1342 * s3c_fb_clear_win() - clear hardware window registers. 1336 * s3c_fb_clear_win() - clear hardware window registers.
1343 * @sfb: The base resources for the hardware. 1337 * @sfb: The base resources for the hardware.
1344 * @win: The window to process. 1338 * @win: The window to process.
@@ -1354,8 +1348,14 @@ static void s3c_fb_clear_win(struct s3c_fb *sfb, int win)
1354 writel(0, regs + VIDOSD_A(win, sfb->variant)); 1348 writel(0, regs + VIDOSD_A(win, sfb->variant));
1355 writel(0, regs + VIDOSD_B(win, sfb->variant)); 1349 writel(0, regs + VIDOSD_B(win, sfb->variant));
1356 writel(0, regs + VIDOSD_C(win, sfb->variant)); 1350 writel(0, regs + VIDOSD_C(win, sfb->variant));
1357 reg = readl(regs + SHADOWCON); 1351
1358 writel(reg & ~SHADOWCON_WINx_PROTECT(win), regs + SHADOWCON); 1352 if (sfb->variant.has_shadowcon) {
1353 reg = readl(sfb->regs + SHADOWCON);
1354 reg &= ~(SHADOWCON_WINx_PROTECT(win) |
1355 SHADOWCON_CHx_ENABLE(win) |
1356 SHADOWCON_CHx_LOCAL_ENABLE(win));
1357 writel(reg, sfb->regs + SHADOWCON);
1358 }
1359} 1359}
1360 1360
1361static int __devinit s3c_fb_probe(struct platform_device *pdev) 1361static int __devinit s3c_fb_probe(struct platform_device *pdev)
@@ -1481,15 +1481,14 @@ static int __devinit s3c_fb_probe(struct platform_device *pdev)
1481 writel(0xffffff, regs + WKEYCON1); 1481 writel(0xffffff, regs + WKEYCON1);
1482 } 1482 }
1483 1483
1484 s3c_fb_set_rgb_timing(sfb);
1485
1484 /* we have the register setup, start allocating framebuffers */ 1486 /* we have the register setup, start allocating framebuffers */
1485 1487
1486 for (win = 0; win < fbdrv->variant.nr_windows; win++) { 1488 for (win = 0; win < fbdrv->variant.nr_windows; win++) {
1487 if (!pd->win[win]) 1489 if (!pd->win[win])
1488 continue; 1490 continue;
1489 1491
1490 if (!pd->win[win]->win_mode.pixclock)
1491 s3c_fb_missing_pixclock(&pd->win[win]->win_mode);
1492
1493 ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win], 1492 ret = s3c_fb_probe_win(sfb, win, fbdrv->win[win],
1494 &sfb->windows[win]); 1493 &sfb->windows[win]);
1495 if (ret < 0) { 1494 if (ret < 0) {
@@ -1564,6 +1563,8 @@ static int s3c_fb_suspend(struct device *dev)
1564 struct s3c_fb_win *win; 1563 struct s3c_fb_win *win;
1565 int win_no; 1564 int win_no;
1566 1565
1566 pm_runtime_get_sync(sfb->dev);
1567
1567 for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) { 1568 for (win_no = S3C_FB_MAX_WIN - 1; win_no >= 0; win_no--) {
1568 win = sfb->windows[win_no]; 1569 win = sfb->windows[win_no];
1569 if (!win) 1570 if (!win)
@@ -1577,6 +1578,9 @@ static int s3c_fb_suspend(struct device *dev)
1577 clk_disable(sfb->lcd_clk); 1578 clk_disable(sfb->lcd_clk);
1578 1579
1579 clk_disable(sfb->bus_clk); 1580 clk_disable(sfb->bus_clk);
1581
1582 pm_runtime_put_sync(sfb->dev);
1583
1580 return 0; 1584 return 0;
1581} 1585}
1582 1586
@@ -1589,6 +1593,8 @@ static int s3c_fb_resume(struct device *dev)
1589 int win_no; 1593 int win_no;
1590 u32 reg; 1594 u32 reg;
1591 1595
1596 pm_runtime_get_sync(sfb->dev);
1597
1592 clk_enable(sfb->bus_clk); 1598 clk_enable(sfb->bus_clk);
1593 1599
1594 if (!sfb->variant.has_clksel) 1600 if (!sfb->variant.has_clksel)
@@ -1623,6 +1629,8 @@ static int s3c_fb_resume(struct device *dev)
1623 shadow_protect_win(win, 0); 1629 shadow_protect_win(win, 0);
1624 } 1630 }
1625 1631
1632 s3c_fb_set_rgb_timing(sfb);
1633
1626 /* restore framebuffers */ 1634 /* restore framebuffers */
1627 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) { 1635 for (win_no = 0; win_no < S3C_FB_MAX_WIN; win_no++) {
1628 win = sfb->windows[win_no]; 1636 win = sfb->windows[win_no];
@@ -1633,6 +1641,8 @@ static int s3c_fb_resume(struct device *dev)
1633 s3c_fb_set_par(win->fbinfo); 1641 s3c_fb_set_par(win->fbinfo);
1634 } 1642 }
1635 1643
1644 pm_runtime_put_sync(sfb->dev);
1645
1636 return 0; 1646 return 0;
1637} 1647}
1638#endif 1648#endif
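
The s3c-fb suspend/resume hunks bracket the register reprogramming with pm_runtime_get_sync()/pm_runtime_put_sync(), so the controller is guaranteed to be powered and clocked while the timing and window registers are restored. The bracketing pattern, with a placeholder body:

static int foo_resume(struct device *dev)
{
        pm_runtime_get_sync(dev);       /* power/clock the block */

        /* ... restore timing and window registers ... */

        pm_runtime_put_sync(dev);       /* drop the reference again */
        return 0;
}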
diff --git a/drivers/video/savage/savagefb_driver.c b/drivers/video/savage/savagefb_driver.c
index cee7803a0a1c..f3d3b9ce4751 100644
--- a/drivers/video/savage/savagefb_driver.c
+++ b/drivers/video/savage/savagefb_driver.c
@@ -1351,7 +1351,7 @@ static void savagefb_set_par_int(struct savagefb_par *par, struct savage_reg *r
1351 /* following part not present in X11 driver */ 1351 /* following part not present in X11 driver */
1352 cr67 = vga_in8(0x3d5, par) & 0xf; 1352 cr67 = vga_in8(0x3d5, par) & 0xf;
1353 vga_out8(0x3d5, 0x50 | cr67, par); 1353 vga_out8(0x3d5, 0x50 | cr67, par);
1354 udelay(10000); 1354 mdelay(10);
1355 vga_out8(0x3d4, 0x67, par); 1355 vga_out8(0x3d4, 0x67, par);
1356 /* end of part */ 1356 /* end of part */
1357 vga_out8(0x3d5, reg->CR67 & ~0x0c, par); 1357 vga_out8(0x3d5, reg->CR67 & ~0x0c, par);
@@ -1904,11 +1904,11 @@ static int savage_init_hw(struct savagefb_par *par)
1904 vga_out8(0x3d4, 0x66, par); 1904 vga_out8(0x3d4, 0x66, par);
1905 cr66 = vga_in8(0x3d5, par); 1905 cr66 = vga_in8(0x3d5, par);
1906 vga_out8(0x3d5, cr66 | 0x02, par); 1906 vga_out8(0x3d5, cr66 | 0x02, par);
1907 udelay(10000); 1907 mdelay(10);
1908 1908
1909 vga_out8(0x3d4, 0x66, par); 1909 vga_out8(0x3d4, 0x66, par);
1910 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */ 1910 vga_out8(0x3d5, cr66 & ~0x02, par); /* clear reset flag */
1911 udelay(10000); 1911 mdelay(10);
1912 1912
1913 1913
1914 /* 1914 /*
@@ -1918,11 +1918,11 @@ static int savage_init_hw(struct savagefb_par *par)
1918 vga_out8(0x3d4, 0x3f, par); 1918 vga_out8(0x3d4, 0x3f, par);
1919 cr3f = vga_in8(0x3d5, par); 1919 cr3f = vga_in8(0x3d5, par);
1920 vga_out8(0x3d5, cr3f | 0x08, par); 1920 vga_out8(0x3d5, cr3f | 0x08, par);
1921 udelay(10000); 1921 mdelay(10);
1922 1922
1923 vga_out8(0x3d4, 0x3f, par); 1923 vga_out8(0x3d4, 0x3f, par);
1924 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */ 1924 vga_out8(0x3d5, cr3f & ~0x08, par); /* clear reset flags */
1925 udelay(10000); 1925 mdelay(10);
1926 1926
1927 /* Savage ramdac speeds */ 1927 /* Savage ramdac speeds */
1928 par->numClocks = 4; 1928 par->numClocks = 4;
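
The savagefb hunks replace udelay(10000) with mdelay(10). udelay() is intended for short busy-waits and can overflow or lose accuracy on some platforms when asked for several milliseconds, so multi-millisecond delays are normally spelled mdelay(), or msleep() where sleeping is allowed:

#include <linux/delay.h>

        udelay(50);     /* tens of microseconds: fine for udelay() */
        mdelay(10);     /* 10 ms busy-wait, usable in atomic context */
        msleep(10);     /* preferred when the caller may sleep */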
diff --git a/drivers/video/sh_mobile_hdmi.c b/drivers/video/sh_mobile_hdmi.c
index eafb19da2c07..930e550e752a 100644
--- a/drivers/video/sh_mobile_hdmi.c
+++ b/drivers/video/sh_mobile_hdmi.c
@@ -31,6 +31,7 @@
31 31
32#include "sh_mobile_lcdcfb.h" 32#include "sh_mobile_lcdcfb.h"
33 33
34/* HDMI Core Control Register (HTOP0) */
34#define HDMI_SYSTEM_CTRL 0x00 /* System control */ 35#define HDMI_SYSTEM_CTRL 0x00 /* System control */
35#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control, 36#define HDMI_L_R_DATA_SWAP_CTRL_RPKT 0x01 /* L/R data swap control,
36 bits 19..16 of 20-bit N for Audio Clock Regeneration packet */ 37 bits 19..16 of 20-bit N for Audio Clock Regeneration packet */
@@ -201,6 +202,68 @@
201#define HDMI_REVISION_ID 0xF1 /* Revision ID */ 202#define HDMI_REVISION_ID 0xF1 /* Revision ID */
202#define HDMI_TEST_MODE 0xFE /* Test mode */ 203#define HDMI_TEST_MODE 0xFE /* Test mode */
203 204
205/* HDMI Control Register (HTOP1) */
206#define HDMI_HTOP1_TEST_MODE 0x0000 /* Test mode */
207#define HDMI_HTOP1_VIDEO_INPUT 0x0008 /* VideoInput */
208#define HDMI_HTOP1_CORE_RSTN 0x000C /* CoreResetn */
209#define HDMI_HTOP1_PLLBW 0x0018 /* PLLBW */
210#define HDMI_HTOP1_CLK_TO_PHY 0x001C /* Clk to Phy */
211#define HDMI_HTOP1_VIDEO_INPUT2 0x0020 /* VideoInput2 */
212#define HDMI_HTOP1_TISEMP0_1 0x0024 /* tisemp0-1 */
213#define HDMI_HTOP1_TISEMP2_C 0x0028 /* tisemp2-c */
214#define HDMI_HTOP1_TISIDRV 0x002C /* tisidrv */
215#define HDMI_HTOP1_TISEN 0x0034 /* tisen */
216#define HDMI_HTOP1_TISDREN 0x0038 /* tisdren */
217#define HDMI_HTOP1_CISRANGE 0x003C /* cisrange */
218#define HDMI_HTOP1_ENABLE_SELECTOR 0x0040 /* Enable Selector */
219#define HDMI_HTOP1_MACRO_RESET 0x0044 /* Macro reset */
220#define HDMI_HTOP1_PLL_CALIBRATION 0x0048 /* PLL calibration */
221#define HDMI_HTOP1_RE_CALIBRATION 0x004C /* Re-calibration */
222#define HDMI_HTOP1_CURRENT 0x0050 /* Current */
223#define HDMI_HTOP1_PLL_LOCK_DETECT 0x0054 /* PLL lock detect */
224#define HDMI_HTOP1_PHY_TEST_MODE 0x0058 /* PHY Test Mode */
225#define HDMI_HTOP1_CLK_SET 0x0080 /* Clock Set */
226#define HDMI_HTOP1_DDC_FAIL_SAFE 0x0084 /* DDC fail safe */
227#define HDMI_HTOP1_PRBS 0x0088 /* PRBS */
228#define HDMI_HTOP1_EDID_AINC_CONTROL 0x008C /* EDID ainc Control */
229#define HDMI_HTOP1_HTOP_DCL_MODE 0x00FC /* Deep Color Mode */

230#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0 0x0100 /* Deep Color:FRC COEF0 */
231#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1 0x0104 /* Deep Color:FRC COEF1 */
232#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2 0x0108 /* Deep Color:FRC COEF2 */
233#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3 0x010C /* Deep Color:FRC COEF3 */
234#define HDMI_HTOP1_HTOP_DCL_FRC_COEF0_C 0x0110 /* Deep Color:FRC COEF0C */
235#define HDMI_HTOP1_HTOP_DCL_FRC_COEF1_C 0x0114 /* Deep Color:FRC COEF1C */
236#define HDMI_HTOP1_HTOP_DCL_FRC_COEF2_C 0x0118 /* Deep Color:FRC COEF2C */
237#define HDMI_HTOP1_HTOP_DCL_FRC_COEF3_C 0x011C /* Deep Color:FRC COEF3C */
238#define HDMI_HTOP1_HTOP_DCL_FRC_MODE 0x0120 /* Deep Color:FRC Mode */
239#define HDMI_HTOP1_HTOP_DCL_RECT_START1 0x0124 /* Deep Color:Rect Start1 */
240#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE1 0x0128 /* Deep Color:Rect Size1 */
241#define HDMI_HTOP1_HTOP_DCL_RECT_START2 0x012C /* Deep Color:Rect Start2 */
242#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE2 0x0130 /* Deep Color:Rect Size2 */
243#define HDMI_HTOP1_HTOP_DCL_RECT_START3 0x0134 /* Deep Color:Rect Start3 */
244#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE3 0x0138 /* Deep Color:Rect Size3 */
245#define HDMI_HTOP1_HTOP_DCL_RECT_START4 0x013C /* Deep Color:Rect Start4 */
246#define HDMI_HTOP1_HTOP_DCL_RECT_SIZE4 0x0140 /* Deep Color:Rect Size4 */
247#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1 0x0144 /* Deep Color:Fil Para Y1_1 */
248#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2 0x0148 /* Deep Color:Fil Para Y1_2 */
249#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1 0x014C /* Deep Color:Fil Para CB1_1 */
250#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2 0x0150 /* Deep Color:Fil Para CB1_2 */
251#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1 0x0154 /* Deep Color:Fil Para CR1_1 */
252#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2 0x0158 /* Deep Color:Fil Para CR1_2 */
253#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1 0x015C /* Deep Color:Fil Para Y2_1 */
254#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2 0x0160 /* Deep Color:Fil Para Y2_2 */
255#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1 0x0164 /* Deep Color:Fil Para CB2_1 */
256#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2 0x0168 /* Deep Color:Fil Para CB2_2 */
257#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1 0x016C /* Deep Color:Fil Para CR2_1 */
258#define HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2 0x0170 /* Deep Color:Fil Para CR2_2 */
259#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1 0x0174 /* Deep Color:Cor Para Y1 */
260#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1 0x0178 /* Deep Color:Cor Para CB1 */
261#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1 0x017C /* Deep Color:Cor Para CR1 */
262#define HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2 0x0180 /* Deep Color:Cor Para Y2 */
263#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2 0x0184 /* Deep Color:Cor Para CB2 */
264#define HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2 0x0188 /* Deep Color:Cor Para CR2 */
265#define HDMI_HTOP1_EDID_DATA_READ 0x0200 /* EDID Data Read 128Byte:0x03FC */
266
204enum hotplug_state { 267enum hotplug_state {
205 HDMI_HOTPLUG_DISCONNECTED, 268 HDMI_HOTPLUG_DISCONNECTED,
206 HDMI_HOTPLUG_CONNECTED, 269 HDMI_HOTPLUG_CONNECTED,
@@ -211,6 +274,7 @@ struct sh_hdmi {
211 struct sh_mobile_lcdc_entity entity; 274 struct sh_mobile_lcdc_entity entity;
212 275
213 void __iomem *base; 276 void __iomem *base;
277 void __iomem *htop1;
214 enum hotplug_state hp_state; /* hot-plug status */ 278 enum hotplug_state hp_state; /* hot-plug status */
215 u8 preprogrammed_vic; /* use a pre-programmed VIC or 279 u8 preprogrammed_vic; /* use a pre-programmed VIC or
216 the external mode */ 280 the external mode */
@@ -222,20 +286,66 @@ struct sh_hdmi {
222 struct delayed_work edid_work; 286 struct delayed_work edid_work;
223 struct fb_videomode mode; 287 struct fb_videomode mode;
224 struct fb_monspecs monspec; 288 struct fb_monspecs monspec;
289
290 /* register access functions */
291 void (*write)(struct sh_hdmi *hdmi, u8 data, u8 reg);
292 u8 (*read)(struct sh_hdmi *hdmi, u8 reg);
225}; 293};
226 294
227#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity) 295#define entity_to_sh_hdmi(e) container_of(e, struct sh_hdmi, entity)
228 296
229static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg) 297static void __hdmi_write8(struct sh_hdmi *hdmi, u8 data, u8 reg)
230{ 298{
231 iowrite8(data, hdmi->base + reg); 299 iowrite8(data, hdmi->base + reg);
232} 300}
233 301
234static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg) 302static u8 __hdmi_read8(struct sh_hdmi *hdmi, u8 reg)
235{ 303{
236 return ioread8(hdmi->base + reg); 304 return ioread8(hdmi->base + reg);
237} 305}
238 306
307static void __hdmi_write32(struct sh_hdmi *hdmi, u8 data, u8 reg)
308{
309 iowrite32((u32)data, hdmi->base + (reg * 4));
310 udelay(100);
311}
312
313static u8 __hdmi_read32(struct sh_hdmi *hdmi, u8 reg)
314{
315 return (u8)ioread32(hdmi->base + (reg * 4));
316}
317
318static void hdmi_write(struct sh_hdmi *hdmi, u8 data, u8 reg)
319{
320 hdmi->write(hdmi, data, reg);
321}
322
323static u8 hdmi_read(struct sh_hdmi *hdmi, u8 reg)
324{
325 return hdmi->read(hdmi, reg);
326}
327
328static void hdmi_bit_set(struct sh_hdmi *hdmi, u8 mask, u8 data, u8 reg)
329{
330 u8 val = hdmi_read(hdmi, reg);
331
332 val &= ~mask;
333 val |= (data & mask);
334
335 hdmi_write(hdmi, val, reg);
336}
337
338static void hdmi_htop1_write(struct sh_hdmi *hdmi, u32 data, u32 reg)
339{
340 iowrite32(data, hdmi->htop1 + reg);
341 udelay(100);
342}
343
344static u32 hdmi_htop1_read(struct sh_hdmi *hdmi, u32 reg)
345{
346 return ioread32(hdmi->htop1 + reg);
347}
348
239/* 349/*
240 * HDMI sound 350 * HDMI sound
241 */ 351 */
@@ -693,11 +803,11 @@ static void sh_hdmi_configure(struct sh_hdmi *hdmi)
693 msleep(10); 803 msleep(10);
694 804
695 /* PS mode b->d, reset PLLA and PLLB */ 805 /* PS mode b->d, reset PLLA and PLLB */
696 hdmi_write(hdmi, 0x4C, HDMI_SYSTEM_CTRL); 806 hdmi_bit_set(hdmi, 0xFC, 0x4C, HDMI_SYSTEM_CTRL);
697 807
698 udelay(10); 808 udelay(10);
699 809
700 hdmi_write(hdmi, 0x40, HDMI_SYSTEM_CTRL); 810 hdmi_bit_set(hdmi, 0xFC, 0x40, HDMI_SYSTEM_CTRL);
701} 811}
702 812
703static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi, 813static unsigned long sh_hdmi_rate_error(struct sh_hdmi *hdmi,
@@ -746,7 +856,9 @@ static int sh_hdmi_read_edid(struct sh_hdmi *hdmi, unsigned long *hdmi_rate,
746 /* Read EDID */ 856 /* Read EDID */
747 dev_dbg(hdmi->dev, "Read back EDID code:"); 857 dev_dbg(hdmi->dev, "Read back EDID code:");
748 for (i = 0; i < 128; i++) { 858 for (i = 0; i < 128; i++) {
749 edid[i] = hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW); 859 edid[i] = (hdmi->htop1) ?
860 (u8)hdmi_htop1_read(hdmi, HDMI_HTOP1_EDID_DATA_READ + (i * 4)) :
861 hdmi_read(hdmi, HDMI_EDID_KSV_FIFO_ACCESS_WINDOW);
750#ifdef DEBUG 862#ifdef DEBUG
751 if ((i % 16) == 0) { 863 if ((i % 16) == 0) {
752 printk(KERN_CONT "\n"); 864 printk(KERN_CONT "\n");
@@ -917,13 +1029,13 @@ static irqreturn_t sh_hdmi_hotplug(int irq, void *dev_id)
917 u8 status1, status2, mask1, mask2; 1029 u8 status1, status2, mask1, mask2;
918 1030
919 /* mode_b and PLLA and PLLB reset */ 1031 /* mode_b and PLLA and PLLB reset */
920 hdmi_write(hdmi, 0x2C, HDMI_SYSTEM_CTRL); 1032 hdmi_bit_set(hdmi, 0xFC, 0x2C, HDMI_SYSTEM_CTRL);
921 1033
922 /* How long shall reset be held? */ 1034 /* How long shall reset be held? */
923 udelay(10); 1035 udelay(10);
924 1036
925 /* mode_b and PLLA and PLLB reset release */ 1037 /* mode_b and PLLA and PLLB reset release */
926 hdmi_write(hdmi, 0x20, HDMI_SYSTEM_CTRL); 1038 hdmi_bit_set(hdmi, 0xFC, 0x20, HDMI_SYSTEM_CTRL);
927 1039
928 status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1); 1040 status1 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_1);
929 status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2); 1041 status2 = hdmi_read(hdmi, HDMI_INTERRUPT_STATUS_2);
@@ -1001,7 +1113,7 @@ static int sh_hdmi_display_on(struct sh_mobile_lcdc_entity *entity)
1001 */ 1113 */
1002 if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) { 1114 if (hdmi->hp_state == HDMI_HOTPLUG_EDID_DONE) {
1003 /* PS mode d->e. All functions are active */ 1115 /* PS mode d->e. All functions are active */
1004 hdmi_write(hdmi, 0x80, HDMI_SYSTEM_CTRL); 1116 hdmi_bit_set(hdmi, 0xFC, 0x80, HDMI_SYSTEM_CTRL);
1005 dev_dbg(hdmi->dev, "HDMI running\n"); 1117 dev_dbg(hdmi->dev, "HDMI running\n");
1006 } 1118 }
1007 1119
@@ -1016,7 +1128,7 @@ static void sh_hdmi_display_off(struct sh_mobile_lcdc_entity *entity)
1016 1128
1017 dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi); 1129 dev_dbg(hdmi->dev, "%s(%p)\n", __func__, hdmi);
1018 /* PS mode e->a */ 1130 /* PS mode e->a */
1019 hdmi_write(hdmi, 0x10, HDMI_SYSTEM_CTRL); 1131 hdmi_bit_set(hdmi, 0xFC, 0x10, HDMI_SYSTEM_CTRL);
1020} 1132}
1021 1133
1022static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = { 1134static const struct sh_mobile_lcdc_entity_ops sh_hdmi_ops = {
@@ -1110,10 +1222,58 @@ out:
1110 dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi); 1222 dev_dbg(hdmi->dev, "%s(%p): end\n", __func__, hdmi);
1111} 1223}
1112 1224
1225static void sh_hdmi_htop1_init(struct sh_hdmi *hdmi)
1226{
1227 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_MODE);
1228 hdmi_htop1_write(hdmi, 0x0000000b, 0x0010);
1229 hdmi_htop1_write(hdmi, 0x00006710, HDMI_HTOP1_HTOP_DCL_FRC_MODE);
1230 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_1);
1231 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y1_2);
1232 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_1);
1233 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB1_2);
1234 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_1);
1235 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR1_2);
1236 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_1);
1237 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_Y2_2);
1238 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_1);
1239 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CB2_2);
1240 hdmi_htop1_write(hdmi, 0x01020406, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_1);
1241 hdmi_htop1_write(hdmi, 0x07080806, HDMI_HTOP1_HTOP_DCL_FIL_PARA_CR2_2);
1242 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y1);
1243 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB1);
1244 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR1);
1245 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_Y2);
1246 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CB2);
1247 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_HTOP_DCL_COR_PARA_CR2);
1248 hdmi_htop1_write(hdmi, 0x00000008, HDMI_HTOP1_CURRENT);
1249 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP0_1);
1250 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_TISEMP2_C);
1251 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PHY_TEST_MODE);
1252 hdmi_htop1_write(hdmi, 0x00000081, HDMI_HTOP1_TISIDRV);
1253 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_PLLBW);
1254 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
1255 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
1256 hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
1257 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
1258 hdmi_htop1_write(hdmi, 0x00000016, HDMI_HTOP1_CISRANGE);
1259 msleep(100);
1260 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_ENABLE_SELECTOR);
1261 msleep(100);
1262 hdmi_htop1_write(hdmi, 0x00000003, HDMI_HTOP1_ENABLE_SELECTOR);
1263 hdmi_htop1_write(hdmi, 0x00000001, HDMI_HTOP1_MACRO_RESET);
1264 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISEN);
1265 hdmi_htop1_write(hdmi, 0x0000000f, HDMI_HTOP1_TISDREN);
1266 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT);
1267 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_CLK_TO_PHY);
1268 hdmi_htop1_write(hdmi, 0x00000000, HDMI_HTOP1_VIDEO_INPUT2);
1269 hdmi_htop1_write(hdmi, 0x0000000a, HDMI_HTOP1_CLK_SET);
1270}
1271
1113static int __init sh_hdmi_probe(struct platform_device *pdev) 1272static int __init sh_hdmi_probe(struct platform_device *pdev)
1114{ 1273{
1115 struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data; 1274 struct sh_mobile_hdmi_info *pdata = pdev->dev.platform_data;
1116 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1275 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1276 struct resource *htop1_res;
1117 int irq = platform_get_irq(pdev, 0), ret; 1277 int irq = platform_get_irq(pdev, 0), ret;
1118 struct sh_hdmi *hdmi; 1278 struct sh_hdmi *hdmi;
1119 long rate; 1279 long rate;
@@ -1121,6 +1281,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1121 if (!res || !pdata || irq < 0) 1281 if (!res || !pdata || irq < 0)
1122 return -ENODEV; 1282 return -ENODEV;
1123 1283
1284 htop1_res = NULL;
1285 if (pdata->flags & HDMI_HAS_HTOP1) {
1286 htop1_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
1287 if (!htop1_res) {
1288 dev_err(&pdev->dev, "htop1 needs register base\n");
1289 return -EINVAL;
1290 }
1291 }
1292
1124 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL); 1293 hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
1125 if (!hdmi) { 1294 if (!hdmi) {
1126 dev_err(&pdev->dev, "Cannot allocate device data\n"); 1295 dev_err(&pdev->dev, "Cannot allocate device data\n");
@@ -1138,6 +1307,15 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1138 goto egetclk; 1307 goto egetclk;
1139 } 1308 }
1140 1309
1310 /* select register access functions */
1311 if (pdata->flags & HDMI_32BIT_REG) {
1312 hdmi->write = __hdmi_write32;
1313 hdmi->read = __hdmi_read32;
1314 } else {
1315 hdmi->write = __hdmi_write8;
1316 hdmi->read = __hdmi_read8;
1317 }
1318
1141 /* An arbitrary relaxed pixclock just to get things started: from standard 480p */ 1319 /* An arbitrary relaxed pixclock just to get things started: from standard 480p */
1142 rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037)); 1320 rate = clk_round_rate(hdmi->hdmi_clk, PICOS2KHZ(37037));
1143 if (rate > 0) 1321 if (rate > 0)
@@ -1176,6 +1354,24 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1176 pm_runtime_enable(&pdev->dev); 1354 pm_runtime_enable(&pdev->dev);
1177 pm_runtime_get_sync(&pdev->dev); 1355 pm_runtime_get_sync(&pdev->dev);
1178 1356
1357 /* init interrupt polarity */
1358 if (pdata->flags & HDMI_OUTPUT_PUSH_PULL)
1359 hdmi_bit_set(hdmi, 0x02, 0x02, HDMI_SYSTEM_CTRL);
1360
1361 if (pdata->flags & HDMI_OUTPUT_POLARITY_HI)
1362 hdmi_bit_set(hdmi, 0x01, 0x01, HDMI_SYSTEM_CTRL);
1363
1364 /* enable htop1 register if needed */
1365 if (htop1_res) {
1366 hdmi->htop1 = ioremap(htop1_res->start, resource_size(htop1_res));
1367 if (!hdmi->htop1) {
1368 dev_err(&pdev->dev, "control register region already claimed\n");
1369 ret = -ENOMEM;
1370 goto emap_htop1;
1371 }
1372 sh_hdmi_htop1_init(hdmi);
1373 }
1374
1179 /* Product and revision IDs are 0 in sh-mobile version */ 1375 /* Product and revision IDs are 0 in sh-mobile version */
1180 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n", 1376 dev_info(&pdev->dev, "Detected HDMI controller 0x%x:0x%x\n",
1181 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID)); 1377 hdmi_read(hdmi, HDMI_PRODUCT_ID), hdmi_read(hdmi, HDMI_REVISION_ID));
@@ -1199,6 +1395,9 @@ static int __init sh_hdmi_probe(struct platform_device *pdev)
1199ecodec: 1395ecodec:
1200 free_irq(irq, hdmi); 1396 free_irq(irq, hdmi);
1201ereqirq: 1397ereqirq:
1398 if (hdmi->htop1)
1399 iounmap(hdmi->htop1);
1400emap_htop1:
1202 pm_runtime_put(&pdev->dev); 1401 pm_runtime_put(&pdev->dev);
1203 pm_runtime_disable(&pdev->dev); 1402 pm_runtime_disable(&pdev->dev);
1204 iounmap(hdmi->base); 1403 iounmap(hdmi->base);
@@ -1230,6 +1429,8 @@ static int __exit sh_hdmi_remove(struct platform_device *pdev)
1230 pm_runtime_disable(&pdev->dev); 1429 pm_runtime_disable(&pdev->dev);
1231 clk_disable(hdmi->hdmi_clk); 1430 clk_disable(hdmi->hdmi_clk);
1232 clk_put(hdmi->hdmi_clk); 1431 clk_put(hdmi->hdmi_clk);
1432 if (hdmi->htop1)
1433 iounmap(hdmi->htop1);
1233 iounmap(hdmi->base); 1434 iounmap(hdmi->base);
1234 release_mem_region(res->start, resource_size(res)); 1435 release_mem_region(res->start, resource_size(res));
1235 kfree(hdmi); 1436 kfree(hdmi);
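
The sh_mobile_hdmi change hides the 8-bit versus 32-bit register layout behind read/write function pointers selected once at probe time from the platform flags, so the body of the driver keeps calling hdmi_read()/hdmi_write() unchanged. A condensed sketch of that indirection (struct mychip, wide_bus and CTRL_REG are illustrative names only):

#include <linux/io.h>
#include <linux/types.h>

struct mychip {
        void __iomem *base;
        void (*write)(struct mychip *c, u8 data, u8 reg);
        u8 (*read)(struct mychip *c, u8 reg);
};

static void write8(struct mychip *c, u8 data, u8 reg)
{
        iowrite8(data, c->base + reg);
}

static void write32(struct mychip *c, u8 data, u8 reg)
{
        iowrite32(data, c->base + (reg * 4));   /* registers on 32-bit strides */
}

/* probe: pick the accessors once */
        chip->write = wide_bus ? write32 : write8;
/* everywhere else: */
        chip->write(chip, 0x40, CTRL_REG);

The matching read accessors follow the same pattern, and wrappers like hdmi_bit_set() can then do read-modify-write without caring which bus width sits underneath.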
diff --git a/drivers/video/sis/init.h b/drivers/video/sis/init.h
index aff73842d877..85d6738b6c64 100644
--- a/drivers/video/sis/init.h
+++ b/drivers/video/sis/init.h
@@ -105,51 +105,6 @@ static const unsigned short ModeIndex_1920x1440[] = {0x68, 0x69, 0x00, 0x6b};
105static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00}; 105static const unsigned short ModeIndex_300_2048x1536[]= {0x6c, 0x6d, 0x00, 0x00};
106static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e}; 106static const unsigned short ModeIndex_310_2048x1536[]= {0x6c, 0x6d, 0x00, 0x6e};
107 107
108static const unsigned short SiS_DRAMType[17][5]={
109 {0x0C,0x0A,0x02,0x40,0x39},
110 {0x0D,0x0A,0x01,0x40,0x48},
111 {0x0C,0x09,0x02,0x20,0x35},
112 {0x0D,0x09,0x01,0x20,0x44},
113 {0x0C,0x08,0x02,0x10,0x31},
114 {0x0D,0x08,0x01,0x10,0x40},
115 {0x0C,0x0A,0x01,0x20,0x34},
116 {0x0C,0x09,0x01,0x08,0x32},
117 {0x0B,0x08,0x02,0x08,0x21},
118 {0x0C,0x08,0x01,0x08,0x30},
119 {0x0A,0x08,0x02,0x04,0x11},
120 {0x0B,0x0A,0x01,0x10,0x28},
121 {0x09,0x08,0x02,0x02,0x01},
122 {0x0B,0x09,0x01,0x08,0x24},
123 {0x0B,0x08,0x01,0x04,0x20},
124 {0x0A,0x08,0x01,0x02,0x10},
125 {0x09,0x08,0x01,0x01,0x00}
126};
127
128static const unsigned short SiS_SDRDRAM_TYPE[13][5] =
129{
130 { 2,12, 9,64,0x35},
131 { 1,13, 9,64,0x44},
132 { 2,12, 8,32,0x31},
133 { 2,11, 9,32,0x25},
134 { 1,12, 9,32,0x34},
135 { 1,13, 8,32,0x40},
136 { 2,11, 8,16,0x21},
137 { 1,12, 8,16,0x30},
138 { 1,11, 9,16,0x24},
139 { 1,11, 8, 8,0x20},
140 { 2, 9, 8, 4,0x01},
141 { 1,10, 8, 4,0x10},
142 { 1, 9, 8, 2,0x00}
143};
144
145static const unsigned short SiS_DDRDRAM_TYPE[4][5] =
146{
147 { 2,12, 9,64,0x35},
148 { 2,12, 8,32,0x31},
149 { 2,11, 8,16,0x21},
150 { 2, 9, 8, 4,0x01}
151};
152
153static const unsigned char SiS_MDA_DAC[] = 108static const unsigned char SiS_MDA_DAC[] =
154{ 109{
155 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00, 110 0x00,0x00,0x00,0x00,0x00,0x00,0x00,0x00,
diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c
index 078ca2167d6f..a7a48db64ce2 100644
--- a/drivers/video/sis/sis_main.c
+++ b/drivers/video/sis/sis_main.c
@@ -4222,6 +4222,26 @@ sisfb_post_300_buswidth(struct sis_video_info *ivideo)
4222 return 1; /* 32bit */ 4222 return 1; /* 32bit */
4223} 4223}
4224 4224
4225static const unsigned short __devinitconst SiS_DRAMType[17][5] = {
4226 {0x0C,0x0A,0x02,0x40,0x39},
4227 {0x0D,0x0A,0x01,0x40,0x48},
4228 {0x0C,0x09,0x02,0x20,0x35},
4229 {0x0D,0x09,0x01,0x20,0x44},
4230 {0x0C,0x08,0x02,0x10,0x31},
4231 {0x0D,0x08,0x01,0x10,0x40},
4232 {0x0C,0x0A,0x01,0x20,0x34},
4233 {0x0C,0x09,0x01,0x08,0x32},
4234 {0x0B,0x08,0x02,0x08,0x21},
4235 {0x0C,0x08,0x01,0x08,0x30},
4236 {0x0A,0x08,0x02,0x04,0x11},
4237 {0x0B,0x0A,0x01,0x10,0x28},
4238 {0x09,0x08,0x02,0x02,0x01},
4239 {0x0B,0x09,0x01,0x08,0x24},
4240 {0x0B,0x08,0x01,0x04,0x20},
4241 {0x0A,0x08,0x01,0x02,0x10},
4242 {0x09,0x08,0x01,0x01,0x00}
4243};
4244
4225static int __devinit 4245static int __devinit
4226sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth, 4246sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth,
4227 int PseudoRankCapacity, int PseudoAdrPinCount, 4247 int PseudoRankCapacity, int PseudoAdrPinCount,
@@ -4231,27 +4251,8 @@ sisfb_post_300_rwtest(struct sis_video_info *ivideo, int iteration, int buswidth
4231 unsigned short sr14; 4251 unsigned short sr14;
4232 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid; 4252 unsigned int k, RankCapacity, PageCapacity, BankNumHigh, BankNumMid;
4233 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage; 4253 unsigned int PhysicalAdrOtherPage, PhysicalAdrHigh, PhysicalAdrHalfPage;
4234 static const unsigned short SiS_DRAMType[17][5] = {
4235 {0x0C,0x0A,0x02,0x40,0x39},
4236 {0x0D,0x0A,0x01,0x40,0x48},
4237 {0x0C,0x09,0x02,0x20,0x35},
4238 {0x0D,0x09,0x01,0x20,0x44},
4239 {0x0C,0x08,0x02,0x10,0x31},
4240 {0x0D,0x08,0x01,0x10,0x40},
4241 {0x0C,0x0A,0x01,0x20,0x34},
4242 {0x0C,0x09,0x01,0x08,0x32},
4243 {0x0B,0x08,0x02,0x08,0x21},
4244 {0x0C,0x08,0x01,0x08,0x30},
4245 {0x0A,0x08,0x02,0x04,0x11},
4246 {0x0B,0x0A,0x01,0x10,0x28},
4247 {0x09,0x08,0x02,0x02,0x01},
4248 {0x0B,0x09,0x01,0x08,0x24},
4249 {0x0B,0x08,0x01,0x04,0x20},
4250 {0x0A,0x08,0x01,0x02,0x10},
4251 {0x09,0x08,0x01,0x01,0x00}
4252 };
4253 4254
4254 for(k = 0; k <= 16; k++) { 4255 for(k = 0; k < ARRAY_SIZE(SiS_DRAMType); k++) {
4255 4256
4256 RankCapacity = buswidth * SiS_DRAMType[k][3]; 4257 RankCapacity = buswidth * SiS_DRAMType[k][3];
4257 4258
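
The sisfb change moves the SiS_DRAMType table out of the header, drops the copy that shadowed it inside sisfb_post_300_rwtest(), and replaces the hard-coded loop bound "k <= 16" with ARRAY_SIZE(), so the bound tracks the table automatically if its length ever changes. In general:

#include <linux/kernel.h>       /* ARRAY_SIZE() */

static const unsigned short dram_type[][5] = {
        {0x0C, 0x0A, 0x02, 0x40, 0x39},
        {0x0D, 0x0A, 0x01, 0x40, 0x48},
        /* ... */
};

        for (k = 0; k < ARRAY_SIZE(dram_type); k++)
                try_config(dram_type[k]);       /* hypothetical per-entry helper */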
diff --git a/drivers/video/skeletonfb.c b/drivers/video/skeletonfb.c
index 30f7a815a62b..5b6abc6de84b 100644
--- a/drivers/video/skeletonfb.c
+++ b/drivers/video/skeletonfb.c
@@ -1036,6 +1036,6 @@ static void __exit xxxfb_exit(void)
1036 */ 1036 */
1037 1037
1038module_init(xxxfb_init); 1038module_init(xxxfb_init);
1039module_exit(xxxfb_remove); 1039module_exit(xxxfb_exit);
1040 1040
1041MODULE_LICENSE("GPL"); 1041MODULE_LICENSE("GPL");
diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
index ccbfef5e828f..af3ef27ad36c 100644
--- a/drivers/video/smscufx.c
+++ b/drivers/video/smscufx.c
@@ -846,7 +846,7 @@ static void ufx_raw_rect(struct ufx_data *dev, u16 *cmd, int x, int y,
846 } 846 }
847} 847}
848 848
849int ufx_handle_damage(struct ufx_data *dev, int x, int y, 849static int ufx_handle_damage(struct ufx_data *dev, int x, int y,
850 int width, int height) 850 int width, int height)
851{ 851{
852 size_t packed_line_len = ALIGN((width * 2), 4); 852 size_t packed_line_len = ALIGN((width * 2), 4);
@@ -1083,7 +1083,7 @@ static int ufx_ops_open(struct fb_info *info, int user)
1083 1083
1084 struct fb_deferred_io *fbdefio; 1084 struct fb_deferred_io *fbdefio;
1085 1085
1086 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); 1086 fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
1087 1087
1088 if (fbdefio) { 1088 if (fbdefio) {
1089 fbdefio->delay = UFX_DEFIO_WRITE_DELAY; 1089 fbdefio->delay = UFX_DEFIO_WRITE_DELAY;
diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
index 7af1e8166182..8af64148294b 100644
--- a/drivers/video/udlfb.c
+++ b/drivers/video/udlfb.c
@@ -893,7 +893,7 @@ static int dlfb_ops_open(struct fb_info *info, int user)
893 893
894 struct fb_deferred_io *fbdefio; 894 struct fb_deferred_io *fbdefio;
895 895
896 fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL); 896 fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
897 897
898 if (fbdefio) { 898 if (fbdefio) {
899 fbdefio->delay = DL_DEFIO_WRITE_DELAY; 899 fbdefio->delay = DL_DEFIO_WRITE_DELAY;
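
Both smscufx and udlfb switch the fb_deferred_io allocation from kmalloc() to kzalloc(). Only a couple of members (delay, deferred_io) are filled in by hand, so zeroing the structure up front makes the untouched fields start out as 0/NULL rather than whatever the allocator left behind. The usual shape (my_deferred_io is a hypothetical callback):

        struct fb_deferred_io *fbdefio;

        fbdefio = kzalloc(sizeof(*fbdefio), GFP_KERNEL);
        if (fbdefio) {
                fbdefio->delay = HZ / 20;               /* flush every 50 ms */
                fbdefio->deferred_io = my_deferred_io;  /* everything else stays zeroed */
        }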
diff --git a/drivers/video/via/viafbdev.c b/drivers/video/via/viafbdev.c
index 0c8837565bc7..c80e770e1800 100644
--- a/drivers/video/via/viafbdev.c
+++ b/drivers/video/via/viafbdev.c
@@ -1276,17 +1276,12 @@ static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
1276static ssize_t viafb_dfph_proc_write(struct file *file, 1276static ssize_t viafb_dfph_proc_write(struct file *file,
1277 const char __user *buffer, size_t count, loff_t *pos) 1277 const char __user *buffer, size_t count, loff_t *pos)
1278{ 1278{
1279 char buf[20]; 1279 int err;
1280 u8 reg_val = 0; 1280 u8 reg_val;
1281 unsigned long length; 1281 err = kstrtou8_from_user(buffer, count, 0, &reg_val);
1282 if (count < 1) 1282 if (err)
1283 return -EINVAL; 1283 return err;
1284 length = count > 20 ? 20 : count; 1284
1285 if (copy_from_user(&buf[0], buffer, length))
1286 return -EFAULT;
1287 buf[length - 1] = '\0'; /*Ensure end string */
1288 if (kstrtou8(buf, 0, &reg_val) < 0)
1289 return -EINVAL;
1290 viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f); 1285 viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
1291 return count; 1286 return count;
1292} 1287}
@@ -1316,17 +1311,12 @@ static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
1316static ssize_t viafb_dfpl_proc_write(struct file *file, 1311static ssize_t viafb_dfpl_proc_write(struct file *file,
1317 const char __user *buffer, size_t count, loff_t *pos) 1312 const char __user *buffer, size_t count, loff_t *pos)
1318{ 1313{
1319 char buf[20]; 1314 int err;
1320 u8 reg_val = 0; 1315 u8 reg_val;
1321 unsigned long length; 1316 err = kstrtou8_from_user(buffer, count, 0, &reg_val);
1322 if (count < 1) 1317 if (err)
1323 return -EINVAL; 1318 return err;
1324 length = count > 20 ? 20 : count; 1319
1325 if (copy_from_user(&buf[0], buffer, length))
1326 return -EFAULT;
1327 buf[length - 1] = '\0'; /*Ensure end string */
1328 if (kstrtou8(buf, 0, &reg_val) < 0)
1329 return -EINVAL;
1330 viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f); 1320 viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
1331 return count; 1321 return count;
1332} 1322}
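
The viafb proc handlers above shrink to a single kstrtou8_from_user() call, which copies the user buffer, bounds and NUL-terminates it, and parses the number in one step, returning the real errno (-EFAULT, -EINVAL, -ERANGE) instead of folding every failure into -EINVAL. The essential shape of such a write handler (foo_apply() is a placeholder for whatever consumes the value):

static ssize_t foo_proc_write(struct file *file, const char __user *buffer,
                              size_t count, loff_t *pos)
{
        u8 val;
        int err;

        err = kstrtou8_from_user(buffer, count, 0, &val);       /* base 0: auto-detect */
        if (err)
                return err;

        foo_apply(val);
        return count;                   /* whole write consumed */
}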
diff --git a/drivers/watchdog/sp805_wdt.c b/drivers/watchdog/sp805_wdt.c
index afcd13676542..e4841c36798b 100644
--- a/drivers/watchdog/sp805_wdt.c
+++ b/drivers/watchdog/sp805_wdt.c
@@ -4,7 +4,7 @@
4 * Watchdog driver for ARM SP805 watchdog module 4 * Watchdog driver for ARM SP805 watchdog module
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2 or later. This program is licensed "as is" without any 10 * License version 2 or later. This program is licensed "as is" without any
@@ -331,6 +331,6 @@ static struct amba_driver sp805_wdt_driver = {
331 331
332module_amba_driver(sp805_wdt_driver); 332module_amba_driver(sp805_wdt_driver);
333 333
334MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>"); 334MODULE_AUTHOR("Viresh Kumar <viresh.linux@gmail.com>");
335MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); 335MODULE_DESCRIPTION("ARM SP805 Watchdog Driver");
336MODULE_LICENSE("GPL"); 336MODULE_LICENSE("GPL");
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 6908e4ce2a0d..7595581d032c 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -827,6 +827,9 @@ int bind_evtchn_to_irq(unsigned int evtchn)
827 handle_edge_irq, "event"); 827 handle_edge_irq, "event");
828 828
829 xen_irq_info_evtchn_init(irq, evtchn); 829 xen_irq_info_evtchn_init(irq, evtchn);
830 } else {
831 struct irq_info *info = info_for_irq(irq);
832 WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
830 } 833 }
831 834
832out: 835out:
@@ -862,6 +865,9 @@ static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
862 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi); 865 xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
863 866
864 bind_evtchn_to_cpu(evtchn, cpu); 867 bind_evtchn_to_cpu(evtchn, cpu);
868 } else {
869 struct irq_info *info = info_for_irq(irq);
870 WARN_ON(info == NULL || info->type != IRQT_IPI);
865 } 871 }
866 872
867 out: 873 out:
@@ -939,6 +945,9 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
939 xen_irq_info_virq_init(cpu, irq, evtchn, virq); 945 xen_irq_info_virq_init(cpu, irq, evtchn, virq);
940 946
941 bind_evtchn_to_cpu(evtchn, cpu); 947 bind_evtchn_to_cpu(evtchn, cpu);
948 } else {
949 struct irq_info *info = info_for_irq(irq);
950 WARN_ON(info == NULL || info->type != IRQT_VIRQ);
942 } 951 }
943 952
944out: 953out:
diff --git a/drivers/xen/pci.c b/drivers/xen/pci.c
index b84bf0b6cc34..18fff88254eb 100644
--- a/drivers/xen/pci.c
+++ b/drivers/xen/pci.c
@@ -59,7 +59,7 @@ static int xen_add_device(struct device *dev)
59 59
60#ifdef CONFIG_ACPI 60#ifdef CONFIG_ACPI
61 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev); 61 handle = DEVICE_ACPI_HANDLE(&pci_dev->dev);
62 if (!handle) 62 if (!handle && pci_dev->bus->bridge)
63 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge); 63 handle = DEVICE_ACPI_HANDLE(pci_dev->bus->bridge);
64#ifdef CONFIG_PCI_IOV 64#ifdef CONFIG_PCI_IOV
65 if (!handle && pci_dev->is_virtfn) 65 if (!handle && pci_dev->is_virtfn)
diff --git a/drivers/xen/tmem.c b/drivers/xen/tmem.c
index dcb79521e6c8..89f264c67420 100644
--- a/drivers/xen/tmem.c
+++ b/drivers/xen/tmem.c
@@ -269,7 +269,7 @@ static inline struct tmem_oid oswiz(unsigned type, u32 ind)
269} 269}
270 270
271/* returns 0 if the page was successfully put into frontswap, -1 if not */ 271/* returns 0 if the page was successfully put into frontswap, -1 if not */
272static int tmem_frontswap_put_page(unsigned type, pgoff_t offset, 272static int tmem_frontswap_store(unsigned type, pgoff_t offset,
273 struct page *page) 273 struct page *page)
274{ 274{
275 u64 ind64 = (u64)offset; 275 u64 ind64 = (u64)offset;
@@ -295,7 +295,7 @@ static int tmem_frontswap_put_page(unsigned type, pgoff_t offset,
295 * returns 0 if the page was successfully gotten from frontswap, -1 if 295 * returns 0 if the page was successfully gotten from frontswap, -1 if
296 * was not present (should never happen!) 296 * was not present (should never happen!)
297 */ 297 */
298static int tmem_frontswap_get_page(unsigned type, pgoff_t offset, 298static int tmem_frontswap_load(unsigned type, pgoff_t offset,
299 struct page *page) 299 struct page *page)
300{ 300{
301 u64 ind64 = (u64)offset; 301 u64 ind64 = (u64)offset;
@@ -362,8 +362,8 @@ static int __init no_frontswap(char *s)
362__setup("nofrontswap", no_frontswap); 362__setup("nofrontswap", no_frontswap);
363 363
364static struct frontswap_ops __initdata tmem_frontswap_ops = { 364static struct frontswap_ops __initdata tmem_frontswap_ops = {
365 .put_page = tmem_frontswap_put_page, 365 .store = tmem_frontswap_store,
366 .get_page = tmem_frontswap_get_page, 366 .load = tmem_frontswap_load,
367 .invalidate_page = tmem_frontswap_flush_page, 367 .invalidate_page = tmem_frontswap_flush_page,
368 .invalidate_area = tmem_frontswap_flush_area, 368 .invalidate_area = tmem_frontswap_flush_area,
369 .init = tmem_frontswap_init 369 .init = tmem_frontswap_init
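
The tmem hunk tracks the frontswap API rename: backends now fill in .store and .load instead of .put_page and .get_page. As a hedged illustration (not taken from the patch), a do-nothing backend that rejects every page would be wired up roughly like this, assuming the frontswap_register_ops() entry point from <linux/frontswap.h>:

#include <linux/frontswap.h>
#include <linux/mm_types.h>

/* Reject every page: frontswap then falls back to the real swap device. */
static int example_store(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* could not take the page */
}

static int example_load(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* nothing was ever stored, so nothing can be loaded */
}

static void example_invalidate_page(unsigned type, pgoff_t offset) { }
static void example_invalidate_area(unsigned type) { }
static void example_init(unsigned type) { }

static struct frontswap_ops example_frontswap_ops = {
        .store           = example_store,
        .load            = example_load,
        .invalidate_page = example_invalidate_page,
        .invalidate_area = example_invalidate_area,
        .init            = example_init,
};

/* a module init function would then call: frontswap_register_ops(&example_frontswap_ops); */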
diff --git a/fs/9p/vfs_inode_dotl.c b/fs/9p/vfs_inode_dotl.c
index a1e6c990cd41..e3dd2a1e2bfc 100644
--- a/fs/9p/vfs_inode_dotl.c
+++ b/fs/9p/vfs_inode_dotl.c
@@ -68,24 +68,6 @@ static gid_t v9fs_get_fsgid_for_create(struct inode *dir_inode)
68 return current_fsgid(); 68 return current_fsgid();
69} 69}
70 70
71/**
72 * v9fs_dentry_from_dir_inode - helper function to get the dentry from
73 * dir inode.
74 *
75 */
76
77static struct dentry *v9fs_dentry_from_dir_inode(struct inode *inode)
78{
79 struct dentry *dentry;
80
81 spin_lock(&inode->i_lock);
82 /* Directory should have only one entry. */
83 BUG_ON(S_ISDIR(inode->i_mode) && !list_is_singular(&inode->i_dentry));
84 dentry = list_entry(inode->i_dentry.next, struct dentry, d_alias);
85 spin_unlock(&inode->i_lock);
86 return dentry;
87}
88
89static int v9fs_test_inode_dotl(struct inode *inode, void *data) 71static int v9fs_test_inode_dotl(struct inode *inode, void *data)
90{ 72{
91 struct v9fs_inode *v9inode = V9FS_I(inode); 73 struct v9fs_inode *v9inode = V9FS_I(inode);
@@ -415,7 +397,7 @@ static int v9fs_vfs_mkdir_dotl(struct inode *dir,
415 if (dir->i_mode & S_ISGID) 397 if (dir->i_mode & S_ISGID)
416 omode |= S_ISGID; 398 omode |= S_ISGID;
417 399
418 dir_dentry = v9fs_dentry_from_dir_inode(dir); 400 dir_dentry = dentry->d_parent;
419 dfid = v9fs_fid_lookup(dir_dentry); 401 dfid = v9fs_fid_lookup(dir_dentry);
420 if (IS_ERR(dfid)) { 402 if (IS_ERR(dfid)) {
421 err = PTR_ERR(dfid); 403 err = PTR_ERR(dfid);
@@ -793,7 +775,7 @@ v9fs_vfs_link_dotl(struct dentry *old_dentry, struct inode *dir,
793 dir->i_ino, old_dentry->d_name.name, dentry->d_name.name); 775 dir->i_ino, old_dentry->d_name.name, dentry->d_name.name);
794 776
795 v9ses = v9fs_inode2v9ses(dir); 777 v9ses = v9fs_inode2v9ses(dir);
796 dir_dentry = v9fs_dentry_from_dir_inode(dir); 778 dir_dentry = dentry->d_parent;
797 dfid = v9fs_fid_lookup(dir_dentry); 779 dfid = v9fs_fid_lookup(dir_dentry);
798 if (IS_ERR(dfid)) 780 if (IS_ERR(dfid))
799 return PTR_ERR(dfid); 781 return PTR_ERR(dfid);
@@ -858,7 +840,7 @@ v9fs_vfs_mknod_dotl(struct inode *dir, struct dentry *dentry, umode_t omode,
858 return -EINVAL; 840 return -EINVAL;
859 841
860 v9ses = v9fs_inode2v9ses(dir); 842 v9ses = v9fs_inode2v9ses(dir);
861 dir_dentry = v9fs_dentry_from_dir_inode(dir); 843 dir_dentry = dentry->d_parent;
862 dfid = v9fs_fid_lookup(dir_dentry); 844 dfid = v9fs_fid_lookup(dir_dentry);
863 if (IS_ERR(dfid)) { 845 if (IS_ERR(dfid)) {
864 err = PTR_ERR(dfid); 846 err = PTR_ERR(dfid);
diff --git a/fs/affs/affs.h b/fs/affs/affs.h
index 45a0ce45d7b4..1fceb320d2f2 100644
--- a/fs/affs/affs.h
+++ b/fs/affs/affs.h
@@ -18,14 +18,6 @@
18#define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey]) 18#define AFFS_GET_HASHENTRY(data,hashkey) be32_to_cpu(((struct dir_front *)data)->hashtable[hashkey])
19#define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)]) 19#define AFFS_BLOCK(sb, bh, blk) (AFFS_HEAD(bh)->table[AFFS_SB(sb)->s_hashsize-1-(blk)])
20 20
21#ifdef __LITTLE_ENDIAN
22#define BO_EXBITS 0x18UL
23#elif defined(__BIG_ENDIAN)
24#define BO_EXBITS 0x00UL
25#else
26#error Endianness must be known for affs to work.
27#endif
28
29#define AFFS_HEAD(bh) ((struct affs_head *)(bh)->b_data) 21#define AFFS_HEAD(bh) ((struct affs_head *)(bh)->b_data)
30#define AFFS_TAIL(sb, bh) ((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail))) 22#define AFFS_TAIL(sb, bh) ((struct affs_tail *)((bh)->b_data+(sb)->s_blocksize-sizeof(struct affs_tail)))
31#define AFFS_ROOT_HEAD(bh) ((struct affs_root_head *)(bh)->b_data) 23#define AFFS_ROOT_HEAD(bh) ((struct affs_root_head *)(bh)->b_data)
diff --git a/fs/aio.c b/fs/aio.c
index e7f2fad7b4ce..55c4c7656053 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -134,9 +134,9 @@ static int aio_setup_ring(struct kioctx *ctx)
134 info->mmap_size = nr_pages * PAGE_SIZE; 134 info->mmap_size = nr_pages * PAGE_SIZE;
135 dprintk("attempting mmap of %lu bytes\n", info->mmap_size); 135 dprintk("attempting mmap of %lu bytes\n", info->mmap_size);
136 down_write(&ctx->mm->mmap_sem); 136 down_write(&ctx->mm->mmap_sem);
137 info->mmap_base = do_mmap(NULL, 0, info->mmap_size, 137 info->mmap_base = do_mmap_pgoff(NULL, 0, info->mmap_size,
138 PROT_READ|PROT_WRITE, MAP_ANONYMOUS|MAP_PRIVATE, 138 PROT_READ|PROT_WRITE,
139 0); 139 MAP_ANONYMOUS|MAP_PRIVATE, 0);
140 if (IS_ERR((void *)info->mmap_base)) { 140 if (IS_ERR((void *)info->mmap_base)) {
141 up_write(&ctx->mm->mmap_sem); 141 up_write(&ctx->mm->mmap_sem);
142 info->mmap_size = 0; 142 info->mmap_size = 0;
@@ -1446,13 +1446,13 @@ static ssize_t aio_setup_vectored_rw(int type, struct kiocb *kiocb, bool compat)
1446 ret = compat_rw_copy_check_uvector(type, 1446 ret = compat_rw_copy_check_uvector(type,
1447 (struct compat_iovec __user *)kiocb->ki_buf, 1447 (struct compat_iovec __user *)kiocb->ki_buf,
1448 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, 1448 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1449 &kiocb->ki_iovec, 1); 1449 &kiocb->ki_iovec);
1450 else 1450 else
1451#endif 1451#endif
1452 ret = rw_copy_check_uvector(type, 1452 ret = rw_copy_check_uvector(type,
1453 (struct iovec __user *)kiocb->ki_buf, 1453 (struct iovec __user *)kiocb->ki_buf,
1454 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec, 1454 kiocb->ki_nbytes, 1, &kiocb->ki_inline_vec,
1455 &kiocb->ki_iovec, 1); 1455 &kiocb->ki_iovec);
1456 if (ret < 0) 1456 if (ret < 0)
1457 goto out; 1457 goto out;
1458 1458
diff --git a/fs/attr.c b/fs/attr.c
index 584620e5dee5..0da90951d277 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -176,6 +176,11 @@ int notify_change(struct dentry * dentry, struct iattr * attr)
176 return -EPERM; 176 return -EPERM;
177 } 177 }
178 178
179 if ((ia_valid & ATTR_SIZE) && IS_I_VERSION(inode)) {
180 if (attr->ia_size != inode->i_size)
181 inode_inc_iversion(inode);
182 }
183
179 if ((ia_valid & ATTR_MODE)) { 184 if ((ia_valid & ATTR_MODE)) {
180 umode_t amode = attr->ia_mode; 185 umode_t amode = attr->ia_mode;
181 /* Flag setting protected by i_mutex */ 186 /* Flag setting protected by i_mutex */
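
With the attr.c hunk above, a truncate that actually changes i_size now bumps i_version on filesystems mounted with MS_I_VERSION. A small illustrative helper (not part of the patch) shows how a change-attribute consumer, such as an NFS server, could notice that bump:

#include <linux/fs.h>

/*
 * Compare a previously sampled i_version against the current one.  Only
 * meaningful on IS_I_VERSION() filesystems, which is exactly the case the
 * notify_change() hunk above covers for ATTR_SIZE.
 */
static bool example_inode_changed(const struct inode *inode, u64 old_version)
{
        return IS_I_VERSION(inode) && inode->i_version != old_version;
}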
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index e658dd134b95..1b52956afe33 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -329,7 +329,6 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
329 if (!size) 329 if (!size)
330 return addr; 330 return addr;
331 331
332 down_write(&current->mm->mmap_sem);
333 /* 332 /*
334 * total_size is the size of the ELF (interpreter) image. 333 * total_size is the size of the ELF (interpreter) image.
335 * The _first_ mmap needs to know the full size, otherwise 334 * The _first_ mmap needs to know the full size, otherwise
@@ -340,13 +339,12 @@ static unsigned long elf_map(struct file *filep, unsigned long addr,
340 */ 339 */
341 if (total_size) { 340 if (total_size) {
342 total_size = ELF_PAGEALIGN(total_size); 341 total_size = ELF_PAGEALIGN(total_size);
343 map_addr = do_mmap(filep, addr, total_size, prot, type, off); 342 map_addr = vm_mmap(filep, addr, total_size, prot, type, off);
344 if (!BAD_ADDR(map_addr)) 343 if (!BAD_ADDR(map_addr))
345 do_munmap(current->mm, map_addr+size, total_size-size); 344 vm_munmap(map_addr+size, total_size-size);
346 } else 345 } else
347 map_addr = do_mmap(filep, addr, size, prot, type, off); 346 map_addr = vm_mmap(filep, addr, size, prot, type, off);
348 347
349 up_write(&current->mm->mmap_sem);
350 return(map_addr); 348 return(map_addr);
351} 349}
352 350
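
The elf_map() hunk above (and the binfmt_flat one below) swap do_mmap()/do_munmap() plus explicit mmap_sem handling for vm_mmap()/vm_munmap(), which take current->mm->mmap_sem internally. A rough sketch of the resulting call pattern, using an invented helper name:

#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/err.h>
#include <linux/fs.h>

static unsigned long example_map_and_unmap(struct file *filp, unsigned long size)
{
        unsigned long addr;

        /* no down_write(&current->mm->mmap_sem) needed around these calls */
        addr = vm_mmap(filp, 0, size, PROT_READ, MAP_PRIVATE, 0);
        if (IS_ERR_VALUE(addr))
                return addr;    /* a negative errno encoded in the address */

        /* ... use the mapping ... */

        vm_munmap(addr, size);
        return 0;
}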
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 6b2daf99fab8..178cb70acc26 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -562,7 +562,7 @@ static int load_flat_file(struct linux_binprm * bprm,
562 realdatastart = (unsigned long) -ENOMEM; 562 realdatastart = (unsigned long) -ENOMEM;
563 printk("Unable to allocate RAM for process data, errno %d\n", 563 printk("Unable to allocate RAM for process data, errno %d\n",
564 (int)-realdatastart); 564 (int)-realdatastart);
565 do_munmap(current->mm, textpos, text_len); 565 vm_munmap(textpos, text_len);
566 ret = realdatastart; 566 ret = realdatastart;
567 goto err; 567 goto err;
568 } 568 }
@@ -586,8 +586,8 @@ static int load_flat_file(struct linux_binprm * bprm,
586 } 586 }
587 if (IS_ERR_VALUE(result)) { 587 if (IS_ERR_VALUE(result)) {
588 printk("Unable to read data+bss, errno %d\n", (int)-result); 588 printk("Unable to read data+bss, errno %d\n", (int)-result);
589 do_munmap(current->mm, textpos, text_len); 589 vm_munmap(textpos, text_len);
590 do_munmap(current->mm, realdatastart, len); 590 vm_munmap(realdatastart, len);
591 ret = result; 591 ret = result;
592 goto err; 592 goto err;
593 } 593 }
@@ -654,7 +654,7 @@ static int load_flat_file(struct linux_binprm * bprm,
654 } 654 }
655 if (IS_ERR_VALUE(result)) { 655 if (IS_ERR_VALUE(result)) {
656 printk("Unable to read code+data+bss, errno %d\n",(int)-result); 656 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
657 do_munmap(current->mm, textpos, text_len + data_len + extra + 657 vm_munmap(textpos, text_len + data_len + extra +
658 MAX_SHARED_LIBS * sizeof(unsigned long)); 658 MAX_SHARED_LIBS * sizeof(unsigned long));
659 ret = result; 659 ret = result;
660 goto err; 660 goto err;
diff --git a/fs/btrfs/acl.c b/fs/btrfs/acl.c
index 89b156d85d63..761e2cd8fed1 100644
--- a/fs/btrfs/acl.c
+++ b/fs/btrfs/acl.c
@@ -227,7 +227,11 @@ int btrfs_init_acl(struct btrfs_trans_handle *trans,
227 if (ret > 0) { 227 if (ret > 0) {
228 /* we need an acl */ 228 /* we need an acl */
229 ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS); 229 ret = btrfs_set_acl(trans, inode, acl, ACL_TYPE_ACCESS);
230 } else {
231 cache_no_acl(inode);
230 } 232 }
233 } else {
234 cache_no_acl(inode);
231 } 235 }
232failed: 236failed:
233 posix_acl_release(acl); 237 posix_acl_release(acl);
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index bcec06750232..7301cdb4b2cb 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -24,22 +24,135 @@
24#include "delayed-ref.h" 24#include "delayed-ref.h"
25#include "locking.h" 25#include "locking.h"
26 26
27struct extent_inode_elem {
28 u64 inum;
29 u64 offset;
30 struct extent_inode_elem *next;
31};
32
33static int check_extent_in_eb(struct btrfs_key *key, struct extent_buffer *eb,
34 struct btrfs_file_extent_item *fi,
35 u64 extent_item_pos,
36 struct extent_inode_elem **eie)
37{
38 u64 data_offset;
39 u64 data_len;
40 struct extent_inode_elem *e;
41
42 data_offset = btrfs_file_extent_offset(eb, fi);
43 data_len = btrfs_file_extent_num_bytes(eb, fi);
44
45 if (extent_item_pos < data_offset ||
46 extent_item_pos >= data_offset + data_len)
47 return 1;
48
49 e = kmalloc(sizeof(*e), GFP_NOFS);
50 if (!e)
51 return -ENOMEM;
52
53 e->next = *eie;
54 e->inum = key->objectid;
55 e->offset = key->offset + (extent_item_pos - data_offset);
56 *eie = e;
57
58 return 0;
59}
60
61static int find_extent_in_eb(struct extent_buffer *eb, u64 wanted_disk_byte,
62 u64 extent_item_pos,
63 struct extent_inode_elem **eie)
64{
65 u64 disk_byte;
66 struct btrfs_key key;
67 struct btrfs_file_extent_item *fi;
68 int slot;
69 int nritems;
70 int extent_type;
71 int ret;
72
73 /*
74 * from the shared data ref, we only have the leaf but we need
75 * the key. thus, we must look into all items and see that we
76 * find one (some) with a reference to our extent item.
77 */
78 nritems = btrfs_header_nritems(eb);
79 for (slot = 0; slot < nritems; ++slot) {
80 btrfs_item_key_to_cpu(eb, &key, slot);
81 if (key.type != BTRFS_EXTENT_DATA_KEY)
82 continue;
83 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
84 extent_type = btrfs_file_extent_type(eb, fi);
85 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
86 continue;
87 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
88 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
89 if (disk_byte != wanted_disk_byte)
90 continue;
91
92 ret = check_extent_in_eb(&key, eb, fi, extent_item_pos, eie);
93 if (ret < 0)
94 return ret;
95 }
96
97 return 0;
98}
99
27/* 100/*
28 * this structure records all encountered refs on the way up to the root 101 * this structure records all encountered refs on the way up to the root
29 */ 102 */
30struct __prelim_ref { 103struct __prelim_ref {
31 struct list_head list; 104 struct list_head list;
32 u64 root_id; 105 u64 root_id;
33 struct btrfs_key key; 106 struct btrfs_key key_for_search;
34 int level; 107 int level;
35 int count; 108 int count;
109 struct extent_inode_elem *inode_list;
36 u64 parent; 110 u64 parent;
37 u64 wanted_disk_byte; 111 u64 wanted_disk_byte;
38}; 112};
39 113
114/*
115 * the rules for all callers of this function are:
116 * - obtaining the parent is the goal
117 * - if you add a key, you must know that it is a correct key
118 * - if you cannot add the parent or a correct key, then we will look into the
119 * block later to set a correct key
120 *
121 * delayed refs
122 * ============
123 * backref type | shared | indirect | shared | indirect
124 * information | tree | tree | data | data
125 * --------------------+--------+----------+--------+----------
126 * parent logical | y | - | - | -
127 * key to resolve | - | y | y | y
128 * tree block logical | - | - | - | -
129 * root for resolving | y | y | y | y
130 *
131 * - column 1: we've the parent -> done
132 * - column 2, 3, 4: we use the key to find the parent
133 *
134 * on disk refs (inline or keyed)
135 * ==============================
136 * backref type | shared | indirect | shared | indirect
137 * information | tree | tree | data | data
138 * --------------------+--------+----------+--------+----------
139 * parent logical | y | - | y | -
140 * key to resolve | - | - | - | y
141 * tree block logical | y | y | y | y
142 * root for resolving | - | y | y | y
143 *
144 * - column 1, 3: we've the parent -> done
145 * - column 2: we take the first key from the block to find the parent
146 * (see __add_missing_keys)
147 * - column 4: we use the key to find the parent
148 *
149 * additional information that's available but not required to find the parent
150 * block might help in merging entries to gain some speed.
151 */
152
40static int __add_prelim_ref(struct list_head *head, u64 root_id, 153static int __add_prelim_ref(struct list_head *head, u64 root_id,
41 struct btrfs_key *key, int level, u64 parent, 154 struct btrfs_key *key, int level,
42 u64 wanted_disk_byte, int count) 155 u64 parent, u64 wanted_disk_byte, int count)
43{ 156{
44 struct __prelim_ref *ref; 157 struct __prelim_ref *ref;
45 158
@@ -50,10 +163,11 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
50 163
51 ref->root_id = root_id; 164 ref->root_id = root_id;
52 if (key) 165 if (key)
53 ref->key = *key; 166 ref->key_for_search = *key;
54 else 167 else
55 memset(&ref->key, 0, sizeof(ref->key)); 168 memset(&ref->key_for_search, 0, sizeof(ref->key_for_search));
56 169
170 ref->inode_list = NULL;
57 ref->level = level; 171 ref->level = level;
58 ref->count = count; 172 ref->count = count;
59 ref->parent = parent; 173 ref->parent = parent;
@@ -64,52 +178,75 @@ static int __add_prelim_ref(struct list_head *head, u64 root_id,
64} 178}
65 179
66static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path, 180static int add_all_parents(struct btrfs_root *root, struct btrfs_path *path,
67 struct ulist *parents, 181 struct ulist *parents, int level,
68 struct extent_buffer *eb, int level, 182 struct btrfs_key *key_for_search, u64 time_seq,
69 u64 wanted_objectid, u64 wanted_disk_byte) 183 u64 wanted_disk_byte,
184 const u64 *extent_item_pos)
70{ 185{
71 int ret; 186 int ret = 0;
72 int slot; 187 int slot;
73 struct btrfs_file_extent_item *fi; 188 struct extent_buffer *eb;
74 struct btrfs_key key; 189 struct btrfs_key key;
190 struct btrfs_file_extent_item *fi;
191 struct extent_inode_elem *eie = NULL;
75 u64 disk_byte; 192 u64 disk_byte;
76 193
77add_parent: 194 if (level != 0) {
78 ret = ulist_add(parents, eb->start, 0, GFP_NOFS); 195 eb = path->nodes[level];
79 if (ret < 0) 196 ret = ulist_add(parents, eb->start, 0, GFP_NOFS);
80 return ret; 197 if (ret < 0)
81 198 return ret;
82 if (level != 0)
83 return 0; 199 return 0;
200 }
84 201
85 /* 202 /*
86 * if the current leaf is full with EXTENT_DATA items, we must 203 * We normally enter this function with the path already pointing to
87 * check the next one if that holds a reference as well. 204 * the first item to check. But sometimes, we may enter it with
88 * ref->count cannot be used to skip this check. 205 * slot==nritems. In that case, go to the next leaf before we continue.
89 * repeat this until we don't find any additional EXTENT_DATA items.
90 */ 206 */
91 while (1) { 207 if (path->slots[0] >= btrfs_header_nritems(path->nodes[0]))
92 ret = btrfs_next_leaf(root, path); 208 ret = btrfs_next_old_leaf(root, path, time_seq);
93 if (ret < 0)
94 return ret;
95 if (ret)
96 return 0;
97 209
210 while (!ret) {
98 eb = path->nodes[0]; 211 eb = path->nodes[0];
99 for (slot = 0; slot < btrfs_header_nritems(eb); ++slot) { 212 slot = path->slots[0];
100 btrfs_item_key_to_cpu(eb, &key, slot); 213
101 if (key.objectid != wanted_objectid || 214 btrfs_item_key_to_cpu(eb, &key, slot);
102 key.type != BTRFS_EXTENT_DATA_KEY) 215
103 return 0; 216 if (key.objectid != key_for_search->objectid ||
104 fi = btrfs_item_ptr(eb, slot, 217 key.type != BTRFS_EXTENT_DATA_KEY)
105 struct btrfs_file_extent_item); 218 break;
106 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi); 219
107 if (disk_byte == wanted_disk_byte) 220 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
108 goto add_parent; 221 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
222
223 if (disk_byte == wanted_disk_byte) {
224 eie = NULL;
225 if (extent_item_pos) {
226 ret = check_extent_in_eb(&key, eb, fi,
227 *extent_item_pos,
228 &eie);
229 if (ret < 0)
230 break;
231 }
232 if (!ret) {
233 ret = ulist_add(parents, eb->start,
234 (unsigned long)eie, GFP_NOFS);
235 if (ret < 0)
236 break;
237 if (!extent_item_pos) {
238 ret = btrfs_next_old_leaf(root, path,
239 time_seq);
240 continue;
241 }
242 }
109 } 243 }
244 ret = btrfs_next_old_item(root, path, time_seq);
110 } 245 }
111 246
112 return 0; 247 if (ret > 0)
248 ret = 0;
249 return ret;
113} 250}
114 251
115/* 252/*
@@ -118,13 +255,14 @@ add_parent:
118 */ 255 */
119static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info, 256static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
120 int search_commit_root, 257 int search_commit_root,
258 u64 time_seq,
121 struct __prelim_ref *ref, 259 struct __prelim_ref *ref,
122 struct ulist *parents) 260 struct ulist *parents,
261 const u64 *extent_item_pos)
123{ 262{
124 struct btrfs_path *path; 263 struct btrfs_path *path;
125 struct btrfs_root *root; 264 struct btrfs_root *root;
126 struct btrfs_key root_key; 265 struct btrfs_key root_key;
127 struct btrfs_key key = {0};
128 struct extent_buffer *eb; 266 struct extent_buffer *eb;
129 int ret = 0; 267 int ret = 0;
130 int root_level; 268 int root_level;
@@ -152,12 +290,13 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
152 goto out; 290 goto out;
153 291
154 path->lowest_level = level; 292 path->lowest_level = level;
155 ret = btrfs_search_slot(NULL, root, &ref->key, path, 0, 0); 293 ret = btrfs_search_old_slot(root, &ref->key_for_search, path, time_seq);
156 pr_debug("search slot in root %llu (level %d, ref count %d) returned " 294 pr_debug("search slot in root %llu (level %d, ref count %d) returned "
157 "%d for key (%llu %u %llu)\n", 295 "%d for key (%llu %u %llu)\n",
158 (unsigned long long)ref->root_id, level, ref->count, ret, 296 (unsigned long long)ref->root_id, level, ref->count, ret,
159 (unsigned long long)ref->key.objectid, ref->key.type, 297 (unsigned long long)ref->key_for_search.objectid,
160 (unsigned long long)ref->key.offset); 298 ref->key_for_search.type,
299 (unsigned long long)ref->key_for_search.offset);
161 if (ret < 0) 300 if (ret < 0)
162 goto out; 301 goto out;
163 302
@@ -168,20 +307,9 @@ static int __resolve_indirect_ref(struct btrfs_fs_info *fs_info,
168 goto out; 307 goto out;
169 } 308 }
170 309
171 if (level == 0) { 310 ret = add_all_parents(root, path, parents, level, &ref->key_for_search,
172 if (ret == 1 && path->slots[0] >= btrfs_header_nritems(eb)) { 311 time_seq, ref->wanted_disk_byte,
173 ret = btrfs_next_leaf(root, path); 312 extent_item_pos);
174 if (ret)
175 goto out;
176 eb = path->nodes[0];
177 }
178
179 btrfs_item_key_to_cpu(eb, &key, path->slots[0]);
180 }
181
182 /* the last two parameters will only be used for level == 0 */
183 ret = add_all_parents(root, path, parents, eb, level, key.objectid,
184 ref->wanted_disk_byte);
185out: 313out:
186 btrfs_free_path(path); 314 btrfs_free_path(path);
187 return ret; 315 return ret;
@@ -191,8 +319,9 @@ out:
191 * resolve all indirect backrefs from the list 319 * resolve all indirect backrefs from the list
192 */ 320 */
193static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info, 321static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
194 int search_commit_root, 322 int search_commit_root, u64 time_seq,
195 struct list_head *head) 323 struct list_head *head,
324 const u64 *extent_item_pos)
196{ 325{
197 int err; 326 int err;
198 int ret = 0; 327 int ret = 0;
@@ -201,6 +330,7 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
201 struct __prelim_ref *new_ref; 330 struct __prelim_ref *new_ref;
202 struct ulist *parents; 331 struct ulist *parents;
203 struct ulist_node *node; 332 struct ulist_node *node;
333 struct ulist_iterator uiter;
204 334
205 parents = ulist_alloc(GFP_NOFS); 335 parents = ulist_alloc(GFP_NOFS);
206 if (!parents) 336 if (!parents)
@@ -217,7 +347,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
217 if (ref->count == 0) 347 if (ref->count == 0)
218 continue; 348 continue;
219 err = __resolve_indirect_ref(fs_info, search_commit_root, 349 err = __resolve_indirect_ref(fs_info, search_commit_root,
220 ref, parents); 350 time_seq, ref, parents,
351 extent_item_pos);
221 if (err) { 352 if (err) {
222 if (ret == 0) 353 if (ret == 0)
223 ret = err; 354 ret = err;
@@ -225,11 +356,14 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
225 } 356 }
226 357
227 /* we put the first parent into the ref at hand */ 358 /* we put the first parent into the ref at hand */
228 node = ulist_next(parents, NULL); 359 ULIST_ITER_INIT(&uiter);
360 node = ulist_next(parents, &uiter);
229 ref->parent = node ? node->val : 0; 361 ref->parent = node ? node->val : 0;
362 ref->inode_list =
363 node ? (struct extent_inode_elem *)node->aux : 0;
230 364
231 /* additional parents require new refs being added here */ 365 /* additional parents require new refs being added here */
232 while ((node = ulist_next(parents, node))) { 366 while ((node = ulist_next(parents, &uiter))) {
233 new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS); 367 new_ref = kmalloc(sizeof(*new_ref), GFP_NOFS);
234 if (!new_ref) { 368 if (!new_ref) {
235 ret = -ENOMEM; 369 ret = -ENOMEM;
@@ -237,6 +371,8 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
237 } 371 }
238 memcpy(new_ref, ref, sizeof(*ref)); 372 memcpy(new_ref, ref, sizeof(*ref));
239 new_ref->parent = node->val; 373 new_ref->parent = node->val;
374 new_ref->inode_list =
375 (struct extent_inode_elem *)node->aux;
240 list_add(&new_ref->list, &ref->list); 376 list_add(&new_ref->list, &ref->list);
241 } 377 }
242 ulist_reinit(parents); 378 ulist_reinit(parents);
@@ -246,10 +382,65 @@ static int __resolve_indirect_refs(struct btrfs_fs_info *fs_info,
246 return ret; 382 return ret;
247} 383}
248 384
385static inline int ref_for_same_block(struct __prelim_ref *ref1,
386 struct __prelim_ref *ref2)
387{
388 if (ref1->level != ref2->level)
389 return 0;
390 if (ref1->root_id != ref2->root_id)
391 return 0;
392 if (ref1->key_for_search.type != ref2->key_for_search.type)
393 return 0;
394 if (ref1->key_for_search.objectid != ref2->key_for_search.objectid)
395 return 0;
396 if (ref1->key_for_search.offset != ref2->key_for_search.offset)
397 return 0;
398 if (ref1->parent != ref2->parent)
399 return 0;
400
401 return 1;
402}
403
404/*
405 * read tree blocks and add keys where required.
406 */
407static int __add_missing_keys(struct btrfs_fs_info *fs_info,
408 struct list_head *head)
409{
410 struct list_head *pos;
411 struct extent_buffer *eb;
412
413 list_for_each(pos, head) {
414 struct __prelim_ref *ref;
415 ref = list_entry(pos, struct __prelim_ref, list);
416
417 if (ref->parent)
418 continue;
419 if (ref->key_for_search.type)
420 continue;
421 BUG_ON(!ref->wanted_disk_byte);
422 eb = read_tree_block(fs_info->tree_root, ref->wanted_disk_byte,
423 fs_info->tree_root->leafsize, 0);
424 BUG_ON(!eb);
425 btrfs_tree_read_lock(eb);
426 if (btrfs_header_level(eb) == 0)
427 btrfs_item_key_to_cpu(eb, &ref->key_for_search, 0);
428 else
429 btrfs_node_key_to_cpu(eb, &ref->key_for_search, 0);
430 btrfs_tree_read_unlock(eb);
431 free_extent_buffer(eb);
432 }
433 return 0;
434}
435
249/* 436/*
250 * merge two lists of backrefs and adjust counts accordingly 437 * merge two lists of backrefs and adjust counts accordingly
251 * 438 *
252 * mode = 1: merge identical keys, if key is set 439 * mode = 1: merge identical keys, if key is set
440 * FIXME: if we add more keys in __add_prelim_ref, we can merge more here.
441 * additionally, we could even add a key range for the blocks we
442 * looked into to merge even more (-> replace unresolved refs by those
443 * having a parent).
253 * mode = 2: merge identical parents 444 * mode = 2: merge identical parents
254 */ 445 */
255static int __merge_refs(struct list_head *head, int mode) 446static int __merge_refs(struct list_head *head, int mode)
@@ -263,20 +454,21 @@ static int __merge_refs(struct list_head *head, int mode)
263 454
264 ref1 = list_entry(pos1, struct __prelim_ref, list); 455 ref1 = list_entry(pos1, struct __prelim_ref, list);
265 456
266 if (mode == 1 && ref1->key.type == 0)
267 continue;
268 for (pos2 = pos1->next, n2 = pos2->next; pos2 != head; 457 for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
269 pos2 = n2, n2 = pos2->next) { 458 pos2 = n2, n2 = pos2->next) {
270 struct __prelim_ref *ref2; 459 struct __prelim_ref *ref2;
460 struct __prelim_ref *xchg;
271 461
272 ref2 = list_entry(pos2, struct __prelim_ref, list); 462 ref2 = list_entry(pos2, struct __prelim_ref, list);
273 463
274 if (mode == 1) { 464 if (mode == 1) {
275 if (memcmp(&ref1->key, &ref2->key, 465 if (!ref_for_same_block(ref1, ref2))
276 sizeof(ref1->key)) ||
277 ref1->level != ref2->level ||
278 ref1->root_id != ref2->root_id)
279 continue; 466 continue;
467 if (!ref1->parent && ref2->parent) {
468 xchg = ref1;
469 ref1 = ref2;
470 ref2 = xchg;
471 }
280 ref1->count += ref2->count; 472 ref1->count += ref2->count;
281 } else { 473 } else {
282 if (ref1->parent != ref2->parent) 474 if (ref1->parent != ref2->parent)
@@ -296,16 +488,17 @@ static int __merge_refs(struct list_head *head, int mode)
296 * smaller or equal that seq to the list 488 * smaller or equal that seq to the list
297 */ 489 */
298static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq, 490static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
299 struct btrfs_key *info_key,
300 struct list_head *prefs) 491 struct list_head *prefs)
301{ 492{
302 struct btrfs_delayed_extent_op *extent_op = head->extent_op; 493 struct btrfs_delayed_extent_op *extent_op = head->extent_op;
303 struct rb_node *n = &head->node.rb_node; 494 struct rb_node *n = &head->node.rb_node;
495 struct btrfs_key key;
496 struct btrfs_key op_key = {0};
304 int sgn; 497 int sgn;
305 int ret = 0; 498 int ret = 0;
306 499
307 if (extent_op && extent_op->update_key) 500 if (extent_op && extent_op->update_key)
308 btrfs_disk_key_to_cpu(info_key, &extent_op->key); 501 btrfs_disk_key_to_cpu(&op_key, &extent_op->key);
309 502
310 while ((n = rb_prev(n))) { 503 while ((n = rb_prev(n))) {
311 struct btrfs_delayed_ref_node *node; 504 struct btrfs_delayed_ref_node *node;
@@ -337,7 +530,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
337 struct btrfs_delayed_tree_ref *ref; 530 struct btrfs_delayed_tree_ref *ref;
338 531
339 ref = btrfs_delayed_node_to_tree_ref(node); 532 ref = btrfs_delayed_node_to_tree_ref(node);
340 ret = __add_prelim_ref(prefs, ref->root, info_key, 533 ret = __add_prelim_ref(prefs, ref->root, &op_key,
341 ref->level + 1, 0, node->bytenr, 534 ref->level + 1, 0, node->bytenr,
342 node->ref_mod * sgn); 535 node->ref_mod * sgn);
343 break; 536 break;
@@ -346,7 +539,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
346 struct btrfs_delayed_tree_ref *ref; 539 struct btrfs_delayed_tree_ref *ref;
347 540
348 ref = btrfs_delayed_node_to_tree_ref(node); 541 ref = btrfs_delayed_node_to_tree_ref(node);
349 ret = __add_prelim_ref(prefs, ref->root, info_key, 542 ret = __add_prelim_ref(prefs, ref->root, NULL,
350 ref->level + 1, ref->parent, 543 ref->level + 1, ref->parent,
351 node->bytenr, 544 node->bytenr,
352 node->ref_mod * sgn); 545 node->ref_mod * sgn);
@@ -354,8 +547,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
354 } 547 }
355 case BTRFS_EXTENT_DATA_REF_KEY: { 548 case BTRFS_EXTENT_DATA_REF_KEY: {
356 struct btrfs_delayed_data_ref *ref; 549 struct btrfs_delayed_data_ref *ref;
357 struct btrfs_key key;
358
359 ref = btrfs_delayed_node_to_data_ref(node); 550 ref = btrfs_delayed_node_to_data_ref(node);
360 551
361 key.objectid = ref->objectid; 552 key.objectid = ref->objectid;
@@ -368,7 +559,6 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
368 } 559 }
369 case BTRFS_SHARED_DATA_REF_KEY: { 560 case BTRFS_SHARED_DATA_REF_KEY: {
370 struct btrfs_delayed_data_ref *ref; 561 struct btrfs_delayed_data_ref *ref;
371 struct btrfs_key key;
372 562
373 ref = btrfs_delayed_node_to_data_ref(node); 563 ref = btrfs_delayed_node_to_data_ref(node);
374 564
@@ -394,8 +584,7 @@ static int __add_delayed_refs(struct btrfs_delayed_ref_head *head, u64 seq,
394 */ 584 */
395static int __add_inline_refs(struct btrfs_fs_info *fs_info, 585static int __add_inline_refs(struct btrfs_fs_info *fs_info,
396 struct btrfs_path *path, u64 bytenr, 586 struct btrfs_path *path, u64 bytenr,
397 struct btrfs_key *info_key, int *info_level, 587 int *info_level, struct list_head *prefs)
398 struct list_head *prefs)
399{ 588{
400 int ret = 0; 589 int ret = 0;
401 int slot; 590 int slot;
@@ -411,7 +600,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
411 * enumerate all inline refs 600 * enumerate all inline refs
412 */ 601 */
413 leaf = path->nodes[0]; 602 leaf = path->nodes[0];
414 slot = path->slots[0] - 1; 603 slot = path->slots[0];
415 604
416 item_size = btrfs_item_size_nr(leaf, slot); 605 item_size = btrfs_item_size_nr(leaf, slot);
417 BUG_ON(item_size < sizeof(*ei)); 606 BUG_ON(item_size < sizeof(*ei));
@@ -424,12 +613,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
424 613
425 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) { 614 if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
426 struct btrfs_tree_block_info *info; 615 struct btrfs_tree_block_info *info;
427 struct btrfs_disk_key disk_key;
428 616
429 info = (struct btrfs_tree_block_info *)ptr; 617 info = (struct btrfs_tree_block_info *)ptr;
430 *info_level = btrfs_tree_block_level(leaf, info); 618 *info_level = btrfs_tree_block_level(leaf, info);
431 btrfs_tree_block_key(leaf, info, &disk_key);
432 btrfs_disk_key_to_cpu(info_key, &disk_key);
433 ptr += sizeof(struct btrfs_tree_block_info); 619 ptr += sizeof(struct btrfs_tree_block_info);
434 BUG_ON(ptr > end); 620 BUG_ON(ptr > end);
435 } else { 621 } else {
@@ -447,7 +633,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
447 633
448 switch (type) { 634 switch (type) {
449 case BTRFS_SHARED_BLOCK_REF_KEY: 635 case BTRFS_SHARED_BLOCK_REF_KEY:
450 ret = __add_prelim_ref(prefs, 0, info_key, 636 ret = __add_prelim_ref(prefs, 0, NULL,
451 *info_level + 1, offset, 637 *info_level + 1, offset,
452 bytenr, 1); 638 bytenr, 1);
453 break; 639 break;
@@ -462,8 +648,9 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
462 break; 648 break;
463 } 649 }
464 case BTRFS_TREE_BLOCK_REF_KEY: 650 case BTRFS_TREE_BLOCK_REF_KEY:
465 ret = __add_prelim_ref(prefs, offset, info_key, 651 ret = __add_prelim_ref(prefs, offset, NULL,
466 *info_level + 1, 0, bytenr, 1); 652 *info_level + 1, 0,
653 bytenr, 1);
467 break; 654 break;
468 case BTRFS_EXTENT_DATA_REF_KEY: { 655 case BTRFS_EXTENT_DATA_REF_KEY: {
469 struct btrfs_extent_data_ref *dref; 656 struct btrfs_extent_data_ref *dref;
@@ -477,8 +664,8 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
477 key.type = BTRFS_EXTENT_DATA_KEY; 664 key.type = BTRFS_EXTENT_DATA_KEY;
478 key.offset = btrfs_extent_data_ref_offset(leaf, dref); 665 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
479 root = btrfs_extent_data_ref_root(leaf, dref); 666 root = btrfs_extent_data_ref_root(leaf, dref);
480 ret = __add_prelim_ref(prefs, root, &key, 0, 0, bytenr, 667 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
481 count); 668 bytenr, count);
482 break; 669 break;
483 } 670 }
484 default: 671 default:
@@ -496,8 +683,7 @@ static int __add_inline_refs(struct btrfs_fs_info *fs_info,
496 */ 683 */
497static int __add_keyed_refs(struct btrfs_fs_info *fs_info, 684static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
498 struct btrfs_path *path, u64 bytenr, 685 struct btrfs_path *path, u64 bytenr,
499 struct btrfs_key *info_key, int info_level, 686 int info_level, struct list_head *prefs)
500 struct list_head *prefs)
501{ 687{
502 struct btrfs_root *extent_root = fs_info->extent_root; 688 struct btrfs_root *extent_root = fs_info->extent_root;
503 int ret; 689 int ret;
@@ -527,7 +713,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
527 713
528 switch (key.type) { 714 switch (key.type) {
529 case BTRFS_SHARED_BLOCK_REF_KEY: 715 case BTRFS_SHARED_BLOCK_REF_KEY:
530 ret = __add_prelim_ref(prefs, 0, info_key, 716 ret = __add_prelim_ref(prefs, 0, NULL,
531 info_level + 1, key.offset, 717 info_level + 1, key.offset,
532 bytenr, 1); 718 bytenr, 1);
533 break; 719 break;
@@ -543,8 +729,9 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
543 break; 729 break;
544 } 730 }
545 case BTRFS_TREE_BLOCK_REF_KEY: 731 case BTRFS_TREE_BLOCK_REF_KEY:
546 ret = __add_prelim_ref(prefs, key.offset, info_key, 732 ret = __add_prelim_ref(prefs, key.offset, NULL,
547 info_level + 1, 0, bytenr, 1); 733 info_level + 1, 0,
734 bytenr, 1);
548 break; 735 break;
549 case BTRFS_EXTENT_DATA_REF_KEY: { 736 case BTRFS_EXTENT_DATA_REF_KEY: {
550 struct btrfs_extent_data_ref *dref; 737 struct btrfs_extent_data_ref *dref;
@@ -560,7 +747,7 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
560 key.offset = btrfs_extent_data_ref_offset(leaf, dref); 747 key.offset = btrfs_extent_data_ref_offset(leaf, dref);
561 root = btrfs_extent_data_ref_root(leaf, dref); 748 root = btrfs_extent_data_ref_root(leaf, dref);
562 ret = __add_prelim_ref(prefs, root, &key, 0, 0, 749 ret = __add_prelim_ref(prefs, root, &key, 0, 0,
563 bytenr, count); 750 bytenr, count);
564 break; 751 break;
565 } 752 }
566 default: 753 default:
@@ -582,11 +769,12 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
582 */ 769 */
583static int find_parent_nodes(struct btrfs_trans_handle *trans, 770static int find_parent_nodes(struct btrfs_trans_handle *trans,
584 struct btrfs_fs_info *fs_info, u64 bytenr, 771 struct btrfs_fs_info *fs_info, u64 bytenr,
585 u64 seq, struct ulist *refs, struct ulist *roots) 772 u64 delayed_ref_seq, u64 time_seq,
773 struct ulist *refs, struct ulist *roots,
774 const u64 *extent_item_pos)
586{ 775{
587 struct btrfs_key key; 776 struct btrfs_key key;
588 struct btrfs_path *path; 777 struct btrfs_path *path;
589 struct btrfs_key info_key = { 0 };
590 struct btrfs_delayed_ref_root *delayed_refs = NULL; 778 struct btrfs_delayed_ref_root *delayed_refs = NULL;
591 struct btrfs_delayed_ref_head *head; 779 struct btrfs_delayed_ref_head *head;
592 int info_level = 0; 780 int info_level = 0;
@@ -645,7 +833,7 @@ again:
645 btrfs_put_delayed_ref(&head->node); 833 btrfs_put_delayed_ref(&head->node);
646 goto again; 834 goto again;
647 } 835 }
648 ret = __add_delayed_refs(head, seq, &info_key, 836 ret = __add_delayed_refs(head, delayed_ref_seq,
649 &prefs_delayed); 837 &prefs_delayed);
650 if (ret) { 838 if (ret) {
651 spin_unlock(&delayed_refs->lock); 839 spin_unlock(&delayed_refs->lock);
@@ -659,16 +847,17 @@ again:
659 struct extent_buffer *leaf; 847 struct extent_buffer *leaf;
660 int slot; 848 int slot;
661 849
850 path->slots[0]--;
662 leaf = path->nodes[0]; 851 leaf = path->nodes[0];
663 slot = path->slots[0] - 1; 852 slot = path->slots[0];
664 btrfs_item_key_to_cpu(leaf, &key, slot); 853 btrfs_item_key_to_cpu(leaf, &key, slot);
665 if (key.objectid == bytenr && 854 if (key.objectid == bytenr &&
666 key.type == BTRFS_EXTENT_ITEM_KEY) { 855 key.type == BTRFS_EXTENT_ITEM_KEY) {
667 ret = __add_inline_refs(fs_info, path, bytenr, 856 ret = __add_inline_refs(fs_info, path, bytenr,
668 &info_key, &info_level, &prefs); 857 &info_level, &prefs);
669 if (ret) 858 if (ret)
670 goto out; 859 goto out;
671 ret = __add_keyed_refs(fs_info, path, bytenr, &info_key, 860 ret = __add_keyed_refs(fs_info, path, bytenr,
672 info_level, &prefs); 861 info_level, &prefs);
673 if (ret) 862 if (ret)
674 goto out; 863 goto out;
@@ -676,21 +865,18 @@ again:
676 } 865 }
677 btrfs_release_path(path); 866 btrfs_release_path(path);
678 867
679 /*
680 * when adding the delayed refs above, the info_key might not have
681 * been known yet. Go over the list and replace the missing keys
682 */
683 list_for_each_entry(ref, &prefs_delayed, list) {
684 if ((ref->key.offset | ref->key.type | ref->key.objectid) == 0)
685 memcpy(&ref->key, &info_key, sizeof(ref->key));
686 }
687 list_splice_init(&prefs_delayed, &prefs); 868 list_splice_init(&prefs_delayed, &prefs);
688 869
870 ret = __add_missing_keys(fs_info, &prefs);
871 if (ret)
872 goto out;
873
689 ret = __merge_refs(&prefs, 1); 874 ret = __merge_refs(&prefs, 1);
690 if (ret) 875 if (ret)
691 goto out; 876 goto out;
692 877
693 ret = __resolve_indirect_refs(fs_info, search_commit_root, &prefs); 878 ret = __resolve_indirect_refs(fs_info, search_commit_root, time_seq,
879 &prefs, extent_item_pos);
694 if (ret) 880 if (ret)
695 goto out; 881 goto out;
696 882
@@ -709,7 +895,33 @@ again:
709 BUG_ON(ret < 0); 895 BUG_ON(ret < 0);
710 } 896 }
711 if (ref->count && ref->parent) { 897 if (ref->count && ref->parent) {
712 ret = ulist_add(refs, ref->parent, 0, GFP_NOFS); 898 struct extent_inode_elem *eie = NULL;
899 if (extent_item_pos && !ref->inode_list) {
900 u32 bsz;
901 struct extent_buffer *eb;
902 bsz = btrfs_level_size(fs_info->extent_root,
903 info_level);
904 eb = read_tree_block(fs_info->extent_root,
905 ref->parent, bsz, 0);
906 BUG_ON(!eb);
907 ret = find_extent_in_eb(eb, bytenr,
908 *extent_item_pos, &eie);
909 ref->inode_list = eie;
910 free_extent_buffer(eb);
911 }
912 ret = ulist_add_merge(refs, ref->parent,
913 (unsigned long)ref->inode_list,
914 (unsigned long *)&eie, GFP_NOFS);
915 if (!ret && extent_item_pos) {
916 /*
917 * we've recorded that parent, so we must extend
918 * its inode list here
919 */
920 BUG_ON(!eie);
921 while (eie->next)
922 eie = eie->next;
923 eie->next = ref->inode_list;
924 }
713 BUG_ON(ret < 0); 925 BUG_ON(ret < 0);
714 } 926 }
715 kfree(ref); 927 kfree(ref);
@@ -734,6 +946,28 @@ out:
734 return ret; 946 return ret;
735} 947}
736 948
949static void free_leaf_list(struct ulist *blocks)
950{
951 struct ulist_node *node = NULL;
952 struct extent_inode_elem *eie;
953 struct extent_inode_elem *eie_next;
954 struct ulist_iterator uiter;
955
956 ULIST_ITER_INIT(&uiter);
957 while ((node = ulist_next(blocks, &uiter))) {
958 if (!node->aux)
959 continue;
960 eie = (struct extent_inode_elem *)node->aux;
961 for (; eie; eie = eie_next) {
962 eie_next = eie->next;
963 kfree(eie);
964 }
965 node->aux = 0;
966 }
967
968 ulist_free(blocks);
969}
970
737/* 971/*
738 * Finds all leafs with a reference to the specified combination of bytenr and 972 * Finds all leafs with a reference to the specified combination of bytenr and
739 * offset. key_list_head will point to a list of corresponding keys (caller must 973 * offset. key_list_head will point to a list of corresponding keys (caller must
@@ -744,7 +978,9 @@ out:
744 */ 978 */
745static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans, 979static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
746 struct btrfs_fs_info *fs_info, u64 bytenr, 980 struct btrfs_fs_info *fs_info, u64 bytenr,
747 u64 num_bytes, u64 seq, struct ulist **leafs) 981 u64 delayed_ref_seq, u64 time_seq,
982 struct ulist **leafs,
983 const u64 *extent_item_pos)
748{ 984{
749 struct ulist *tmp; 985 struct ulist *tmp;
750 int ret; 986 int ret;
@@ -758,11 +994,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
758 return -ENOMEM; 994 return -ENOMEM;
759 } 995 }
760 996
761 ret = find_parent_nodes(trans, fs_info, bytenr, seq, *leafs, tmp); 997 ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
998 time_seq, *leafs, tmp, extent_item_pos);
762 ulist_free(tmp); 999 ulist_free(tmp);
763 1000
764 if (ret < 0 && ret != -ENOENT) { 1001 if (ret < 0 && ret != -ENOENT) {
765 ulist_free(*leafs); 1002 free_leaf_list(*leafs);
766 return ret; 1003 return ret;
767 } 1004 }
768 1005
@@ -784,10 +1021,12 @@ static int btrfs_find_all_leafs(struct btrfs_trans_handle *trans,
784 */ 1021 */
785int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 1022int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
786 struct btrfs_fs_info *fs_info, u64 bytenr, 1023 struct btrfs_fs_info *fs_info, u64 bytenr,
787 u64 num_bytes, u64 seq, struct ulist **roots) 1024 u64 delayed_ref_seq, u64 time_seq,
1025 struct ulist **roots)
788{ 1026{
789 struct ulist *tmp; 1027 struct ulist *tmp;
790 struct ulist_node *node = NULL; 1028 struct ulist_node *node = NULL;
1029 struct ulist_iterator uiter;
791 int ret; 1030 int ret;
792 1031
793 tmp = ulist_alloc(GFP_NOFS); 1032 tmp = ulist_alloc(GFP_NOFS);
@@ -799,15 +1038,16 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
799 return -ENOMEM; 1038 return -ENOMEM;
800 } 1039 }
801 1040
1041 ULIST_ITER_INIT(&uiter);
802 while (1) { 1042 while (1) {
803 ret = find_parent_nodes(trans, fs_info, bytenr, seq, 1043 ret = find_parent_nodes(trans, fs_info, bytenr, delayed_ref_seq,
804 tmp, *roots); 1044 time_seq, tmp, *roots, NULL);
805 if (ret < 0 && ret != -ENOENT) { 1045 if (ret < 0 && ret != -ENOENT) {
806 ulist_free(tmp); 1046 ulist_free(tmp);
807 ulist_free(*roots); 1047 ulist_free(*roots);
808 return ret; 1048 return ret;
809 } 1049 }
810 node = ulist_next(tmp, node); 1050 node = ulist_next(tmp, &uiter);
811 if (!node) 1051 if (!node)
812 break; 1052 break;
813 bytenr = node->val; 1053 bytenr = node->val;
@@ -1093,67 +1333,25 @@ int tree_backref_for_extent(unsigned long *ptr, struct extent_buffer *eb,
1093 return 0; 1333 return 0;
1094} 1334}
1095 1335
1096static int iterate_leaf_refs(struct btrfs_fs_info *fs_info, u64 logical, 1336static int iterate_leaf_refs(struct extent_inode_elem *inode_list,
1097 u64 orig_extent_item_objectid, 1337 u64 root, u64 extent_item_objectid,
1098 u64 extent_item_pos, u64 root,
1099 iterate_extent_inodes_t *iterate, void *ctx) 1338 iterate_extent_inodes_t *iterate, void *ctx)
1100{ 1339{
1101 u64 disk_byte; 1340 struct extent_inode_elem *eie;
1102 struct btrfs_key key;
1103 struct btrfs_file_extent_item *fi;
1104 struct extent_buffer *eb;
1105 int slot;
1106 int nritems;
1107 int ret = 0; 1341 int ret = 0;
1108 int extent_type;
1109 u64 data_offset;
1110 u64 data_len;
1111
1112 eb = read_tree_block(fs_info->tree_root, logical,
1113 fs_info->tree_root->leafsize, 0);
1114 if (!eb)
1115 return -EIO;
1116
1117 /*
1118 * from the shared data ref, we only have the leaf but we need
1119 * the key. thus, we must look into all items and see that we
1120 * find one (some) with a reference to our extent item.
1121 */
1122 nritems = btrfs_header_nritems(eb);
1123 for (slot = 0; slot < nritems; ++slot) {
1124 btrfs_item_key_to_cpu(eb, &key, slot);
1125 if (key.type != BTRFS_EXTENT_DATA_KEY)
1126 continue;
1127 fi = btrfs_item_ptr(eb, slot, struct btrfs_file_extent_item);
1128 extent_type = btrfs_file_extent_type(eb, fi);
1129 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
1130 continue;
1131 /* don't skip BTRFS_FILE_EXTENT_PREALLOC, we can handle that */
1132 disk_byte = btrfs_file_extent_disk_bytenr(eb, fi);
1133 if (disk_byte != orig_extent_item_objectid)
1134 continue;
1135
1136 data_offset = btrfs_file_extent_offset(eb, fi);
1137 data_len = btrfs_file_extent_num_bytes(eb, fi);
1138
1139 if (extent_item_pos < data_offset ||
1140 extent_item_pos >= data_offset + data_len)
1141 continue;
1142 1342
1343 for (eie = inode_list; eie; eie = eie->next) {
1143 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), " 1344 pr_debug("ref for %llu resolved, key (%llu EXTEND_DATA %llu), "
1144 "root %llu\n", orig_extent_item_objectid, 1345 "root %llu\n", extent_item_objectid,
1145 key.objectid, key.offset, root); 1346 eie->inum, eie->offset, root);
1146 ret = iterate(key.objectid, 1347 ret = iterate(eie->inum, eie->offset, root, ctx);
1147 key.offset + (extent_item_pos - data_offset),
1148 root, ctx);
1149 if (ret) { 1348 if (ret) {
1150 pr_debug("stopping iteration because ret=%d\n", ret); 1349 pr_debug("stopping iteration for %llu due to ret=%d\n",
1350 extent_item_objectid, ret);
1151 break; 1351 break;
1152 } 1352 }
1153 } 1353 }
1154 1354
1155 free_extent_buffer(eb);
1156
1157 return ret; 1355 return ret;
1158} 1356}
1159 1357
@@ -1175,7 +1373,10 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1175 struct ulist *roots = NULL; 1373 struct ulist *roots = NULL;
1176 struct ulist_node *ref_node = NULL; 1374 struct ulist_node *ref_node = NULL;
1177 struct ulist_node *root_node = NULL; 1375 struct ulist_node *root_node = NULL;
1178 struct seq_list seq_elem; 1376 struct seq_list seq_elem = {};
1377 struct seq_list tree_mod_seq_elem = {};
1378 struct ulist_iterator ref_uiter;
1379 struct ulist_iterator root_uiter;
1179 struct btrfs_delayed_ref_root *delayed_refs = NULL; 1380 struct btrfs_delayed_ref_root *delayed_refs = NULL;
1180 1381
1181 pr_debug("resolving all inodes for extent %llu\n", 1382 pr_debug("resolving all inodes for extent %llu\n",
@@ -1192,34 +1393,41 @@ int iterate_extent_inodes(struct btrfs_fs_info *fs_info,
1192 spin_lock(&delayed_refs->lock); 1393 spin_lock(&delayed_refs->lock);
1193 btrfs_get_delayed_seq(delayed_refs, &seq_elem); 1394 btrfs_get_delayed_seq(delayed_refs, &seq_elem);
1194 spin_unlock(&delayed_refs->lock); 1395 spin_unlock(&delayed_refs->lock);
1396 btrfs_get_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1195 } 1397 }
1196 1398
1197 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid, 1399 ret = btrfs_find_all_leafs(trans, fs_info, extent_item_objectid,
1198 extent_item_pos, seq_elem.seq, 1400 seq_elem.seq, tree_mod_seq_elem.seq, &refs,
1199 &refs); 1401 &extent_item_pos);
1200
1201 if (ret) 1402 if (ret)
1202 goto out; 1403 goto out;
1203 1404
1204 while (!ret && (ref_node = ulist_next(refs, ref_node))) { 1405 ULIST_ITER_INIT(&ref_uiter);
1205 ret = btrfs_find_all_roots(trans, fs_info, ref_node->val, -1, 1406 while (!ret && (ref_node = ulist_next(refs, &ref_uiter))) {
1206 seq_elem.seq, &roots); 1407 ret = btrfs_find_all_roots(trans, fs_info, ref_node->val,
1408 seq_elem.seq,
1409 tree_mod_seq_elem.seq, &roots);
1207 if (ret) 1410 if (ret)
1208 break; 1411 break;
1209 while (!ret && (root_node = ulist_next(roots, root_node))) { 1412 ULIST_ITER_INIT(&root_uiter);
1210 pr_debug("root %llu references leaf %llu\n", 1413 while (!ret && (root_node = ulist_next(roots, &root_uiter))) {
1211 root_node->val, ref_node->val); 1414 pr_debug("root %llu references leaf %llu, data list "
1212 ret = iterate_leaf_refs(fs_info, ref_node->val, 1415 "%#lx\n", root_node->val, ref_node->val,
1213 extent_item_objectid, 1416 ref_node->aux);
1214 extent_item_pos, root_node->val, 1417 ret = iterate_leaf_refs(
1215 iterate, ctx); 1418 (struct extent_inode_elem *)ref_node->aux,
1419 root_node->val, extent_item_objectid,
1420 iterate, ctx);
1216 } 1421 }
1422 ulist_free(roots);
1423 roots = NULL;
1217 } 1424 }
1218 1425
1219 ulist_free(refs); 1426 free_leaf_list(refs);
1220 ulist_free(roots); 1427 ulist_free(roots);
1221out: 1428out:
1222 if (!search_commit_root) { 1429 if (!search_commit_root) {
1430 btrfs_put_tree_mod_seq(fs_info, &tree_mod_seq_elem);
1223 btrfs_put_delayed_seq(delayed_refs, &seq_elem); 1431 btrfs_put_delayed_seq(delayed_refs, &seq_elem);
1224 btrfs_end_transaction(trans, fs_info->extent_root); 1432 btrfs_end_transaction(trans, fs_info->extent_root);
1225 } 1433 }
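
A large part of the backref.c churn above is the switch from passing the previously returned node back into ulist_next() to a separate ulist_iterator cursor (ULIST_ITER_INIT()/ulist_next()), so the list can safely grow while it is being walked. A minimal walk in the new style — a sketch, not code from the patch — looks like:

#include "ulist.h"      /* fs/btrfs/ulist.h */

static unsigned long example_count_entries(struct ulist *list)
{
        struct ulist_iterator uiter;
        struct ulist_node *node;
        unsigned long entries = 0;

        ULIST_ITER_INIT(&uiter);
        while ((node = ulist_next(list, &uiter))) {
                /* node->val is the stored u64, node->aux the per-entry cookie */
                entries++;
        }
        return entries;
}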
diff --git a/fs/btrfs/backref.h b/fs/btrfs/backref.h
index 57ea2e959e4d..c18d8ac7b795 100644
--- a/fs/btrfs/backref.h
+++ b/fs/btrfs/backref.h
@@ -58,7 +58,8 @@ int paths_from_inode(u64 inum, struct inode_fs_paths *ipath);
58 58
59int btrfs_find_all_roots(struct btrfs_trans_handle *trans, 59int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
60 struct btrfs_fs_info *fs_info, u64 bytenr, 60 struct btrfs_fs_info *fs_info, u64 bytenr,
61 u64 num_bytes, u64 seq, struct ulist **roots); 61 u64 delayed_ref_seq, u64 time_seq,
62 struct ulist **roots);
62 63
63struct btrfs_data_container *init_data_container(u32 total_bytes); 64struct btrfs_data_container *init_data_container(u32 total_bytes);
64struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root, 65struct inode_fs_paths *init_ipath(s32 total_bytes, struct btrfs_root *fs_root,
diff --git a/fs/btrfs/btrfs_inode.h b/fs/btrfs/btrfs_inode.h
index 9b9b15fd5204..12394a90d60f 100644
--- a/fs/btrfs/btrfs_inode.h
+++ b/fs/btrfs/btrfs_inode.h
@@ -24,6 +24,21 @@
24#include "ordered-data.h" 24#include "ordered-data.h"
25#include "delayed-inode.h" 25#include "delayed-inode.h"
26 26
27/*
28 * ordered_data_close is set by truncate when a file that used
29 * to have good data has been truncated to zero. When it is set
30 * the btrfs file release call will add this inode to the
31 * ordered operations list so that we make sure to flush out any
32 * new data the application may have written before commit.
33 */
34#define BTRFS_INODE_ORDERED_DATA_CLOSE 0
35#define BTRFS_INODE_ORPHAN_META_RESERVED 1
36#define BTRFS_INODE_DUMMY 2
37#define BTRFS_INODE_IN_DEFRAG 3
38#define BTRFS_INODE_DELALLOC_META_RESERVED 4
39#define BTRFS_INODE_HAS_ORPHAN_ITEM 5
40#define BTRFS_INODE_HAS_ASYNC_EXTENT 6
41
27/* in memory btrfs inode */ 42/* in memory btrfs inode */
28struct btrfs_inode { 43struct btrfs_inode {
29 /* which subvolume this inode belongs to */ 44 /* which subvolume this inode belongs to */
@@ -57,9 +72,6 @@ struct btrfs_inode {
57 /* used to order data wrt metadata */ 72 /* used to order data wrt metadata */
58 struct btrfs_ordered_inode_tree ordered_tree; 73 struct btrfs_ordered_inode_tree ordered_tree;
59 74
60 /* for keeping track of orphaned inodes */
61 struct list_head i_orphan;
62
63 /* list of all the delalloc inodes in the FS. There are times we need 75 /* list of all the delalloc inodes in the FS. There are times we need
64 * to write all the delalloc pages to disk, and this list is used 76 * to write all the delalloc pages to disk, and this list is used
65 * to walk them all. 77 * to walk them all.
@@ -78,14 +90,13 @@ struct btrfs_inode {
78 /* the space_info for where this inode's data allocations are done */ 90 /* the space_info for where this inode's data allocations are done */
79 struct btrfs_space_info *space_info; 91 struct btrfs_space_info *space_info;
80 92
93 unsigned long runtime_flags;
94
81 /* full 64 bit generation number, struct vfs_inode doesn't have a big 95 /* full 64 bit generation number, struct vfs_inode doesn't have a big
82 * enough field for this. 96 * enough field for this.
83 */ 97 */
84 u64 generation; 98 u64 generation;
85 99
86 /* sequence number for NFS changes */
87 u64 sequence;
88
89 /* 100 /*
90 * transid of the trans_handle that last modified this inode 101 * transid of the trans_handle that last modified this inode
91 */ 102 */
@@ -145,22 +156,9 @@ struct btrfs_inode {
145 unsigned reserved_extents; 156 unsigned reserved_extents;
146 157
147 /* 158 /*
148 * ordered_data_close is set by truncate when a file that used
149 * to have good data has been truncated to zero. When it is set
150 * the btrfs file release call will add this inode to the
151 * ordered operations list so that we make sure to flush out any
152 * new data the application may have written before commit.
153 */
154 unsigned ordered_data_close:1;
155 unsigned orphan_meta_reserved:1;
156 unsigned dummy_inode:1;
157 unsigned in_defrag:1;
158 unsigned delalloc_meta_reserved:1;
159
160 /*
161 * always compress this one file 159 * always compress this one file
162 */ 160 */
163 unsigned force_compress:4; 161 unsigned force_compress;
164 162
165 struct btrfs_delayed_node *delayed_node; 163 struct btrfs_delayed_node *delayed_node;
166 164
@@ -202,4 +200,17 @@ static inline bool btrfs_is_free_space_inode(struct btrfs_root *root,
202 return false; 200 return false;
203} 201}
204 202
203static inline int btrfs_inode_in_log(struct inode *inode, u64 generation)
204{
205 struct btrfs_root *root = BTRFS_I(inode)->root;
206 int ret = 0;
207
208 mutex_lock(&root->log_mutex);
209 if (BTRFS_I(inode)->logged_trans == generation &&
210 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
211 ret = 1;
212 mutex_unlock(&root->log_mutex);
213 return ret;
214}
215
205#endif 216#endif
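
The btrfs_inode.h hunk replaces several one-bit bitfields with numbered bits in a single runtime_flags word, so callers can use the atomic bitops instead of racy read-modify-write on adjacent bitfields. An illustrative caller (not from the patch) might do:

#include <linux/bitops.h>
#include "btrfs_inode.h"        /* fs/btrfs/btrfs_inode.h */

static void example_mark_in_defrag(struct inode *inode)
{
        /* test_and_set_bit() is atomic, so only the first caller queues the inode */
        if (!test_and_set_bit(BTRFS_INODE_IN_DEFRAG,
                              &BTRFS_I(inode)->runtime_flags)) {
                /* ... add the inode to the defrag list here ... */
        }
}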
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index c053e90f2006..da6e9364a5e3 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -93,6 +93,7 @@
93#include "print-tree.h" 93#include "print-tree.h"
94#include "locking.h" 94#include "locking.h"
95#include "check-integrity.h" 95#include "check-integrity.h"
96#include "rcu-string.h"
96 97
97#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000 98#define BTRFSIC_BLOCK_HASHTABLE_SIZE 0x10000
98#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000 99#define BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE 0x10000
@@ -103,8 +104,6 @@
103#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300 104#define BTRFSIC_BLOCK_STACK_FRAME_MAGIC_NUMBER 20111300
104#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters, 105#define BTRFSIC_TREE_DUMP_MAX_INDENT_LEVEL (200 - 6) /* in characters,
105 * excluding " [...]" */ 106 * excluding " [...]" */
106#define BTRFSIC_BLOCK_SIZE PAGE_SIZE
107
108#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1) 107#define BTRFSIC_GENERATION_UNKNOWN ((u64)-1)
109 108
110/* 109/*
@@ -210,8 +209,9 @@ struct btrfsic_block_data_ctx {
210 u64 dev_bytenr; /* physical bytenr on device */ 209 u64 dev_bytenr; /* physical bytenr on device */
211 u32 len; 210 u32 len;
212 struct btrfsic_dev_state *dev; 211 struct btrfsic_dev_state *dev;
213 char *data; 212 char **datav;
214 struct buffer_head *bh; /* do not use if set to NULL */ 213 struct page **pagev;
214 void *mem_to_free;
215}; 215};
216 216
217/* This structure is used to implement recursion without occupying 217/* This structure is used to implement recursion without occupying
@@ -243,6 +243,8 @@ struct btrfsic_state {
243 struct btrfs_root *root; 243 struct btrfs_root *root;
244 u64 max_superblock_generation; 244 u64 max_superblock_generation;
245 struct btrfsic_block *latest_superblock; 245 struct btrfsic_block *latest_superblock;
246 u32 metablock_size;
247 u32 datablock_size;
246}; 248};
247 249
248static void btrfsic_block_init(struct btrfsic_block *b); 250static void btrfsic_block_init(struct btrfsic_block *b);
@@ -290,8 +292,10 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
290static int btrfsic_process_metablock(struct btrfsic_state *state, 292static int btrfsic_process_metablock(struct btrfsic_state *state,
291 struct btrfsic_block *block, 293 struct btrfsic_block *block,
292 struct btrfsic_block_data_ctx *block_ctx, 294 struct btrfsic_block_data_ctx *block_ctx,
293 struct btrfs_header *hdr,
294 int limit_nesting, int force_iodone_flag); 295 int limit_nesting, int force_iodone_flag);
296static void btrfsic_read_from_block_data(
297 struct btrfsic_block_data_ctx *block_ctx,
298 void *dst, u32 offset, size_t len);
295static int btrfsic_create_link_to_next_block( 299static int btrfsic_create_link_to_next_block(
296 struct btrfsic_state *state, 300 struct btrfsic_state *state,
297 struct btrfsic_block *block, 301 struct btrfsic_block *block,
@@ -318,12 +322,13 @@ static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx);
318static int btrfsic_read_block(struct btrfsic_state *state, 322static int btrfsic_read_block(struct btrfsic_state *state,
319 struct btrfsic_block_data_ctx *block_ctx); 323 struct btrfsic_block_data_ctx *block_ctx);
320static void btrfsic_dump_database(struct btrfsic_state *state); 324static void btrfsic_dump_database(struct btrfsic_state *state);
325static void btrfsic_complete_bio_end_io(struct bio *bio, int err);
321static int btrfsic_test_for_metadata(struct btrfsic_state *state, 326static int btrfsic_test_for_metadata(struct btrfsic_state *state,
322 const u8 *data, unsigned int size); 327 char **datav, unsigned int num_pages);
323static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 328static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
324 u64 dev_bytenr, u8 *mapped_data, 329 u64 dev_bytenr, char **mapped_datav,
325 unsigned int len, struct bio *bio, 330 unsigned int num_pages,
326 int *bio_is_patched, 331 struct bio *bio, int *bio_is_patched,
327 struct buffer_head *bh, 332 struct buffer_head *bh,
328 int submit_bio_bh_rw); 333 int submit_bio_bh_rw);
329static int btrfsic_process_written_superblock( 334static int btrfsic_process_written_superblock(
@@ -375,7 +380,7 @@ static struct btrfsic_dev_state *btrfsic_dev_state_lookup(
375static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 380static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
376 u64 bytenr, 381 u64 bytenr,
377 struct btrfsic_dev_state *dev_state, 382 struct btrfsic_dev_state *dev_state,
378 u64 dev_bytenr, char *data); 383 u64 dev_bytenr);
379 384
380static struct mutex btrfsic_mutex; 385static struct mutex btrfsic_mutex;
381static int btrfsic_is_initialized; 386static int btrfsic_is_initialized;
@@ -651,7 +656,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
651 int pass; 656 int pass;
652 657
653 BUG_ON(NULL == state); 658 BUG_ON(NULL == state);
654 selected_super = kmalloc(sizeof(*selected_super), GFP_NOFS); 659 selected_super = kzalloc(sizeof(*selected_super), GFP_NOFS);
655 if (NULL == selected_super) { 660 if (NULL == selected_super) {
656 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); 661 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
657 return -1; 662 return -1;
@@ -718,7 +723,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
718 723
719 num_copies = 724 num_copies =
720 btrfs_num_copies(&state->root->fs_info->mapping_tree, 725 btrfs_num_copies(&state->root->fs_info->mapping_tree,
721 next_bytenr, PAGE_SIZE); 726 next_bytenr, state->metablock_size);
722 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 727 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
723 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 728 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
724 (unsigned long long)next_bytenr, num_copies); 729 (unsigned long long)next_bytenr, num_copies);
@@ -727,9 +732,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
727 struct btrfsic_block *next_block; 732 struct btrfsic_block *next_block;
728 struct btrfsic_block_data_ctx tmp_next_block_ctx; 733 struct btrfsic_block_data_ctx tmp_next_block_ctx;
729 struct btrfsic_block_link *l; 734 struct btrfsic_block_link *l;
730 struct btrfs_header *hdr;
731 735
732 ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE, 736 ret = btrfsic_map_block(state, next_bytenr,
737 state->metablock_size,
733 &tmp_next_block_ctx, 738 &tmp_next_block_ctx,
734 mirror_num); 739 mirror_num);
735 if (ret) { 740 if (ret) {
@@ -758,7 +763,7 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
758 BUG_ON(NULL == l); 763 BUG_ON(NULL == l);
759 764
760 ret = btrfsic_read_block(state, &tmp_next_block_ctx); 765 ret = btrfsic_read_block(state, &tmp_next_block_ctx);
761 if (ret < (int)BTRFSIC_BLOCK_SIZE) { 766 if (ret < (int)PAGE_CACHE_SIZE) {
762 printk(KERN_INFO 767 printk(KERN_INFO
763 "btrfsic: read @logical %llu failed!\n", 768 "btrfsic: read @logical %llu failed!\n",
764 (unsigned long long) 769 (unsigned long long)
@@ -768,11 +773,9 @@ static int btrfsic_process_superblock(struct btrfsic_state *state,
768 return -1; 773 return -1;
769 } 774 }
770 775
771 hdr = (struct btrfs_header *)tmp_next_block_ctx.data;
772 ret = btrfsic_process_metablock(state, 776 ret = btrfsic_process_metablock(state,
773 next_block, 777 next_block,
774 &tmp_next_block_ctx, 778 &tmp_next_block_ctx,
775 hdr,
776 BTRFS_MAX_LEVEL + 3, 1); 779 BTRFS_MAX_LEVEL + 3, 1);
777 btrfsic_release_block_ctx(&tmp_next_block_ctx); 780 btrfsic_release_block_ctx(&tmp_next_block_ctx);
778 } 781 }
@@ -799,7 +802,10 @@ static int btrfsic_process_superblock_dev_mirror(
799 802
800 /* super block bytenr is always the unmapped device bytenr */ 803 /* super block bytenr is always the unmapped device bytenr */
801 dev_bytenr = btrfs_sb_offset(superblock_mirror_num); 804 dev_bytenr = btrfs_sb_offset(superblock_mirror_num);
802 bh = __bread(superblock_bdev, dev_bytenr / 4096, 4096); 805 if (dev_bytenr + BTRFS_SUPER_INFO_SIZE > device->total_bytes)
806 return -1;
807 bh = __bread(superblock_bdev, dev_bytenr / 4096,
808 BTRFS_SUPER_INFO_SIZE);
803 if (NULL == bh) 809 if (NULL == bh)
804 return -1; 810 return -1;
805 super_tmp = (struct btrfs_super_block *) 811 super_tmp = (struct btrfs_super_block *)
@@ -808,7 +814,10 @@ static int btrfsic_process_superblock_dev_mirror(
808 if (btrfs_super_bytenr(super_tmp) != dev_bytenr || 814 if (btrfs_super_bytenr(super_tmp) != dev_bytenr ||
809 strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC, 815 strncmp((char *)(&(super_tmp->magic)), BTRFS_MAGIC,
810 sizeof(super_tmp->magic)) || 816 sizeof(super_tmp->magic)) ||
811 memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE)) { 817 memcmp(device->uuid, super_tmp->dev_item.uuid, BTRFS_UUID_SIZE) ||
818 btrfs_super_nodesize(super_tmp) != state->metablock_size ||
819 btrfs_super_leafsize(super_tmp) != state->metablock_size ||
820 btrfs_super_sectorsize(super_tmp) != state->datablock_size) {
812 brelse(bh); 821 brelse(bh);
813 return 0; 822 return 0;
814 } 823 }
@@ -835,13 +844,14 @@ static int btrfsic_process_superblock_dev_mirror(
835 superblock_tmp->never_written = 0; 844 superblock_tmp->never_written = 0;
836 superblock_tmp->mirror_num = 1 + superblock_mirror_num; 845 superblock_tmp->mirror_num = 1 + superblock_mirror_num;
837 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE) 846 if (state->print_mask & BTRFSIC_PRINT_MASK_SUPERBLOCK_WRITE)
838 printk(KERN_INFO "New initial S-block (bdev %p, %s)" 847 printk_in_rcu(KERN_INFO "New initial S-block (bdev %p, %s)"
839 " @%llu (%s/%llu/%d)\n", 848 " @%llu (%s/%llu/%d)\n",
840 superblock_bdev, device->name, 849 superblock_bdev,
841 (unsigned long long)dev_bytenr, 850 rcu_str_deref(device->name),
842 dev_state->name, 851 (unsigned long long)dev_bytenr,
843 (unsigned long long)dev_bytenr, 852 dev_state->name,
844 superblock_mirror_num); 853 (unsigned long long)dev_bytenr,
854 superblock_mirror_num);
845 list_add(&superblock_tmp->all_blocks_node, 855 list_add(&superblock_tmp->all_blocks_node,
846 &state->all_blocks_list); 856 &state->all_blocks_list);
847 btrfsic_block_hashtable_add(superblock_tmp, 857 btrfsic_block_hashtable_add(superblock_tmp,
@@ -893,7 +903,7 @@ static int btrfsic_process_superblock_dev_mirror(
893 903
894 num_copies = 904 num_copies =
895 btrfs_num_copies(&state->root->fs_info->mapping_tree, 905 btrfs_num_copies(&state->root->fs_info->mapping_tree,
896 next_bytenr, PAGE_SIZE); 906 next_bytenr, state->metablock_size);
897 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 907 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
898 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 908 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
899 (unsigned long long)next_bytenr, num_copies); 909 (unsigned long long)next_bytenr, num_copies);
@@ -902,7 +912,8 @@ static int btrfsic_process_superblock_dev_mirror(
902 struct btrfsic_block_data_ctx tmp_next_block_ctx; 912 struct btrfsic_block_data_ctx tmp_next_block_ctx;
903 struct btrfsic_block_link *l; 913 struct btrfsic_block_link *l;
904 914
905 if (btrfsic_map_block(state, next_bytenr, PAGE_SIZE, 915 if (btrfsic_map_block(state, next_bytenr,
916 state->metablock_size,
906 &tmp_next_block_ctx, 917 &tmp_next_block_ctx,
907 mirror_num)) { 918 mirror_num)) {
908 printk(KERN_INFO "btrfsic: btrfsic_map_block(" 919 printk(KERN_INFO "btrfsic: btrfsic_map_block("
@@ -966,13 +977,15 @@ static int btrfsic_process_metablock(
966 struct btrfsic_state *state, 977 struct btrfsic_state *state,
967 struct btrfsic_block *const first_block, 978 struct btrfsic_block *const first_block,
968 struct btrfsic_block_data_ctx *const first_block_ctx, 979 struct btrfsic_block_data_ctx *const first_block_ctx,
969 struct btrfs_header *const first_hdr,
970 int first_limit_nesting, int force_iodone_flag) 980 int first_limit_nesting, int force_iodone_flag)
971{ 981{
972 struct btrfsic_stack_frame initial_stack_frame = { 0 }; 982 struct btrfsic_stack_frame initial_stack_frame = { 0 };
973 struct btrfsic_stack_frame *sf; 983 struct btrfsic_stack_frame *sf;
974 struct btrfsic_stack_frame *next_stack; 984 struct btrfsic_stack_frame *next_stack;
985 struct btrfs_header *const first_hdr =
986 (struct btrfs_header *)first_block_ctx->datav[0];
975 987
988 BUG_ON(!first_hdr);
976 sf = &initial_stack_frame; 989 sf = &initial_stack_frame;
977 sf->error = 0; 990 sf->error = 0;
978 sf->i = -1; 991 sf->i = -1;
@@ -1012,21 +1025,47 @@ continue_with_current_leaf_stack_frame:
1012 } 1025 }
1013 1026
1014 if (sf->i < sf->nr) { 1027 if (sf->i < sf->nr) {
1015 struct btrfs_item *disk_item = leafhdr->items + sf->i; 1028 struct btrfs_item disk_item;
1016 struct btrfs_disk_key *disk_key = &disk_item->key; 1029 u32 disk_item_offset =
1030 (uintptr_t)(leafhdr->items + sf->i) -
1031 (uintptr_t)leafhdr;
1032 struct btrfs_disk_key *disk_key;
1017 u8 type; 1033 u8 type;
1018 const u32 item_offset = le32_to_cpu(disk_item->offset); 1034 u32 item_offset;
1019 1035
1036 if (disk_item_offset + sizeof(struct btrfs_item) >
1037 sf->block_ctx->len) {
1038leaf_item_out_of_bounds_error:
1039 printk(KERN_INFO
1040 "btrfsic: leaf item out of bounce at logical %llu, dev %s\n",
1041 sf->block_ctx->start,
1042 sf->block_ctx->dev->name);
1043 goto one_stack_frame_backwards;
1044 }
1045 btrfsic_read_from_block_data(sf->block_ctx,
1046 &disk_item,
1047 disk_item_offset,
1048 sizeof(struct btrfs_item));
1049 item_offset = le32_to_cpu(disk_item.offset);
1050 disk_key = &disk_item.key;
1020 type = disk_key->type; 1051 type = disk_key->type;
1021 1052
1022 if (BTRFS_ROOT_ITEM_KEY == type) { 1053 if (BTRFS_ROOT_ITEM_KEY == type) {
1023 const struct btrfs_root_item *const root_item = 1054 struct btrfs_root_item root_item;
1024 (struct btrfs_root_item *) 1055 u32 root_item_offset;
1025 (sf->block_ctx->data + 1056 u64 next_bytenr;
1026 offsetof(struct btrfs_leaf, items) + 1057
1027 item_offset); 1058 root_item_offset = item_offset +
1028 const u64 next_bytenr = 1059 offsetof(struct btrfs_leaf, items);
1029 le64_to_cpu(root_item->bytenr); 1060 if (root_item_offset +
1061 sizeof(struct btrfs_root_item) >
1062 sf->block_ctx->len)
1063 goto leaf_item_out_of_bounds_error;
1064 btrfsic_read_from_block_data(
1065 sf->block_ctx, &root_item,
1066 root_item_offset,
1067 sizeof(struct btrfs_root_item));
1068 next_bytenr = le64_to_cpu(root_item.bytenr);
1030 1069
1031 sf->error = 1070 sf->error =
1032 btrfsic_create_link_to_next_block( 1071 btrfsic_create_link_to_next_block(
@@ -1041,7 +1080,7 @@ continue_with_current_leaf_stack_frame:
1041 &sf->num_copies, 1080 &sf->num_copies,
1042 &sf->mirror_num, 1081 &sf->mirror_num,
1043 disk_key, 1082 disk_key,
1044 le64_to_cpu(root_item-> 1083 le64_to_cpu(root_item.
1045 generation)); 1084 generation));
1046 if (sf->error) 1085 if (sf->error)
1047 goto one_stack_frame_backwards; 1086 goto one_stack_frame_backwards;
@@ -1049,7 +1088,7 @@ continue_with_current_leaf_stack_frame:
1049 if (NULL != sf->next_block) { 1088 if (NULL != sf->next_block) {
1050 struct btrfs_header *const next_hdr = 1089 struct btrfs_header *const next_hdr =
1051 (struct btrfs_header *) 1090 (struct btrfs_header *)
1052 sf->next_block_ctx.data; 1091 sf->next_block_ctx.datav[0];
1053 1092
1054 next_stack = 1093 next_stack =
1055 btrfsic_stack_frame_alloc(); 1094 btrfsic_stack_frame_alloc();
@@ -1111,10 +1150,24 @@ continue_with_current_node_stack_frame:
1111 } 1150 }
1112 1151
1113 if (sf->i < sf->nr) { 1152 if (sf->i < sf->nr) {
1114 struct btrfs_key_ptr *disk_key_ptr = 1153 struct btrfs_key_ptr key_ptr;
1115 nodehdr->ptrs + sf->i; 1154 u32 key_ptr_offset;
1116 const u64 next_bytenr = 1155 u64 next_bytenr;
1117 le64_to_cpu(disk_key_ptr->blockptr); 1156
1157 key_ptr_offset = (uintptr_t)(nodehdr->ptrs + sf->i) -
1158 (uintptr_t)nodehdr;
1159 if (key_ptr_offset + sizeof(struct btrfs_key_ptr) >
1160 sf->block_ctx->len) {
1161 printk(KERN_INFO
1162 "btrfsic: node item out of bounce at logical %llu, dev %s\n",
1163 sf->block_ctx->start,
1164 sf->block_ctx->dev->name);
1165 goto one_stack_frame_backwards;
1166 }
1167 btrfsic_read_from_block_data(
1168 sf->block_ctx, &key_ptr, key_ptr_offset,
1169 sizeof(struct btrfs_key_ptr));
1170 next_bytenr = le64_to_cpu(key_ptr.blockptr);
1118 1171
1119 sf->error = btrfsic_create_link_to_next_block( 1172 sf->error = btrfsic_create_link_to_next_block(
1120 state, 1173 state,
@@ -1127,15 +1180,15 @@ continue_with_current_node_stack_frame:
1127 force_iodone_flag, 1180 force_iodone_flag,
1128 &sf->num_copies, 1181 &sf->num_copies,
1129 &sf->mirror_num, 1182 &sf->mirror_num,
1130 &disk_key_ptr->key, 1183 &key_ptr.key,
1131 le64_to_cpu(disk_key_ptr->generation)); 1184 le64_to_cpu(key_ptr.generation));
1132 if (sf->error) 1185 if (sf->error)
1133 goto one_stack_frame_backwards; 1186 goto one_stack_frame_backwards;
1134 1187
1135 if (NULL != sf->next_block) { 1188 if (NULL != sf->next_block) {
1136 struct btrfs_header *const next_hdr = 1189 struct btrfs_header *const next_hdr =
1137 (struct btrfs_header *) 1190 (struct btrfs_header *)
1138 sf->next_block_ctx.data; 1191 sf->next_block_ctx.datav[0];
1139 1192
1140 next_stack = btrfsic_stack_frame_alloc(); 1193 next_stack = btrfsic_stack_frame_alloc();
1141 if (NULL == next_stack) 1194 if (NULL == next_stack)
@@ -1181,6 +1234,35 @@ one_stack_frame_backwards:
1181 return sf->error; 1234 return sf->error;
1182} 1235}
1183 1236
1237static void btrfsic_read_from_block_data(
1238 struct btrfsic_block_data_ctx *block_ctx,
1239 void *dstv, u32 offset, size_t len)
1240{
1241 size_t cur;
1242 size_t offset_in_page;
1243 char *kaddr;
1244 char *dst = (char *)dstv;
1245 size_t start_offset = block_ctx->start & ((u64)PAGE_CACHE_SIZE - 1);
1246 unsigned long i = (start_offset + offset) >> PAGE_CACHE_SHIFT;
1247
1248 WARN_ON(offset + len > block_ctx->len);
1249 offset_in_page = (start_offset + offset) &
1250 ((unsigned long)PAGE_CACHE_SIZE - 1);
1251
1252 while (len > 0) {
1253 cur = min(len, ((size_t)PAGE_CACHE_SIZE - offset_in_page));
1254 BUG_ON(i >= (block_ctx->len + PAGE_CACHE_SIZE - 1) >>
1255 PAGE_CACHE_SHIFT);
1256 kaddr = block_ctx->datav[i];
1257 memcpy(dst, kaddr + offset_in_page, cur);
1258
1259 dst += cur;
1260 len -= cur;
1261 offset_in_page = 0;
1262 i++;
1263 }
1264}
1265
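The btrfsic_read_from_block_data() helper defined above exists because a metadata block may now span several separately kmapped pages, so a structure read from it can straddle a page boundary and must be copied out piecewise instead of being dereferenced in place. A minimal sketch of the calling pattern, mirroring the callers added in this change (the offset chosen here is illustrative):

/* Hedged sketch: copy the first leaf item out of a multi-page block_ctx.
 * block_ctx is assumed to have been filled by btrfsic_read_block(). */
struct btrfs_item item;
u32 item_offset = offsetof(struct btrfs_leaf, items);

if (item_offset + sizeof(item) <= block_ctx->len) {
	btrfsic_read_from_block_data(block_ctx, &item, item_offset,
				     sizeof(item));
	/* item is now a contiguous copy, even if it crossed a page boundary */
}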
1184static int btrfsic_create_link_to_next_block( 1266static int btrfsic_create_link_to_next_block(
1185 struct btrfsic_state *state, 1267 struct btrfsic_state *state,
1186 struct btrfsic_block *block, 1268 struct btrfsic_block *block,
@@ -1204,7 +1286,7 @@ static int btrfsic_create_link_to_next_block(
1204 if (0 == *num_copiesp) { 1286 if (0 == *num_copiesp) {
1205 *num_copiesp = 1287 *num_copiesp =
1206 btrfs_num_copies(&state->root->fs_info->mapping_tree, 1288 btrfs_num_copies(&state->root->fs_info->mapping_tree,
1207 next_bytenr, PAGE_SIZE); 1289 next_bytenr, state->metablock_size);
1208 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1290 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
1209 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 1291 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
1210 (unsigned long long)next_bytenr, *num_copiesp); 1292 (unsigned long long)next_bytenr, *num_copiesp);
@@ -1219,7 +1301,7 @@ static int btrfsic_create_link_to_next_block(
1219 "btrfsic_create_link_to_next_block(mirror_num=%d)\n", 1301 "btrfsic_create_link_to_next_block(mirror_num=%d)\n",
1220 *mirror_nump); 1302 *mirror_nump);
1221 ret = btrfsic_map_block(state, next_bytenr, 1303 ret = btrfsic_map_block(state, next_bytenr,
1222 BTRFSIC_BLOCK_SIZE, 1304 state->metablock_size,
1223 next_block_ctx, *mirror_nump); 1305 next_block_ctx, *mirror_nump);
1224 if (ret) { 1306 if (ret) {
1225 printk(KERN_INFO 1307 printk(KERN_INFO
@@ -1314,7 +1396,7 @@ static int btrfsic_create_link_to_next_block(
1314 1396
1315 if (limit_nesting > 0 && did_alloc_block_link) { 1397 if (limit_nesting > 0 && did_alloc_block_link) {
1316 ret = btrfsic_read_block(state, next_block_ctx); 1398 ret = btrfsic_read_block(state, next_block_ctx);
1317 if (ret < (int)BTRFSIC_BLOCK_SIZE) { 1399 if (ret < (int)next_block_ctx->len) {
1318 printk(KERN_INFO 1400 printk(KERN_INFO
1319 "btrfsic: read block @logical %llu failed!\n", 1401 "btrfsic: read block @logical %llu failed!\n",
1320 (unsigned long long)next_bytenr); 1402 (unsigned long long)next_bytenr);
@@ -1339,43 +1421,74 @@ static int btrfsic_handle_extent_data(
1339 u32 item_offset, int force_iodone_flag) 1421 u32 item_offset, int force_iodone_flag)
1340{ 1422{
1341 int ret; 1423 int ret;
1342 struct btrfs_file_extent_item *file_extent_item = 1424 struct btrfs_file_extent_item file_extent_item;
1343 (struct btrfs_file_extent_item *)(block_ctx->data + 1425 u64 file_extent_item_offset;
1344 offsetof(struct btrfs_leaf, 1426 u64 next_bytenr;
1345 items) + item_offset); 1427 u64 num_bytes;
1346 u64 next_bytenr = 1428 u64 generation;
1347 le64_to_cpu(file_extent_item->disk_bytenr) +
1348 le64_to_cpu(file_extent_item->offset);
1349 u64 num_bytes = le64_to_cpu(file_extent_item->num_bytes);
1350 u64 generation = le64_to_cpu(file_extent_item->generation);
1351 struct btrfsic_block_link *l; 1429 struct btrfsic_block_link *l;
1352 1430
1431 file_extent_item_offset = offsetof(struct btrfs_leaf, items) +
1432 item_offset;
1433 if (file_extent_item_offset +
1434 offsetof(struct btrfs_file_extent_item, disk_num_bytes) >
1435 block_ctx->len) {
1436 printk(KERN_INFO
1437 "btrfsic: file item out of bounce at logical %llu, dev %s\n",
1438 block_ctx->start, block_ctx->dev->name);
1439 return -1;
1440 }
1441
1442 btrfsic_read_from_block_data(block_ctx, &file_extent_item,
1443 file_extent_item_offset,
1444 offsetof(struct btrfs_file_extent_item, disk_num_bytes));
1445 if (BTRFS_FILE_EXTENT_REG != file_extent_item.type ||
1446 ((u64)0) == le64_to_cpu(file_extent_item.disk_bytenr)) {
1447 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
1448 printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu\n",
1449 file_extent_item.type,
1450 (unsigned long long)
1451 le64_to_cpu(file_extent_item.disk_bytenr));
1452 return 0;
1453 }
1454
1455 if (file_extent_item_offset + sizeof(struct btrfs_file_extent_item) >
1456 block_ctx->len) {
1457 printk(KERN_INFO
1458 "btrfsic: file item out of bounce at logical %llu, dev %s\n",
1459 block_ctx->start, block_ctx->dev->name);
1460 return -1;
1461 }
1462 btrfsic_read_from_block_data(block_ctx, &file_extent_item,
1463 file_extent_item_offset,
1464 sizeof(struct btrfs_file_extent_item));
1465 next_bytenr = le64_to_cpu(file_extent_item.disk_bytenr) +
1466 le64_to_cpu(file_extent_item.offset);
1467 generation = le64_to_cpu(file_extent_item.generation);
1468 num_bytes = le64_to_cpu(file_extent_item.num_bytes);
1469 generation = le64_to_cpu(file_extent_item.generation);
1470
1353 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE) 1471 if (state->print_mask & BTRFSIC_PRINT_MASK_VERY_VERBOSE)
1354 printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu," 1472 printk(KERN_INFO "extent_data: type %u, disk_bytenr = %llu,"
1355 " offset = %llu, num_bytes = %llu\n", 1473 " offset = %llu, num_bytes = %llu\n",
1356 file_extent_item->type, 1474 file_extent_item.type,
1357 (unsigned long long)
1358 le64_to_cpu(file_extent_item->disk_bytenr),
1359 (unsigned long long) 1475 (unsigned long long)
1360 le64_to_cpu(file_extent_item->offset), 1476 le64_to_cpu(file_extent_item.disk_bytenr),
1361 (unsigned long long) 1477 (unsigned long long)le64_to_cpu(file_extent_item.offset),
1362 le64_to_cpu(file_extent_item->num_bytes)); 1478 (unsigned long long)num_bytes);
1363 if (BTRFS_FILE_EXTENT_REG != file_extent_item->type ||
1364 ((u64)0) == le64_to_cpu(file_extent_item->disk_bytenr))
1365 return 0;
1366 while (num_bytes > 0) { 1479 while (num_bytes > 0) {
1367 u32 chunk_len; 1480 u32 chunk_len;
1368 int num_copies; 1481 int num_copies;
1369 int mirror_num; 1482 int mirror_num;
1370 1483
1371 if (num_bytes > BTRFSIC_BLOCK_SIZE) 1484 if (num_bytes > state->datablock_size)
1372 chunk_len = BTRFSIC_BLOCK_SIZE; 1485 chunk_len = state->datablock_size;
1373 else 1486 else
1374 chunk_len = num_bytes; 1487 chunk_len = num_bytes;
1375 1488
1376 num_copies = 1489 num_copies =
1377 btrfs_num_copies(&state->root->fs_info->mapping_tree, 1490 btrfs_num_copies(&state->root->fs_info->mapping_tree,
1378 next_bytenr, PAGE_SIZE); 1491 next_bytenr, state->datablock_size);
1379 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 1492 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
1380 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 1493 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
1381 (unsigned long long)next_bytenr, num_copies); 1494 (unsigned long long)next_bytenr, num_copies);
@@ -1475,8 +1588,9 @@ static int btrfsic_map_block(struct btrfsic_state *state, u64 bytenr, u32 len,
1475 block_ctx_out->dev_bytenr = multi->stripes[0].physical; 1588 block_ctx_out->dev_bytenr = multi->stripes[0].physical;
1476 block_ctx_out->start = bytenr; 1589 block_ctx_out->start = bytenr;
1477 block_ctx_out->len = len; 1590 block_ctx_out->len = len;
1478 block_ctx_out->data = NULL; 1591 block_ctx_out->datav = NULL;
1479 block_ctx_out->bh = NULL; 1592 block_ctx_out->pagev = NULL;
1593 block_ctx_out->mem_to_free = NULL;
1480 1594
1481 if (0 == ret) 1595 if (0 == ret)
1482 kfree(multi); 1596 kfree(multi);
@@ -1496,8 +1610,9 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
1496 block_ctx_out->dev_bytenr = bytenr; 1610 block_ctx_out->dev_bytenr = bytenr;
1497 block_ctx_out->start = bytenr; 1611 block_ctx_out->start = bytenr;
1498 block_ctx_out->len = len; 1612 block_ctx_out->len = len;
1499 block_ctx_out->data = NULL; 1613 block_ctx_out->datav = NULL;
1500 block_ctx_out->bh = NULL; 1614 block_ctx_out->pagev = NULL;
1615 block_ctx_out->mem_to_free = NULL;
1501 if (NULL != block_ctx_out->dev) { 1616 if (NULL != block_ctx_out->dev) {
1502 return 0; 1617 return 0;
1503 } else { 1618 } else {
@@ -1508,38 +1623,127 @@ static int btrfsic_map_superblock(struct btrfsic_state *state, u64 bytenr,
1508 1623
1509static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx) 1624static void btrfsic_release_block_ctx(struct btrfsic_block_data_ctx *block_ctx)
1510{ 1625{
1511 if (NULL != block_ctx->bh) { 1626 if (block_ctx->mem_to_free) {
1512 brelse(block_ctx->bh); 1627 unsigned int num_pages;
1513 block_ctx->bh = NULL; 1628
1629 BUG_ON(!block_ctx->datav);
1630 BUG_ON(!block_ctx->pagev);
1631 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
1632 PAGE_CACHE_SHIFT;
1633 while (num_pages > 0) {
1634 num_pages--;
1635 if (block_ctx->datav[num_pages]) {
1636 kunmap(block_ctx->pagev[num_pages]);
1637 block_ctx->datav[num_pages] = NULL;
1638 }
1639 if (block_ctx->pagev[num_pages]) {
1640 __free_page(block_ctx->pagev[num_pages]);
1641 block_ctx->pagev[num_pages] = NULL;
1642 }
1643 }
1644
1645 kfree(block_ctx->mem_to_free);
1646 block_ctx->mem_to_free = NULL;
1647 block_ctx->pagev = NULL;
1648 block_ctx->datav = NULL;
1514 } 1649 }
1515} 1650}
1516 1651
1517static int btrfsic_read_block(struct btrfsic_state *state, 1652static int btrfsic_read_block(struct btrfsic_state *state,
1518 struct btrfsic_block_data_ctx *block_ctx) 1653 struct btrfsic_block_data_ctx *block_ctx)
1519{ 1654{
1520 block_ctx->bh = NULL; 1655 unsigned int num_pages;
1521 if (block_ctx->dev_bytenr & 4095) { 1656 unsigned int i;
1657 u64 dev_bytenr;
1658 int ret;
1659
1660 BUG_ON(block_ctx->datav);
1661 BUG_ON(block_ctx->pagev);
1662 BUG_ON(block_ctx->mem_to_free);
1663 if (block_ctx->dev_bytenr & ((u64)PAGE_CACHE_SIZE - 1)) {
1522 printk(KERN_INFO 1664 printk(KERN_INFO
1523 "btrfsic: read_block() with unaligned bytenr %llu\n", 1665 "btrfsic: read_block() with unaligned bytenr %llu\n",
1524 (unsigned long long)block_ctx->dev_bytenr); 1666 (unsigned long long)block_ctx->dev_bytenr);
1525 return -1; 1667 return -1;
1526 } 1668 }
1527 if (block_ctx->len > 4096) { 1669
1528 printk(KERN_INFO 1670 num_pages = (block_ctx->len + (u64)PAGE_CACHE_SIZE - 1) >>
1529 "btrfsic: read_block() with too huge size %d\n", 1671 PAGE_CACHE_SHIFT;
1530 block_ctx->len); 1672 block_ctx->mem_to_free = kzalloc((sizeof(*block_ctx->datav) +
1673 sizeof(*block_ctx->pagev)) *
1674 num_pages, GFP_NOFS);
1675 if (!block_ctx->mem_to_free)
1531 return -1; 1676 return -1;
1677 block_ctx->datav = block_ctx->mem_to_free;
1678 block_ctx->pagev = (struct page **)(block_ctx->datav + num_pages);
1679 for (i = 0; i < num_pages; i++) {
1680 block_ctx->pagev[i] = alloc_page(GFP_NOFS);
1681 if (!block_ctx->pagev[i])
1682 return -1;
1532 } 1683 }
1533 1684
1534 block_ctx->bh = __bread(block_ctx->dev->bdev, 1685 dev_bytenr = block_ctx->dev_bytenr;
1535 block_ctx->dev_bytenr >> 12, 4096); 1686 for (i = 0; i < num_pages;) {
1536 if (NULL == block_ctx->bh) 1687 struct bio *bio;
1537 return -1; 1688 unsigned int j;
1538 block_ctx->data = block_ctx->bh->b_data; 1689 DECLARE_COMPLETION_ONSTACK(complete);
1690
1691 bio = bio_alloc(GFP_NOFS, num_pages - i);
1692 if (!bio) {
1693 printk(KERN_INFO
1694 "btrfsic: bio_alloc() for %u pages failed!\n",
1695 num_pages - i);
1696 return -1;
1697 }
1698 bio->bi_bdev = block_ctx->dev->bdev;
1699 bio->bi_sector = dev_bytenr >> 9;
1700 bio->bi_end_io = btrfsic_complete_bio_end_io;
1701 bio->bi_private = &complete;
1702
1703 for (j = i; j < num_pages; j++) {
1704 ret = bio_add_page(bio, block_ctx->pagev[j],
1705 PAGE_CACHE_SIZE, 0);
1706 if (PAGE_CACHE_SIZE != ret)
1707 break;
1708 }
1709 if (j == i) {
1710 printk(KERN_INFO
1711 "btrfsic: error, failed to add a single page!\n");
1712 return -1;
1713 }
1714 submit_bio(READ, bio);
1715
1716 /* this will also unplug the queue */
1717 wait_for_completion(&complete);
1718
1719 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1720 printk(KERN_INFO
1721 "btrfsic: read error at logical %llu dev %s!\n",
1722 block_ctx->start, block_ctx->dev->name);
1723 bio_put(bio);
1724 return -1;
1725 }
1726 bio_put(bio);
1727 dev_bytenr += (j - i) * PAGE_CACHE_SIZE;
1728 i = j;
1729 }
1730 for (i = 0; i < num_pages; i++) {
1731 block_ctx->datav[i] = kmap(block_ctx->pagev[i]);
1732 if (!block_ctx->datav[i]) {
1733 printk(KERN_INFO "btrfsic: kmap() failed (dev %s)!\n",
1734 block_ctx->dev->name);
1735 return -1;
1736 }
1737 }
1539 1738
1540 return block_ctx->len; 1739 return block_ctx->len;
1541} 1740}
1542 1741
1742static void btrfsic_complete_bio_end_io(struct bio *bio, int err)
1743{
1744 complete((struct completion *)bio->bi_private);
1745}
1746
1543static void btrfsic_dump_database(struct btrfsic_state *state) 1747static void btrfsic_dump_database(struct btrfsic_state *state)
1544{ 1748{
1545 struct list_head *elem_all; 1749 struct list_head *elem_all;
@@ -1617,32 +1821,39 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
1617 * (note that this test fails for the super block) 1821 * (note that this test fails for the super block)
1618 */ 1822 */
1619static int btrfsic_test_for_metadata(struct btrfsic_state *state, 1823static int btrfsic_test_for_metadata(struct btrfsic_state *state,
1620 const u8 *data, unsigned int size) 1824 char **datav, unsigned int num_pages)
1621{ 1825{
1622 struct btrfs_header *h; 1826 struct btrfs_header *h;
1623 u8 csum[BTRFS_CSUM_SIZE]; 1827 u8 csum[BTRFS_CSUM_SIZE];
1624 u32 crc = ~(u32)0; 1828 u32 crc = ~(u32)0;
1625 int fail = 0; 1829 unsigned int i;
1626 int crc_fail = 0;
1627 1830
1628 h = (struct btrfs_header *)data; 1831 if (num_pages * PAGE_CACHE_SIZE < state->metablock_size)
1832 return 1; /* not metadata */
1833 num_pages = state->metablock_size >> PAGE_CACHE_SHIFT;
1834 h = (struct btrfs_header *)datav[0];
1629 1835
1630 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE)) 1836 if (memcmp(h->fsid, state->root->fs_info->fsid, BTRFS_UUID_SIZE))
1631 fail++; 1837 return 1;
1838
1839 for (i = 0; i < num_pages; i++) {
1840 u8 *data = i ? datav[i] : (datav[i] + BTRFS_CSUM_SIZE);
1841 size_t sublen = i ? PAGE_CACHE_SIZE :
1842 (PAGE_CACHE_SIZE - BTRFS_CSUM_SIZE);
1632 1843
1633 crc = crc32c(crc, data + BTRFS_CSUM_SIZE, PAGE_SIZE - BTRFS_CSUM_SIZE); 1844 crc = crc32c(crc, data, sublen);
1845 }
1634 btrfs_csum_final(crc, csum); 1846 btrfs_csum_final(crc, csum);
1635 if (memcmp(csum, h->csum, state->csum_size)) 1847 if (memcmp(csum, h->csum, state->csum_size))
1636 crc_fail++; 1848 return 1;
1637 1849
1638 return fail || crc_fail; 1850 return 0; /* is metadata */
1639} 1851}
1640 1852
1641static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state, 1853static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1642 u64 dev_bytenr, 1854 u64 dev_bytenr, char **mapped_datav,
1643 u8 *mapped_data, unsigned int len, 1855 unsigned int num_pages,
1644 struct bio *bio, 1856 struct bio *bio, int *bio_is_patched,
1645 int *bio_is_patched,
1646 struct buffer_head *bh, 1857 struct buffer_head *bh,
1647 int submit_bio_bh_rw) 1858 int submit_bio_bh_rw)
1648{ 1859{
@@ -1652,12 +1863,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1652 int ret; 1863 int ret;
1653 struct btrfsic_state *state = dev_state->state; 1864 struct btrfsic_state *state = dev_state->state;
1654 struct block_device *bdev = dev_state->bdev; 1865 struct block_device *bdev = dev_state->bdev;
1866 unsigned int processed_len;
1655 1867
1656 WARN_ON(len > PAGE_SIZE);
1657 is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_data, len));
1658 if (NULL != bio_is_patched) 1868 if (NULL != bio_is_patched)
1659 *bio_is_patched = 0; 1869 *bio_is_patched = 0;
1660 1870
1871again:
1872 if (num_pages == 0)
1873 return;
1874
1875 processed_len = 0;
1876 is_metadata = (0 == btrfsic_test_for_metadata(state, mapped_datav,
1877 num_pages));
1878
1661 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr, 1879 block = btrfsic_block_hashtable_lookup(bdev, dev_bytenr,
1662 &state->block_hashtable); 1880 &state->block_hashtable);
1663 if (NULL != block) { 1881 if (NULL != block) {
@@ -1667,8 +1885,16 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1667 1885
1668 if (block->is_superblock) { 1886 if (block->is_superblock) {
1669 bytenr = le64_to_cpu(((struct btrfs_super_block *) 1887 bytenr = le64_to_cpu(((struct btrfs_super_block *)
1670 mapped_data)->bytenr); 1888 mapped_datav[0])->bytenr);
1889 if (num_pages * PAGE_CACHE_SIZE <
1890 BTRFS_SUPER_INFO_SIZE) {
1891 printk(KERN_INFO
1892 "btrfsic: cannot work with too short bios!\n");
1893 return;
1894 }
1671 is_metadata = 1; 1895 is_metadata = 1;
1896 BUG_ON(BTRFS_SUPER_INFO_SIZE & (PAGE_CACHE_SIZE - 1));
1897 processed_len = BTRFS_SUPER_INFO_SIZE;
1672 if (state->print_mask & 1898 if (state->print_mask &
1673 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) { 1899 BTRFSIC_PRINT_MASK_TREE_BEFORE_SB_WRITE) {
1674 printk(KERN_INFO 1900 printk(KERN_INFO
@@ -1678,12 +1904,18 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1678 } 1904 }
1679 if (is_metadata) { 1905 if (is_metadata) {
1680 if (!block->is_superblock) { 1906 if (!block->is_superblock) {
1907 if (num_pages * PAGE_CACHE_SIZE <
1908 state->metablock_size) {
1909 printk(KERN_INFO
1910 "btrfsic: cannot work with too short bios!\n");
1911 return;
1912 }
1913 processed_len = state->metablock_size;
1681 bytenr = le64_to_cpu(((struct btrfs_header *) 1914 bytenr = le64_to_cpu(((struct btrfs_header *)
1682 mapped_data)->bytenr); 1915 mapped_datav[0])->bytenr);
1683 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, 1916 btrfsic_cmp_log_and_dev_bytenr(state, bytenr,
1684 dev_state, 1917 dev_state,
1685 dev_bytenr, 1918 dev_bytenr);
1686 mapped_data);
1687 } 1919 }
1688 if (block->logical_bytenr != bytenr) { 1920 if (block->logical_bytenr != bytenr) {
1689 printk(KERN_INFO 1921 printk(KERN_INFO
@@ -1710,6 +1942,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1710 block->mirror_num, 1942 block->mirror_num,
1711 btrfsic_get_block_type(state, block)); 1943 btrfsic_get_block_type(state, block));
1712 } else { 1944 } else {
1945 if (num_pages * PAGE_CACHE_SIZE <
1946 state->datablock_size) {
1947 printk(KERN_INFO
1948 "btrfsic: cannot work with too short bios!\n");
1949 return;
1950 }
1951 processed_len = state->datablock_size;
1713 bytenr = block->logical_bytenr; 1952 bytenr = block->logical_bytenr;
1714 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 1953 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
1715 printk(KERN_INFO 1954 printk(KERN_INFO
@@ -1747,7 +1986,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1747 le64_to_cpu(block->disk_key.offset), 1986 le64_to_cpu(block->disk_key.offset),
1748 (unsigned long long) 1987 (unsigned long long)
1749 le64_to_cpu(((struct btrfs_header *) 1988 le64_to_cpu(((struct btrfs_header *)
1750 mapped_data)->generation), 1989 mapped_datav[0])->generation),
1751 (unsigned long long) 1990 (unsigned long long)
1752 state->max_superblock_generation); 1991 state->max_superblock_generation);
1753 btrfsic_dump_tree(state); 1992 btrfsic_dump_tree(state);
@@ -1765,10 +2004,10 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1765 (unsigned long long)block->generation, 2004 (unsigned long long)block->generation,
1766 (unsigned long long) 2005 (unsigned long long)
1767 le64_to_cpu(((struct btrfs_header *) 2006 le64_to_cpu(((struct btrfs_header *)
1768 mapped_data)->generation)); 2007 mapped_datav[0])->generation));
1769 /* it would not be safe to go on */ 2008 /* it would not be safe to go on */
1770 btrfsic_dump_tree(state); 2009 btrfsic_dump_tree(state);
1771 return; 2010 goto continue_loop;
1772 } 2011 }
1773 2012
1774 /* 2013 /*
@@ -1796,18 +2035,19 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1796 } 2035 }
1797 2036
1798 if (block->is_superblock) 2037 if (block->is_superblock)
1799 ret = btrfsic_map_superblock(state, bytenr, len, 2038 ret = btrfsic_map_superblock(state, bytenr,
2039 processed_len,
1800 bdev, &block_ctx); 2040 bdev, &block_ctx);
1801 else 2041 else
1802 ret = btrfsic_map_block(state, bytenr, len, 2042 ret = btrfsic_map_block(state, bytenr, processed_len,
1803 &block_ctx, 0); 2043 &block_ctx, 0);
1804 if (ret) { 2044 if (ret) {
1805 printk(KERN_INFO 2045 printk(KERN_INFO
1806 "btrfsic: btrfsic_map_block(root @%llu)" 2046 "btrfsic: btrfsic_map_block(root @%llu)"
1807 " failed!\n", (unsigned long long)bytenr); 2047 " failed!\n", (unsigned long long)bytenr);
1808 return; 2048 goto continue_loop;
1809 } 2049 }
1810 block_ctx.data = mapped_data; 2050 block_ctx.datav = mapped_datav;
1811 /* the following is required in case of writes to mirrors, 2051 /* the following is required in case of writes to mirrors,
1812 * use the same that was used for the lookup */ 2052 * use the same that was used for the lookup */
1813 block_ctx.dev = dev_state; 2053 block_ctx.dev = dev_state;
@@ -1863,11 +2103,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1863 block->logical_bytenr = bytenr; 2103 block->logical_bytenr = bytenr;
1864 block->is_metadata = 1; 2104 block->is_metadata = 1;
1865 if (block->is_superblock) { 2105 if (block->is_superblock) {
2106 BUG_ON(PAGE_CACHE_SIZE !=
2107 BTRFS_SUPER_INFO_SIZE);
1866 ret = btrfsic_process_written_superblock( 2108 ret = btrfsic_process_written_superblock(
1867 state, 2109 state,
1868 block, 2110 block,
1869 (struct btrfs_super_block *) 2111 (struct btrfs_super_block *)
1870 mapped_data); 2112 mapped_datav[0]);
1871 if (state->print_mask & 2113 if (state->print_mask &
1872 BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) { 2114 BTRFSIC_PRINT_MASK_TREE_AFTER_SB_WRITE) {
1873 printk(KERN_INFO 2115 printk(KERN_INFO
@@ -1880,8 +2122,6 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1880 state, 2122 state,
1881 block, 2123 block,
1882 &block_ctx, 2124 &block_ctx,
1883 (struct btrfs_header *)
1884 block_ctx.data,
1885 0, 0); 2125 0, 0);
1886 } 2126 }
1887 if (ret) 2127 if (ret)
@@ -1912,26 +2152,30 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1912 u64 bytenr; 2152 u64 bytenr;
1913 2153
1914 if (!is_metadata) { 2154 if (!is_metadata) {
2155 processed_len = state->datablock_size;
1915 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2156 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
1916 printk(KERN_INFO "Written block (%s/%llu/?)" 2157 printk(KERN_INFO "Written block (%s/%llu/?)"
1917 " !found in hash table, D.\n", 2158 " !found in hash table, D.\n",
1918 dev_state->name, 2159 dev_state->name,
1919 (unsigned long long)dev_bytenr); 2160 (unsigned long long)dev_bytenr);
1920 if (!state->include_extent_data) 2161 if (!state->include_extent_data) {
1921 return; /* ignore that written D block */ 2162 /* ignore that written D block */
2163 goto continue_loop;
2164 }
1922 2165
1923 /* this is getting ugly for the 2166 /* this is getting ugly for the
1924 * include_extent_data case... */ 2167 * include_extent_data case... */
1925 bytenr = 0; /* unknown */ 2168 bytenr = 0; /* unknown */
1926 block_ctx.start = bytenr; 2169 block_ctx.start = bytenr;
1927 block_ctx.len = len; 2170 block_ctx.len = processed_len;
1928 block_ctx.bh = NULL; 2171 block_ctx.mem_to_free = NULL;
2172 block_ctx.pagev = NULL;
1929 } else { 2173 } else {
2174 processed_len = state->metablock_size;
1930 bytenr = le64_to_cpu(((struct btrfs_header *) 2175 bytenr = le64_to_cpu(((struct btrfs_header *)
1931 mapped_data)->bytenr); 2176 mapped_datav[0])->bytenr);
1932 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state, 2177 btrfsic_cmp_log_and_dev_bytenr(state, bytenr, dev_state,
1933 dev_bytenr, 2178 dev_bytenr);
1934 mapped_data);
1935 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE) 2179 if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
1936 printk(KERN_INFO 2180 printk(KERN_INFO
1937 "Written block @%llu (%s/%llu/?)" 2181 "Written block @%llu (%s/%llu/?)"
@@ -1940,17 +2184,17 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1940 dev_state->name, 2184 dev_state->name,
1941 (unsigned long long)dev_bytenr); 2185 (unsigned long long)dev_bytenr);
1942 2186
1943 ret = btrfsic_map_block(state, bytenr, len, &block_ctx, 2187 ret = btrfsic_map_block(state, bytenr, processed_len,
1944 0); 2188 &block_ctx, 0);
1945 if (ret) { 2189 if (ret) {
1946 printk(KERN_INFO 2190 printk(KERN_INFO
1947 "btrfsic: btrfsic_map_block(root @%llu)" 2191 "btrfsic: btrfsic_map_block(root @%llu)"
1948 " failed!\n", 2192 " failed!\n",
1949 (unsigned long long)dev_bytenr); 2193 (unsigned long long)dev_bytenr);
1950 return; 2194 goto continue_loop;
1951 } 2195 }
1952 } 2196 }
1953 block_ctx.data = mapped_data; 2197 block_ctx.datav = mapped_datav;
1954 /* the following is required in case of writes to mirrors, 2198 /* the following is required in case of writes to mirrors,
1955 * use the same that was used for the lookup */ 2199 * use the same that was used for the lookup */
1956 block_ctx.dev = dev_state; 2200 block_ctx.dev = dev_state;
@@ -1960,7 +2204,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
1960 if (NULL == block) { 2204 if (NULL == block) {
1961 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n"); 2205 printk(KERN_INFO "btrfsic: error, kmalloc failed!\n");
1962 btrfsic_release_block_ctx(&block_ctx); 2206 btrfsic_release_block_ctx(&block_ctx);
1963 return; 2207 goto continue_loop;
1964 } 2208 }
1965 block->dev_state = dev_state; 2209 block->dev_state = dev_state;
1966 block->dev_bytenr = dev_bytenr; 2210 block->dev_bytenr = dev_bytenr;
@@ -2020,9 +2264,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
2020 2264
2021 if (is_metadata) { 2265 if (is_metadata) {
2022 ret = btrfsic_process_metablock(state, block, 2266 ret = btrfsic_process_metablock(state, block,
2023 &block_ctx, 2267 &block_ctx, 0, 0);
2024 (struct btrfs_header *)
2025 block_ctx.data, 0, 0);
2026 if (ret) 2268 if (ret)
2027 printk(KERN_INFO 2269 printk(KERN_INFO
2028 "btrfsic: process_metablock(root @%llu)" 2270 "btrfsic: process_metablock(root @%llu)"
@@ -2031,6 +2273,13 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
2031 } 2273 }
2032 btrfsic_release_block_ctx(&block_ctx); 2274 btrfsic_release_block_ctx(&block_ctx);
2033 } 2275 }
2276
2277continue_loop:
2278 BUG_ON(!processed_len);
2279 dev_bytenr += processed_len;
2280 mapped_datav += processed_len >> PAGE_CACHE_SHIFT;
2281 num_pages -= processed_len >> PAGE_CACHE_SHIFT;
2282 goto again;
2034} 2283}
2035 2284
2036static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status) 2285static void btrfsic_bio_end_io(struct bio *bp, int bio_error_status)
@@ -2213,7 +2462,7 @@ static int btrfsic_process_written_superblock(
2213 2462
2214 num_copies = 2463 num_copies =
2215 btrfs_num_copies(&state->root->fs_info->mapping_tree, 2464 btrfs_num_copies(&state->root->fs_info->mapping_tree,
2216 next_bytenr, PAGE_SIZE); 2465 next_bytenr, BTRFS_SUPER_INFO_SIZE);
2217 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES) 2466 if (state->print_mask & BTRFSIC_PRINT_MASK_NUM_COPIES)
2218 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n", 2467 printk(KERN_INFO "num_copies(log_bytenr=%llu) = %d\n",
2219 (unsigned long long)next_bytenr, num_copies); 2468 (unsigned long long)next_bytenr, num_copies);
@@ -2224,7 +2473,8 @@ static int btrfsic_process_written_superblock(
2224 printk(KERN_INFO 2473 printk(KERN_INFO
2225 "btrfsic_process_written_superblock(" 2474 "btrfsic_process_written_superblock("
2226 "mirror_num=%d)\n", mirror_num); 2475 "mirror_num=%d)\n", mirror_num);
2227 ret = btrfsic_map_block(state, next_bytenr, PAGE_SIZE, 2476 ret = btrfsic_map_block(state, next_bytenr,
2477 BTRFS_SUPER_INFO_SIZE,
2228 &tmp_next_block_ctx, 2478 &tmp_next_block_ctx,
2229 mirror_num); 2479 mirror_num);
2230 if (ret) { 2480 if (ret) {
@@ -2689,7 +2939,7 @@ static struct btrfsic_block *btrfsic_block_lookup_or_add(
2689static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state, 2939static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2690 u64 bytenr, 2940 u64 bytenr,
2691 struct btrfsic_dev_state *dev_state, 2941 struct btrfsic_dev_state *dev_state,
2692 u64 dev_bytenr, char *data) 2942 u64 dev_bytenr)
2693{ 2943{
2694 int num_copies; 2944 int num_copies;
2695 int mirror_num; 2945 int mirror_num;
@@ -2698,10 +2948,10 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2698 int match = 0; 2948 int match = 0;
2699 2949
2700 num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree, 2950 num_copies = btrfs_num_copies(&state->root->fs_info->mapping_tree,
2701 bytenr, PAGE_SIZE); 2951 bytenr, state->metablock_size);
2702 2952
2703 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2953 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
2704 ret = btrfsic_map_block(state, bytenr, PAGE_SIZE, 2954 ret = btrfsic_map_block(state, bytenr, state->metablock_size,
2705 &block_ctx, mirror_num); 2955 &block_ctx, mirror_num);
2706 if (ret) { 2956 if (ret) {
2707 printk(KERN_INFO "btrfsic:" 2957 printk(KERN_INFO "btrfsic:"
@@ -2727,7 +2977,8 @@ static void btrfsic_cmp_log_and_dev_bytenr(struct btrfsic_state *state,
2727 (unsigned long long)bytenr, dev_state->name, 2977 (unsigned long long)bytenr, dev_state->name,
2728 (unsigned long long)dev_bytenr); 2978 (unsigned long long)dev_bytenr);
2729 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) { 2979 for (mirror_num = 1; mirror_num <= num_copies; mirror_num++) {
2730 ret = btrfsic_map_block(state, bytenr, PAGE_SIZE, 2980 ret = btrfsic_map_block(state, bytenr,
2981 state->metablock_size,
2731 &block_ctx, mirror_num); 2982 &block_ctx, mirror_num);
2732 if (ret) 2983 if (ret)
2733 continue; 2984 continue;
@@ -2781,13 +3032,13 @@ int btrfsic_submit_bh(int rw, struct buffer_head *bh)
2781 (unsigned long)bh->b_size, bh->b_data, 3032 (unsigned long)bh->b_size, bh->b_data,
2782 bh->b_bdev); 3033 bh->b_bdev);
2783 btrfsic_process_written_block(dev_state, dev_bytenr, 3034 btrfsic_process_written_block(dev_state, dev_bytenr,
2784 bh->b_data, bh->b_size, NULL, 3035 &bh->b_data, 1, NULL,
2785 NULL, bh, rw); 3036 NULL, bh, rw);
2786 } else if (NULL != dev_state && (rw & REQ_FLUSH)) { 3037 } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
2787 if (dev_state->state->print_mask & 3038 if (dev_state->state->print_mask &
2788 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 3039 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2789 printk(KERN_INFO 3040 printk(KERN_INFO
2790 "submit_bh(rw=0x%x) FLUSH, bdev=%p)\n", 3041 "submit_bh(rw=0x%x FLUSH, bdev=%p)\n",
2791 rw, bh->b_bdev); 3042 rw, bh->b_bdev);
2792 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 3043 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
2793 if ((dev_state->state->print_mask & 3044 if ((dev_state->state->print_mask &
@@ -2836,6 +3087,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
2836 unsigned int i; 3087 unsigned int i;
2837 u64 dev_bytenr; 3088 u64 dev_bytenr;
2838 int bio_is_patched; 3089 int bio_is_patched;
3090 char **mapped_datav;
2839 3091
2840 dev_bytenr = 512 * bio->bi_sector; 3092 dev_bytenr = 512 * bio->bi_sector;
2841 bio_is_patched = 0; 3093 bio_is_patched = 0;
@@ -2848,35 +3100,46 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
2848 (unsigned long long)dev_bytenr, 3100 (unsigned long long)dev_bytenr,
2849 bio->bi_bdev); 3101 bio->bi_bdev);
2850 3102
3103 mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
3104 GFP_NOFS);
3105 if (!mapped_datav)
3106 goto leave;
2851 for (i = 0; i < bio->bi_vcnt; i++) { 3107 for (i = 0; i < bio->bi_vcnt; i++) {
2852 u8 *mapped_data; 3108 BUG_ON(bio->bi_io_vec[i].bv_len != PAGE_CACHE_SIZE);
2853 3109 mapped_datav[i] = kmap(bio->bi_io_vec[i].bv_page);
2854 mapped_data = kmap(bio->bi_io_vec[i].bv_page); 3110 if (!mapped_datav[i]) {
3111 while (i > 0) {
3112 i--;
3113 kunmap(bio->bi_io_vec[i].bv_page);
3114 }
3115 kfree(mapped_datav);
3116 goto leave;
3117 }
2855 if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 3118 if ((BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
2856 BTRFSIC_PRINT_MASK_VERBOSE) == 3119 BTRFSIC_PRINT_MASK_VERBOSE) ==
2857 (dev_state->state->print_mask & 3120 (dev_state->state->print_mask &
2858 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH | 3121 (BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH |
2859 BTRFSIC_PRINT_MASK_VERBOSE))) 3122 BTRFSIC_PRINT_MASK_VERBOSE)))
2860 printk(KERN_INFO 3123 printk(KERN_INFO
2861 "#%u: page=%p, mapped=%p, len=%u," 3124 "#%u: page=%p, len=%u, offset=%u\n",
2862 " offset=%u\n",
2863 i, bio->bi_io_vec[i].bv_page, 3125 i, bio->bi_io_vec[i].bv_page,
2864 mapped_data,
2865 bio->bi_io_vec[i].bv_len, 3126 bio->bi_io_vec[i].bv_len,
2866 bio->bi_io_vec[i].bv_offset); 3127 bio->bi_io_vec[i].bv_offset);
2867 btrfsic_process_written_block(dev_state, dev_bytenr, 3128 }
2868 mapped_data, 3129 btrfsic_process_written_block(dev_state, dev_bytenr,
2869 bio->bi_io_vec[i].bv_len, 3130 mapped_datav, bio->bi_vcnt,
2870 bio, &bio_is_patched, 3131 bio, &bio_is_patched,
2871 NULL, rw); 3132 NULL, rw);
3133 while (i > 0) {
3134 i--;
2872 kunmap(bio->bi_io_vec[i].bv_page); 3135 kunmap(bio->bi_io_vec[i].bv_page);
2873 dev_bytenr += bio->bi_io_vec[i].bv_len;
2874 } 3136 }
3137 kfree(mapped_datav);
2875 } else if (NULL != dev_state && (rw & REQ_FLUSH)) { 3138 } else if (NULL != dev_state && (rw & REQ_FLUSH)) {
2876 if (dev_state->state->print_mask & 3139 if (dev_state->state->print_mask &
2877 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 3140 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
2878 printk(KERN_INFO 3141 printk(KERN_INFO
2879 "submit_bio(rw=0x%x) FLUSH, bdev=%p)\n", 3142 "submit_bio(rw=0x%x FLUSH, bdev=%p)\n",
2880 rw, bio->bi_bdev); 3143 rw, bio->bi_bdev);
2881 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) { 3144 if (!dev_state->dummy_block_for_bio_bh_flush.is_iodone) {
2882 if ((dev_state->state->print_mask & 3145 if ((dev_state->state->print_mask &
@@ -2903,6 +3166,7 @@ void btrfsic_submit_bio(int rw, struct bio *bio)
2903 bio->bi_end_io = btrfsic_bio_end_io; 3166 bio->bi_end_io = btrfsic_bio_end_io;
2904 } 3167 }
2905 } 3168 }
3169leave:
2906 mutex_unlock(&btrfsic_mutex); 3170 mutex_unlock(&btrfsic_mutex);
2907 3171
2908 submit_bio(rw, bio); 3172 submit_bio(rw, bio);
@@ -2917,6 +3181,30 @@ int btrfsic_mount(struct btrfs_root *root,
2917 struct list_head *dev_head = &fs_devices->devices; 3181 struct list_head *dev_head = &fs_devices->devices;
2918 struct btrfs_device *device; 3182 struct btrfs_device *device;
2919 3183
3184 if (root->nodesize != root->leafsize) {
3185 printk(KERN_INFO
3186 "btrfsic: cannot handle nodesize %d != leafsize %d!\n",
3187 root->nodesize, root->leafsize);
3188 return -1;
3189 }
3190 if (root->nodesize & ((u64)PAGE_CACHE_SIZE - 1)) {
3191 printk(KERN_INFO
3192 "btrfsic: cannot handle nodesize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3193 root->nodesize, (unsigned long)PAGE_CACHE_SIZE);
3194 return -1;
3195 }
3196 if (root->leafsize & ((u64)PAGE_CACHE_SIZE - 1)) {
3197 printk(KERN_INFO
3198 "btrfsic: cannot handle leafsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3199 root->leafsize, (unsigned long)PAGE_CACHE_SIZE);
3200 return -1;
3201 }
3202 if (root->sectorsize & ((u64)PAGE_CACHE_SIZE - 1)) {
3203 printk(KERN_INFO
3204 "btrfsic: cannot handle sectorsize %d not being a multiple of PAGE_CACHE_SIZE %ld!\n",
3205 root->sectorsize, (unsigned long)PAGE_CACHE_SIZE);
3206 return -1;
3207 }
2920 state = kzalloc(sizeof(*state), GFP_NOFS); 3208 state = kzalloc(sizeof(*state), GFP_NOFS);
2921 if (NULL == state) { 3209 if (NULL == state) {
2922 printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n"); 3210 printk(KERN_INFO "btrfs check-integrity: kmalloc() failed!\n");
@@ -2933,6 +3221,8 @@ int btrfsic_mount(struct btrfs_root *root,
2933 state->print_mask = print_mask; 3221 state->print_mask = print_mask;
2934 state->include_extent_data = including_extent_data; 3222 state->include_extent_data = including_extent_data;
2935 state->csum_size = 0; 3223 state->csum_size = 0;
3224 state->metablock_size = root->nodesize;
3225 state->datablock_size = root->sectorsize;
2936 INIT_LIST_HEAD(&state->all_blocks_list); 3226 INIT_LIST_HEAD(&state->all_blocks_list);
2937 btrfsic_block_hashtable_init(&state->block_hashtable); 3227 btrfsic_block_hashtable_init(&state->block_hashtable);
2938 btrfsic_block_link_hashtable_init(&state->block_link_hashtable); 3228 btrfsic_block_link_hashtable_init(&state->block_link_hashtable);
@@ -3049,7 +3339,7 @@ void btrfsic_unmount(struct btrfs_root *root,
3049 btrfsic_block_link_free(l); 3339 btrfsic_block_link_free(l);
3050 } 3340 }
3051 3341
3052 if (b_all->is_iodone) 3342 if (b_all->is_iodone || b_all->never_written)
3053 btrfsic_block_free(b_all); 3343 btrfsic_block_free(b_all);
3054 else 3344 else
3055 printk(KERN_INFO "btrfs: attempt to free %c-block" 3345 printk(KERN_INFO "btrfs: attempt to free %c-block"
diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
index 4106264fbc65..15cbc2bf4ff0 100644
--- a/fs/btrfs/ctree.c
+++ b/fs/btrfs/ctree.c
@@ -18,6 +18,7 @@
18 18
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/rbtree.h>
21#include "ctree.h" 22#include "ctree.h"
22#include "disk-io.h" 23#include "disk-io.h"
23#include "transaction.h" 24#include "transaction.h"
@@ -37,7 +38,16 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct extent_buffer *dst_buf, 38 struct extent_buffer *dst_buf,
38 struct extent_buffer *src_buf); 39 struct extent_buffer *src_buf);
39static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 40static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
40 struct btrfs_path *path, int level, int slot); 41 struct btrfs_path *path, int level, int slot,
42 int tree_mod_log);
43static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
44 struct extent_buffer *eb);
45struct extent_buffer *read_old_tree_block(struct btrfs_root *root, u64 bytenr,
46 u32 blocksize, u64 parent_transid,
47 u64 time_seq);
48struct extent_buffer *btrfs_find_old_tree_block(struct btrfs_root *root,
49 u64 bytenr, u32 blocksize,
50 u64 time_seq);
41 51
42struct btrfs_path *btrfs_alloc_path(void) 52struct btrfs_path *btrfs_alloc_path(void)
43{ 53{
@@ -255,7 +265,7 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
255 265
256 cow = btrfs_alloc_free_block(trans, root, buf->len, 0, 266 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
257 new_root_objectid, &disk_key, level, 267 new_root_objectid, &disk_key, level,
258 buf->start, 0, 1); 268 buf->start, 0);
259 if (IS_ERR(cow)) 269 if (IS_ERR(cow))
260 return PTR_ERR(cow); 270 return PTR_ERR(cow);
261 271
@@ -288,6 +298,449 @@ int btrfs_copy_root(struct btrfs_trans_handle *trans,
288 return 0; 298 return 0;
289} 299}
290 300
301enum mod_log_op {
302 MOD_LOG_KEY_REPLACE,
303 MOD_LOG_KEY_ADD,
304 MOD_LOG_KEY_REMOVE,
305 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
306 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
307 MOD_LOG_MOVE_KEYS,
308 MOD_LOG_ROOT_REPLACE,
309};
310
311struct tree_mod_move {
312 int dst_slot;
313 int nr_items;
314};
315
316struct tree_mod_root {
317 u64 logical;
318 u8 level;
319};
320
321struct tree_mod_elem {
322 struct rb_node node;
323 u64 index; /* shifted logical */
324 struct seq_list elem;
325 enum mod_log_op op;
326
327 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
328 int slot;
329
330 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
331 u64 generation;
332
333 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
334 struct btrfs_disk_key key;
335 u64 blockptr;
336
337 /* this is used for op == MOD_LOG_MOVE_KEYS */
338 struct tree_mod_move move;
339
340 /* this is used for op == MOD_LOG_ROOT_REPLACE */
341 struct tree_mod_root old_root;
342};
343
344static inline void
345__get_tree_mod_seq(struct btrfs_fs_info *fs_info, struct seq_list *elem)
346{
347 elem->seq = atomic_inc_return(&fs_info->tree_mod_seq);
348 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
349}
350
351void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
352 struct seq_list *elem)
353{
354 elem->flags = 1;
355 spin_lock(&fs_info->tree_mod_seq_lock);
356 __get_tree_mod_seq(fs_info, elem);
357 spin_unlock(&fs_info->tree_mod_seq_lock);
358}
359
360void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
361 struct seq_list *elem)
362{
363 struct rb_root *tm_root;
364 struct rb_node *node;
365 struct rb_node *next;
366 struct seq_list *cur_elem;
367 struct tree_mod_elem *tm;
368 u64 min_seq = (u64)-1;
369 u64 seq_putting = elem->seq;
370
371 if (!seq_putting)
372 return;
373
374 BUG_ON(!(elem->flags & 1));
375 spin_lock(&fs_info->tree_mod_seq_lock);
376 list_del(&elem->list);
377
378 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
379 if ((cur_elem->flags & 1) && cur_elem->seq < min_seq) {
380 if (seq_putting > cur_elem->seq) {
381 /*
382 * blocker with lower sequence number exists, we
383 * cannot remove anything from the log
384 */
385 goto out;
386 }
387 min_seq = cur_elem->seq;
388 }
389 }
390
391 /*
392 * anything that's lower than the lowest existing (read: blocked)
393 * sequence number can be removed from the tree.
394 */
395 write_lock(&fs_info->tree_mod_log_lock);
396 tm_root = &fs_info->tree_mod_log;
397 for (node = rb_first(tm_root); node; node = next) {
398 next = rb_next(node);
399 tm = container_of(node, struct tree_mod_elem, node);
400 if (tm->elem.seq > min_seq)
401 continue;
402 rb_erase(node, tm_root);
403 list_del(&tm->elem.list);
404 kfree(tm);
405 }
406 write_unlock(&fs_info->tree_mod_log_lock);
407out:
408 spin_unlock(&fs_info->tree_mod_seq_lock);
409}
410
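The two helpers above define the lifetime rule for tree modification log entries: a reader pins a sequence point, and entries at or above the lowest pinned sequence stay in the log until that blocker is dropped. A rough caller-side sketch, not part of this patch and with fs_info and the surrounding function assumed, looks like this:

	/* illustrative only: pin a sequence point, read old state, release it */
	struct seq_list elem = {};

	btrfs_get_tree_mod_seq(fs_info, &elem);
	/* ... use elem.seq as the time_seq for time-travel reads ... */
	btrfs_put_tree_mod_seq(fs_info, &elem);

Dropping the blocker with the lowest sequence number is what lets btrfs_put_tree_mod_seq() erase log entries that no remaining reader can still need.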
411/*
412 * key order of the log:
413 * index -> sequence
414 *
415 * the index is the shifted logical of the *new* root node for root replace
416 * operations, or the shifted logical of the affected block for all other
417 * operations.
418 */
419static noinline int
420__tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
421{
422 struct rb_root *tm_root;
423 struct rb_node **new;
424 struct rb_node *parent = NULL;
425 struct tree_mod_elem *cur;
426 int ret = 0;
427
428 BUG_ON(!tm || !tm->elem.seq);
429
430 write_lock(&fs_info->tree_mod_log_lock);
431 tm_root = &fs_info->tree_mod_log;
432 new = &tm_root->rb_node;
433 while (*new) {
434 cur = container_of(*new, struct tree_mod_elem, node);
435 parent = *new;
436 if (cur->index < tm->index)
437 new = &((*new)->rb_left);
438 else if (cur->index > tm->index)
439 new = &((*new)->rb_right);
440 else if (cur->elem.seq < tm->elem.seq)
441 new = &((*new)->rb_left);
442 else if (cur->elem.seq > tm->elem.seq)
443 new = &((*new)->rb_right);
444 else {
445 kfree(tm);
446 ret = -EEXIST;
447 goto unlock;
448 }
449 }
450
451 rb_link_node(&tm->node, parent, new);
452 rb_insert_color(&tm->node, tm_root);
453unlock:
454 write_unlock(&fs_info->tree_mod_log_lock);
455 return ret;
456}
457
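__tree_mod_log_insert() keys the rb-tree on two fields, the shifted block logical ("index") first and the sequence number second, and rejects exact duplicates with -EEXIST. A small stand-alone comparison sketch (plain user-space C, only to illustrate the two-level key, not kernel code):

	#include <stdint.h>

	struct mod_key {
		uint64_t index;	/* block logical >> PAGE_CACHE_SHIFT */
		uint64_t seq;	/* tree_mod_seq at the time of the change */
	};

	/* primary key: index, secondary key: seq; equal keys are the
	 * duplicate case that the kernel function above turns into -EEXIST */
	int mod_key_cmp(const struct mod_key *a, const struct mod_key *b)
	{
		if (a->index != b->index)
			return a->index < b->index ? -1 : 1;
		if (a->seq != b->seq)
			return a->seq < b->seq ? -1 : 1;
		return 0;
	}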
458static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
459 struct extent_buffer *eb) {
460 smp_mb();
461 if (list_empty(&(fs_info)->tree_mod_seq_list))
462 return 1;
463 if (!eb)
464 return 0;
465 if (btrfs_header_level(eb) == 0)
466 return 1;
467 return 0;
468}
469
470/*
471 * This allocates memory and gets a tree modification sequence number when
472 * needed.
473 *
474 * Returns 0 when no sequence number is needed, < 0 on error.
475 * Returns 1 when a sequence number was added. In this case,
476 * fs_info->tree_mod_seq_lock was acquired and must be released by the caller
477 * after inserting into the rb tree.
478 */
479static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
480 struct tree_mod_elem **tm_ret)
481{
482 struct tree_mod_elem *tm;
483 int seq;
484
485 if (tree_mod_dont_log(fs_info, NULL))
486 return 0;
487
488 tm = *tm_ret = kzalloc(sizeof(*tm), flags);
489 if (!tm)
490 return -ENOMEM;
491
492 tm->elem.flags = 0;
493 spin_lock(&fs_info->tree_mod_seq_lock);
494 if (list_empty(&fs_info->tree_mod_seq_list)) {
495 /*
496 * someone emptied the list while we were waiting for the lock.
497 * we must not add to the list, because no blocker exists. items
498 * are removed from the list only when the existing blocker is
499 * removed from the list.
500 */
501 kfree(tm);
502 seq = 0;
503 spin_unlock(&fs_info->tree_mod_seq_lock);
504 } else {
505 __get_tree_mod_seq(fs_info, &tm->elem);
506 seq = tm->elem.seq;
507 }
508
509 return seq;
510}
511
512static noinline int
513tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
514 struct extent_buffer *eb, int slot,
515 enum mod_log_op op, gfp_t flags)
516{
517 struct tree_mod_elem *tm;
518 int ret;
519
520 ret = tree_mod_alloc(fs_info, flags, &tm);
521 if (ret <= 0)
522 return ret;
523
524 tm->index = eb->start >> PAGE_CACHE_SHIFT;
525 if (op != MOD_LOG_KEY_ADD) {
526 btrfs_node_key(eb, &tm->key, slot);
527 tm->blockptr = btrfs_node_blockptr(eb, slot);
528 }
529 tm->op = op;
530 tm->slot = slot;
531 tm->generation = btrfs_node_ptr_generation(eb, slot);
532
533 ret = __tree_mod_log_insert(fs_info, tm);
534 spin_unlock(&fs_info->tree_mod_seq_lock);
535 return ret;
536}
537
538static noinline int
539tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
540 int slot, enum mod_log_op op)
541{
542 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
543}
544
545static noinline int
546tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
547 struct extent_buffer *eb, int dst_slot, int src_slot,
548 int nr_items, gfp_t flags)
549{
550 struct tree_mod_elem *tm;
551 int ret;
552 int i;
553
554 if (tree_mod_dont_log(fs_info, eb))
555 return 0;
556
557 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
558 ret = tree_mod_log_insert_key(fs_info, eb, i + dst_slot,
559 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
560 BUG_ON(ret < 0);
561 }
562
563 ret = tree_mod_alloc(fs_info, flags, &tm);
564 if (ret <= 0)
565 return ret;
566
567 tm->index = eb->start >> PAGE_CACHE_SHIFT;
568 tm->slot = src_slot;
569 tm->move.dst_slot = dst_slot;
570 tm->move.nr_items = nr_items;
571 tm->op = MOD_LOG_MOVE_KEYS;
572
573 ret = __tree_mod_log_insert(fs_info, tm);
574 spin_unlock(&fs_info->tree_mod_seq_lock);
575 return ret;
576}
577
578static noinline int
579tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
580 struct extent_buffer *old_root,
581 struct extent_buffer *new_root, gfp_t flags)
582{
583 struct tree_mod_elem *tm;
584 int ret;
585
586 ret = tree_mod_alloc(fs_info, flags, &tm);
587 if (ret <= 0)
588 return ret;
589
590 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
591 tm->old_root.logical = old_root->start;
592 tm->old_root.level = btrfs_header_level(old_root);
593 tm->generation = btrfs_header_generation(old_root);
594 tm->op = MOD_LOG_ROOT_REPLACE;
595
596 ret = __tree_mod_log_insert(fs_info, tm);
597 spin_unlock(&fs_info->tree_mod_seq_lock);
598 return ret;
599}
600
601static struct tree_mod_elem *
602__tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
603 int smallest)
604{
605 struct rb_root *tm_root;
606 struct rb_node *node;
607 struct tree_mod_elem *cur = NULL;
608 struct tree_mod_elem *found = NULL;
609 u64 index = start >> PAGE_CACHE_SHIFT;
610
611 read_lock(&fs_info->tree_mod_log_lock);
612 tm_root = &fs_info->tree_mod_log;
613 node = tm_root->rb_node;
614 while (node) {
615 cur = container_of(node, struct tree_mod_elem, node);
616 if (cur->index < index) {
617 node = node->rb_left;
618 } else if (cur->index > index) {
619 node = node->rb_right;
620 } else if (cur->elem.seq < min_seq) {
621 node = node->rb_left;
622 } else if (!smallest) {
623 /* we want the node with the highest seq */
624 if (found)
625 BUG_ON(found->elem.seq > cur->elem.seq);
626 found = cur;
627 node = node->rb_left;
628 } else if (cur->elem.seq > min_seq) {
629 /* we want the node with the smallest seq */
630 if (found)
631 BUG_ON(found->elem.seq < cur->elem.seq);
632 found = cur;
633 node = node->rb_right;
634 } else {
635 found = cur;
636 break;
637 }
638 }
639 read_unlock(&fs_info->tree_mod_log_lock);
640
641 return found;
642}
643
644/*
645 * this returns the element from the log with the smallest time sequence
646 * value that's in the log (the oldest log item). any element with a time
647 * sequence lower than min_seq will be ignored.
648 */
649static struct tree_mod_elem *
650tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
651 u64 min_seq)
652{
653 return __tree_mod_log_search(fs_info, start, min_seq, 1);
654}
655
656/*
657 * this returns the element from the log with the largest time sequence
658 * value that's in the log (the most recent log item). any element with
659 * a time sequence lower than min_seq will be ignored.
660 */
661static struct tree_mod_elem *
662tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
663{
664 return __tree_mod_log_search(fs_info, start, min_seq, 0);
665}
666
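The two wrappers differ only in which end of a block's entry range they return. A worked example with hypothetical log contents, not taken from this patch:

	/* entries for one block with seq 5, 7 and 9, and min_seq = 6:
	 *   tree_mod_log_search_oldest() -> the seq-7 entry (oldest not below min_seq)
	 *   tree_mod_log_search()        -> the seq-9 entry (newest entry)
	 *   the seq-5 entry is older than min_seq and is ignored by both */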
667static inline void
668tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
669 struct extent_buffer *src, unsigned long dst_offset,
670 unsigned long src_offset, int nr_items)
671{
672 int ret;
673 int i;
674
675 if (tree_mod_dont_log(fs_info, NULL))
676 return;
677
678 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
679 return;
680
 681	/* speed this up by using a single seq for all operations? */
682 for (i = 0; i < nr_items; i++) {
683 ret = tree_mod_log_insert_key(fs_info, src, i + src_offset,
684 MOD_LOG_KEY_REMOVE);
685 BUG_ON(ret < 0);
686 ret = tree_mod_log_insert_key(fs_info, dst, i + dst_offset,
687 MOD_LOG_KEY_ADD);
688 BUG_ON(ret < 0);
689 }
690}
691
692static inline void
693tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
694 int dst_offset, int src_offset, int nr_items)
695{
696 int ret;
697 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
698 nr_items, GFP_NOFS);
699 BUG_ON(ret < 0);
700}
701
702static inline void
703tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
704 struct extent_buffer *eb,
705 struct btrfs_disk_key *disk_key, int slot, int atomic)
706{
707 int ret;
708
709 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
710 MOD_LOG_KEY_REPLACE,
711 atomic ? GFP_ATOMIC : GFP_NOFS);
712 BUG_ON(ret < 0);
713}
714
715static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
716 struct extent_buffer *eb)
717{
718 int i;
719 int ret;
720 u32 nritems;
721
722 if (tree_mod_dont_log(fs_info, eb))
723 return;
724
725 nritems = btrfs_header_nritems(eb);
726 for (i = nritems - 1; i >= 0; i--) {
727 ret = tree_mod_log_insert_key(fs_info, eb, i,
728 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
729 BUG_ON(ret < 0);
730 }
731}
732
733static inline void
734tree_mod_log_set_root_pointer(struct btrfs_root *root,
735 struct extent_buffer *new_root_node)
736{
737 int ret;
738 tree_mod_log_free_eb(root->fs_info, root->node);
739 ret = tree_mod_log_insert_root(root->fs_info, root->node,
740 new_root_node, GFP_NOFS);
741 BUG_ON(ret < 0);
742}
743
291/* 744/*
292 * check if the tree block can be shared by multiple trees 745 * check if the tree block can be shared by multiple trees
293 */ 746 */
@@ -409,6 +862,12 @@ static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
409 ret = btrfs_dec_ref(trans, root, buf, 1, 1); 862 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
410 BUG_ON(ret); /* -ENOMEM */ 863 BUG_ON(ret); /* -ENOMEM */
411 } 864 }
865 /*
866 * don't log freeing in case we're freeing the root node, this
867 * is done by tree_mod_log_set_root_pointer later
868 */
869 if (buf != root->node && btrfs_header_level(buf) != 0)
870 tree_mod_log_free_eb(root->fs_info, buf);
412 clean_tree_block(trans, root, buf); 871 clean_tree_block(trans, root, buf);
413 *last_ref = 1; 872 *last_ref = 1;
414 } 873 }
@@ -467,7 +926,7 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
467 926
468 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start, 927 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
469 root->root_key.objectid, &disk_key, 928 root->root_key.objectid, &disk_key,
470 level, search_start, empty_size, 1); 929 level, search_start, empty_size);
471 if (IS_ERR(cow)) 930 if (IS_ERR(cow))
472 return PTR_ERR(cow); 931 return PTR_ERR(cow);
473 932
@@ -506,10 +965,11 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
506 parent_start = 0; 965 parent_start = 0;
507 966
508 extent_buffer_get(cow); 967 extent_buffer_get(cow);
968 tree_mod_log_set_root_pointer(root, cow);
509 rcu_assign_pointer(root->node, cow); 969 rcu_assign_pointer(root->node, cow);
510 970
511 btrfs_free_tree_block(trans, root, buf, parent_start, 971 btrfs_free_tree_block(trans, root, buf, parent_start,
512 last_ref, 1); 972 last_ref);
513 free_extent_buffer(buf); 973 free_extent_buffer(buf);
514 add_root_to_dirty_list(root); 974 add_root_to_dirty_list(root);
515 } else { 975 } else {
@@ -519,13 +979,15 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
519 parent_start = 0; 979 parent_start = 0;
520 980
521 WARN_ON(trans->transid != btrfs_header_generation(parent)); 981 WARN_ON(trans->transid != btrfs_header_generation(parent));
982 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
983 MOD_LOG_KEY_REPLACE);
522 btrfs_set_node_blockptr(parent, parent_slot, 984 btrfs_set_node_blockptr(parent, parent_slot,
523 cow->start); 985 cow->start);
524 btrfs_set_node_ptr_generation(parent, parent_slot, 986 btrfs_set_node_ptr_generation(parent, parent_slot,
525 trans->transid); 987 trans->transid);
526 btrfs_mark_buffer_dirty(parent); 988 btrfs_mark_buffer_dirty(parent);
527 btrfs_free_tree_block(trans, root, buf, parent_start, 989 btrfs_free_tree_block(trans, root, buf, parent_start,
528 last_ref, 1); 990 last_ref);
529 } 991 }
530 if (unlock_orig) 992 if (unlock_orig)
531 btrfs_tree_unlock(buf); 993 btrfs_tree_unlock(buf);
@@ -535,6 +997,231 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
535 return 0; 997 return 0;
536} 998}
537 999
1000/*
1001 * returns the logical address of the oldest predecessor of the given root.
1002 * entries older than time_seq are ignored.
1003 */
1004static struct tree_mod_elem *
1005__tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1006 struct btrfs_root *root, u64 time_seq)
1007{
1008 struct tree_mod_elem *tm;
1009 struct tree_mod_elem *found = NULL;
1010 u64 root_logical = root->node->start;
1011 int looped = 0;
1012
1013 if (!time_seq)
1014 return 0;
1015
1016 /*
1017 * the very last operation that's logged for a root is the replacement
1018 * operation (if it is replaced at all). this has the index of the *new*
1019 * root, making it the very first operation that's logged for this root.
1020 */
1021 while (1) {
1022 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1023 time_seq);
1024 if (!looped && !tm)
1025 return 0;
1026 /*
1027 * we must have key remove operations in the log before the
1028 * replace operation.
1029 */
1030 BUG_ON(!tm);
1031
1032 if (tm->op != MOD_LOG_ROOT_REPLACE)
1033 break;
1034
1035 found = tm;
1036 root_logical = tm->old_root.logical;
1037 BUG_ON(root_logical == root->node->start);
1038 looped = 1;
1039 }
1040
1041 /* if there's no old root to return, return what we found instead */
1042 if (!found)
1043 found = tm;
1044
1045 return found;
1046}
1047
1048/*
1049 * tm is a pointer to the first operation to rewind within eb. then, all
1050 * previous operations will be rewound (until we reach something older than
1051 * time_seq).
1052 */
1053static void
1054__tree_mod_log_rewind(struct extent_buffer *eb, u64 time_seq,
1055 struct tree_mod_elem *first_tm)
1056{
1057 u32 n;
1058 struct rb_node *next;
1059 struct tree_mod_elem *tm = first_tm;
1060 unsigned long o_dst;
1061 unsigned long o_src;
1062 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1063
1064 n = btrfs_header_nritems(eb);
1065 while (tm && tm->elem.seq >= time_seq) {
1066 /*
1067 * all the operations are recorded with the operator used for
1068 * the modification. as we're going backwards, we do the
1069 * opposite of each operation here.
1070 */
1071 switch (tm->op) {
1072 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1073 BUG_ON(tm->slot < n);
1074 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1075 case MOD_LOG_KEY_REMOVE:
1076 btrfs_set_node_key(eb, &tm->key, tm->slot);
1077 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1078 btrfs_set_node_ptr_generation(eb, tm->slot,
1079 tm->generation);
1080 n++;
1081 break;
1082 case MOD_LOG_KEY_REPLACE:
1083 BUG_ON(tm->slot >= n);
1084 btrfs_set_node_key(eb, &tm->key, tm->slot);
1085 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1086 btrfs_set_node_ptr_generation(eb, tm->slot,
1087 tm->generation);
1088 break;
1089 case MOD_LOG_KEY_ADD:
1090 if (tm->slot != n - 1) {
1091 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1092 o_src = btrfs_node_key_ptr_offset(tm->slot + 1);
1093 memmove_extent_buffer(eb, o_dst, o_src, p_size);
1094 }
1095 n--;
1096 break;
1097 case MOD_LOG_MOVE_KEYS:
1098 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1099 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1100 memmove_extent_buffer(eb, o_dst, o_src,
1101 tm->move.nr_items * p_size);
1102 break;
1103 case MOD_LOG_ROOT_REPLACE:
1104 /*
1105 * this operation is special. for roots, this must be
1106 * handled explicitly before rewinding.
1107 * for non-roots, this operation may exist if the node
1108 * was a root: root A -> child B; then A gets empty and
1109 * B is promoted to the new root. in the mod log, we'll
1110 * have a root-replace operation for B, a tree block
1111 * that is not a root. we simply ignore that operation.
1112 */
1113 break;
1114 }
1115 next = rb_next(&tm->node);
1116 if (!next)
1117 break;
1118 tm = container_of(next, struct tree_mod_elem, node);
1119 if (tm->index != first_tm->index)
1120 break;
1121 }
1122 btrfs_set_header_nritems(eb, n);
1123}
1124
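Every logged operation is replayed here as its inverse, starting from the newest entry for the block and stopping once entries older than time_seq are reached. A short worked example with made-up slot numbers:

	/* hypothetical log for one node, rewinding to time_seq = 7:
	 *   seq 9, MOD_LOG_KEY_REMOVE, slot 2 -> undone by writing the saved key,
	 *          blockptr and generation back into slot 2 and bumping nritems
	 *   seq 8, MOD_LOG_KEY_ADD, slot 3    -> undone by dropping slot 3 again
	 *          and decrementing nritems
	 *   seq 6, any op                     -> older than time_seq, left alone */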
1125static struct extent_buffer *
1126tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1127 u64 time_seq)
1128{
1129 struct extent_buffer *eb_rewin;
1130 struct tree_mod_elem *tm;
1131
1132 if (!time_seq)
1133 return eb;
1134
1135 if (btrfs_header_level(eb) == 0)
1136 return eb;
1137
1138 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1139 if (!tm)
1140 return eb;
1141
1142 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1143 BUG_ON(tm->slot != 0);
1144 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1145 fs_info->tree_root->nodesize);
1146 BUG_ON(!eb_rewin);
1147 btrfs_set_header_bytenr(eb_rewin, eb->start);
1148 btrfs_set_header_backref_rev(eb_rewin,
1149 btrfs_header_backref_rev(eb));
1150 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1151 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1152 } else {
1153 eb_rewin = btrfs_clone_extent_buffer(eb);
1154 BUG_ON(!eb_rewin);
1155 }
1156
1157 extent_buffer_get(eb_rewin);
1158 free_extent_buffer(eb);
1159
1160 __tree_mod_log_rewind(eb_rewin, time_seq, tm);
1161
1162 return eb_rewin;
1163}
1164
1165/*
1166 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1167 * value. If there are no changes, the current root->root_node is returned. If
1168 * anything changed in between, there's a fresh buffer allocated on which the
1169 * rewind operations are done. In any case, the returned buffer is read locked.
1170 * Returns NULL on error (with no locks held).
1171 */
1172static inline struct extent_buffer *
1173get_old_root(struct btrfs_root *root, u64 time_seq)
1174{
1175 struct tree_mod_elem *tm;
1176 struct extent_buffer *eb;
1177 struct tree_mod_root *old_root = NULL;
1178 u64 old_generation = 0;
1179 u64 logical;
1180
1181 eb = btrfs_read_lock_root_node(root);
1182 tm = __tree_mod_log_oldest_root(root->fs_info, root, time_seq);
1183 if (!tm)
1184 return root->node;
1185
1186 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1187 old_root = &tm->old_root;
1188 old_generation = tm->generation;
1189 logical = old_root->logical;
1190 } else {
1191 logical = root->node->start;
1192 }
1193
1194 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1195 /*
1196 * there was an item in the log when __tree_mod_log_oldest_root
1197 * returned. this one must not go away, because the time_seq passed to
1198 * us must be blocking its removal.
1199 */
1200 BUG_ON(!tm);
1201
1202 if (old_root)
1203 eb = alloc_dummy_extent_buffer(tm->index << PAGE_CACHE_SHIFT,
1204 root->nodesize);
1205 else
1206 eb = btrfs_clone_extent_buffer(root->node);
1207 btrfs_tree_read_unlock(root->node);
1208 free_extent_buffer(root->node);
1209 if (!eb)
1210 return NULL;
1211 btrfs_tree_read_lock(eb);
1212 if (old_root) {
1213 btrfs_set_header_bytenr(eb, eb->start);
1214 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1215 btrfs_set_header_owner(eb, root->root_key.objectid);
1216 btrfs_set_header_level(eb, old_root->level);
1217 btrfs_set_header_generation(eb, old_generation);
1218 }
1219 __tree_mod_log_rewind(eb, time_seq, tm);
1220 extent_buffer_get(eb);
1221
1222 return eb;
1223}
1224
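The ownership contract matters to callers: the buffer that comes back already holds an extra reference and a read lock. A caller-side fragment (variable names and the error value are illustrative only; btrfs_search_old_slot below is the real user):

	struct extent_buffer *eb;
	int level;

	eb = get_old_root(root, time_seq);
	if (!eb)
		return -ENOMEM;			/* illustrative error choice */
	level = btrfs_header_level(eb);
	p->nodes[level] = eb;			/* hand the reference to the path */
	p->locks[level] = BTRFS_READ_LOCK;	/* record the read lock held on eb */
	/* btrfs_release_path(p) later unlocks and drops the reference */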
538static inline int should_cow_block(struct btrfs_trans_handle *trans, 1225static inline int should_cow_block(struct btrfs_trans_handle *trans,
539 struct btrfs_root *root, 1226 struct btrfs_root *root,
540 struct extent_buffer *buf) 1227 struct extent_buffer *buf)
@@ -739,7 +1426,11 @@ int btrfs_realloc_node(struct btrfs_trans_handle *trans,
739 if (!cur) 1426 if (!cur)
740 return -EIO; 1427 return -EIO;
741 } else if (!uptodate) { 1428 } else if (!uptodate) {
742 btrfs_read_buffer(cur, gen); 1429 err = btrfs_read_buffer(cur, gen);
1430 if (err) {
1431 free_extent_buffer(cur);
1432 return err;
1433 }
743 } 1434 }
744 } 1435 }
745 if (search_start == 0) 1436 if (search_start == 0)
@@ -854,20 +1545,18 @@ static noinline int generic_bin_search(struct extent_buffer *eb,
854static int bin_search(struct extent_buffer *eb, struct btrfs_key *key, 1545static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
855 int level, int *slot) 1546 int level, int *slot)
856{ 1547{
857 if (level == 0) { 1548 if (level == 0)
858 return generic_bin_search(eb, 1549 return generic_bin_search(eb,
859 offsetof(struct btrfs_leaf, items), 1550 offsetof(struct btrfs_leaf, items),
860 sizeof(struct btrfs_item), 1551 sizeof(struct btrfs_item),
861 key, btrfs_header_nritems(eb), 1552 key, btrfs_header_nritems(eb),
862 slot); 1553 slot);
863 } else { 1554 else
864 return generic_bin_search(eb, 1555 return generic_bin_search(eb,
865 offsetof(struct btrfs_node, ptrs), 1556 offsetof(struct btrfs_node, ptrs),
866 sizeof(struct btrfs_key_ptr), 1557 sizeof(struct btrfs_key_ptr),
867 key, btrfs_header_nritems(eb), 1558 key, btrfs_header_nritems(eb),
868 slot); 1559 slot);
869 }
870 return -1;
871} 1560}
872 1561
873int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key, 1562int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
@@ -974,6 +1663,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
974 goto enospc; 1663 goto enospc;
975 } 1664 }
976 1665
1666 tree_mod_log_set_root_pointer(root, child);
977 rcu_assign_pointer(root->node, child); 1667 rcu_assign_pointer(root->node, child);
978 1668
979 add_root_to_dirty_list(root); 1669 add_root_to_dirty_list(root);
@@ -987,7 +1677,7 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
987 free_extent_buffer(mid); 1677 free_extent_buffer(mid);
988 1678
989 root_sub_used(root, mid->len); 1679 root_sub_used(root, mid->len);
990 btrfs_free_tree_block(trans, root, mid, 0, 1, 0); 1680 btrfs_free_tree_block(trans, root, mid, 0, 1);
991 /* once for the root ptr */ 1681 /* once for the root ptr */
992 free_extent_buffer_stale(mid); 1682 free_extent_buffer_stale(mid);
993 return 0; 1683 return 0;
@@ -996,8 +1686,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
996 BTRFS_NODEPTRS_PER_BLOCK(root) / 4) 1686 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
997 return 0; 1687 return 0;
998 1688
999 btrfs_header_nritems(mid);
1000
1001 left = read_node_slot(root, parent, pslot - 1); 1689 left = read_node_slot(root, parent, pslot - 1);
1002 if (left) { 1690 if (left) {
1003 btrfs_tree_lock(left); 1691 btrfs_tree_lock(left);
@@ -1027,7 +1715,6 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1027 wret = push_node_left(trans, root, left, mid, 1); 1715 wret = push_node_left(trans, root, left, mid, 1);
1028 if (wret < 0) 1716 if (wret < 0)
1029 ret = wret; 1717 ret = wret;
1030 btrfs_header_nritems(mid);
1031 } 1718 }
1032 1719
1033 /* 1720 /*
@@ -1040,14 +1727,16 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1040 if (btrfs_header_nritems(right) == 0) { 1727 if (btrfs_header_nritems(right) == 0) {
1041 clean_tree_block(trans, root, right); 1728 clean_tree_block(trans, root, right);
1042 btrfs_tree_unlock(right); 1729 btrfs_tree_unlock(right);
1043 del_ptr(trans, root, path, level + 1, pslot + 1); 1730 del_ptr(trans, root, path, level + 1, pslot + 1, 1);
1044 root_sub_used(root, right->len); 1731 root_sub_used(root, right->len);
1045 btrfs_free_tree_block(trans, root, right, 0, 1, 0); 1732 btrfs_free_tree_block(trans, root, right, 0, 1);
1046 free_extent_buffer_stale(right); 1733 free_extent_buffer_stale(right);
1047 right = NULL; 1734 right = NULL;
1048 } else { 1735 } else {
1049 struct btrfs_disk_key right_key; 1736 struct btrfs_disk_key right_key;
1050 btrfs_node_key(right, &right_key, 0); 1737 btrfs_node_key(right, &right_key, 0);
1738 tree_mod_log_set_node_key(root->fs_info, parent,
1739 &right_key, pslot + 1, 0);
1051 btrfs_set_node_key(parent, &right_key, pslot + 1); 1740 btrfs_set_node_key(parent, &right_key, pslot + 1);
1052 btrfs_mark_buffer_dirty(parent); 1741 btrfs_mark_buffer_dirty(parent);
1053 } 1742 }
@@ -1082,15 +1771,17 @@ static noinline int balance_level(struct btrfs_trans_handle *trans,
1082 if (btrfs_header_nritems(mid) == 0) { 1771 if (btrfs_header_nritems(mid) == 0) {
1083 clean_tree_block(trans, root, mid); 1772 clean_tree_block(trans, root, mid);
1084 btrfs_tree_unlock(mid); 1773 btrfs_tree_unlock(mid);
1085 del_ptr(trans, root, path, level + 1, pslot); 1774 del_ptr(trans, root, path, level + 1, pslot, 1);
1086 root_sub_used(root, mid->len); 1775 root_sub_used(root, mid->len);
1087 btrfs_free_tree_block(trans, root, mid, 0, 1, 0); 1776 btrfs_free_tree_block(trans, root, mid, 0, 1);
1088 free_extent_buffer_stale(mid); 1777 free_extent_buffer_stale(mid);
1089 mid = NULL; 1778 mid = NULL;
1090 } else { 1779 } else {
1091 /* update the parent key to reflect our changes */ 1780 /* update the parent key to reflect our changes */
1092 struct btrfs_disk_key mid_key; 1781 struct btrfs_disk_key mid_key;
1093 btrfs_node_key(mid, &mid_key, 0); 1782 btrfs_node_key(mid, &mid_key, 0);
1783 tree_mod_log_set_node_key(root->fs_info, parent, &mid_key,
1784 pslot, 0);
1094 btrfs_set_node_key(parent, &mid_key, pslot); 1785 btrfs_set_node_key(parent, &mid_key, pslot);
1095 btrfs_mark_buffer_dirty(parent); 1786 btrfs_mark_buffer_dirty(parent);
1096 } 1787 }
@@ -1188,6 +1879,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1188 struct btrfs_disk_key disk_key; 1879 struct btrfs_disk_key disk_key;
1189 orig_slot += left_nr; 1880 orig_slot += left_nr;
1190 btrfs_node_key(mid, &disk_key, 0); 1881 btrfs_node_key(mid, &disk_key, 0);
1882 tree_mod_log_set_node_key(root->fs_info, parent,
1883 &disk_key, pslot, 0);
1191 btrfs_set_node_key(parent, &disk_key, pslot); 1884 btrfs_set_node_key(parent, &disk_key, pslot);
1192 btrfs_mark_buffer_dirty(parent); 1885 btrfs_mark_buffer_dirty(parent);
1193 if (btrfs_header_nritems(left) > orig_slot) { 1886 if (btrfs_header_nritems(left) > orig_slot) {
@@ -1239,6 +1932,8 @@ static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1239 struct btrfs_disk_key disk_key; 1932 struct btrfs_disk_key disk_key;
1240 1933
1241 btrfs_node_key(right, &disk_key, 0); 1934 btrfs_node_key(right, &disk_key, 0);
1935 tree_mod_log_set_node_key(root->fs_info, parent,
1936 &disk_key, pslot + 1, 0);
1242 btrfs_set_node_key(parent, &disk_key, pslot + 1); 1937 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1243 btrfs_mark_buffer_dirty(parent); 1938 btrfs_mark_buffer_dirty(parent);
1244 1939
@@ -1496,7 +2191,7 @@ static int
1496read_block_for_search(struct btrfs_trans_handle *trans, 2191read_block_for_search(struct btrfs_trans_handle *trans,
1497 struct btrfs_root *root, struct btrfs_path *p, 2192 struct btrfs_root *root, struct btrfs_path *p,
1498 struct extent_buffer **eb_ret, int level, int slot, 2193 struct extent_buffer **eb_ret, int level, int slot,
1499 struct btrfs_key *key) 2194 struct btrfs_key *key, u64 time_seq)
1500{ 2195{
1501 u64 blocknr; 2196 u64 blocknr;
1502 u64 gen; 2197 u64 gen;
@@ -1850,7 +2545,7 @@ cow_done:
1850 } 2545 }
1851 2546
1852 err = read_block_for_search(trans, root, p, 2547 err = read_block_for_search(trans, root, p,
1853 &b, level, slot, key); 2548 &b, level, slot, key, 0);
1854 if (err == -EAGAIN) 2549 if (err == -EAGAIN)
1855 goto again; 2550 goto again;
1856 if (err) { 2551 if (err) {
@@ -1922,6 +2617,113 @@ done:
1922} 2617}
1923 2618
1924/* 2619/*
2620 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2621 * current state of the tree together with the operations recorded in the tree
2622 * modification log to search for the key in a previous version of this tree, as
2623 * denoted by the time_seq parameter.
2624 *
2625 * Naturally, there is no support for insert, delete or cow operations.
2626 *
2627 * The resulting path and return value will be set up as if we called
2628 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2629 */
2630int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2631 struct btrfs_path *p, u64 time_seq)
2632{
2633 struct extent_buffer *b;
2634 int slot;
2635 int ret;
2636 int err;
2637 int level;
2638 int lowest_unlock = 1;
2639 u8 lowest_level = 0;
2640
2641 lowest_level = p->lowest_level;
2642 WARN_ON(p->nodes[0] != NULL);
2643
2644 if (p->search_commit_root) {
2645 BUG_ON(time_seq);
2646 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2647 }
2648
2649again:
2650 b = get_old_root(root, time_seq);
2651 level = btrfs_header_level(b);
2652 p->locks[level] = BTRFS_READ_LOCK;
2653
2654 while (b) {
2655 level = btrfs_header_level(b);
2656 p->nodes[level] = b;
2657 btrfs_clear_path_blocking(p, NULL, 0);
2658
2659 /*
2660 * we have a lock on b and as long as we aren't changing
2661 * the tree, there is no way for the items in b to change.
2662 * It is safe to drop the lock on our parent before we
2663 * go through the expensive btree search on b.
2664 */
2665 btrfs_unlock_up_safe(p, level + 1);
2666
2667 ret = bin_search(b, key, level, &slot);
2668
2669 if (level != 0) {
2670 int dec = 0;
2671 if (ret && slot > 0) {
2672 dec = 1;
2673 slot -= 1;
2674 }
2675 p->slots[level] = slot;
2676 unlock_up(p, level, lowest_unlock, 0, NULL);
2677
2678 if (level == lowest_level) {
2679 if (dec)
2680 p->slots[level]++;
2681 goto done;
2682 }
2683
2684 err = read_block_for_search(NULL, root, p, &b, level,
2685 slot, key, time_seq);
2686 if (err == -EAGAIN)
2687 goto again;
2688 if (err) {
2689 ret = err;
2690 goto done;
2691 }
2692
2693 level = btrfs_header_level(b);
2694 err = btrfs_try_tree_read_lock(b);
2695 if (!err) {
2696 btrfs_set_path_blocking(p);
2697 btrfs_tree_read_lock(b);
2698 btrfs_clear_path_blocking(p, b,
2699 BTRFS_READ_LOCK);
2700 }
2701 p->locks[level] = BTRFS_READ_LOCK;
2702 p->nodes[level] = b;
2703 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2704 if (b != p->nodes[level]) {
2705 btrfs_tree_unlock_rw(p->nodes[level],
2706 p->locks[level]);
2707 p->locks[level] = 0;
2708 p->nodes[level] = b;
2709 }
2710 } else {
2711 p->slots[level] = slot;
2712 unlock_up(p, level, lowest_unlock, 0, NULL);
2713 goto done;
2714 }
2715 }
2716 ret = 1;
2717done:
2718 if (!p->leave_spinning)
2719 btrfs_set_path_blocking(p);
2720 if (ret < 0)
2721 btrfs_release_path(p);
2722
2723 return ret;
2724}
2725
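Putting the pieces together, a reader that wants a consistent historical view combines the sequence API with this search. The glue below is an illustrative sketch, not part of this patch; error handling and the ret == 1 ("key not found") case are abbreviated, and btrfs_next_old_item() is the inline added in the ctree.h hunk further down:

	struct seq_list elem = {};
	struct btrfs_path *path;
	struct btrfs_key key;	/* assumed to be filled in with the wanted key */
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	btrfs_get_tree_mod_seq(root->fs_info, &elem);
	ret = btrfs_search_old_slot(root, &key, path, elem.seq);
	while (ret == 0) {
		/* inspect path->nodes[0] / path->slots[0] here */
		ret = btrfs_next_old_item(root, path, elem.seq);
	}
	btrfs_put_tree_mod_seq(root->fs_info, &elem);
	btrfs_free_path(path);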
2726/*
1925 * adjust the pointers going up the tree, starting at level 2727 * adjust the pointers going up the tree, starting at level
1926 * making sure the right key of each node is points to 'key'. 2728 * making sure the right key of each node is points to 'key'.
1927 * This is used after shifting pointers to the left, so it stops 2729 * This is used after shifting pointers to the left, so it stops
@@ -1941,6 +2743,7 @@ static void fixup_low_keys(struct btrfs_trans_handle *trans,
1941 if (!path->nodes[i]) 2743 if (!path->nodes[i])
1942 break; 2744 break;
1943 t = path->nodes[i]; 2745 t = path->nodes[i];
2746 tree_mod_log_set_node_key(root->fs_info, t, key, tslot, 1);
1944 btrfs_set_node_key(t, key, tslot); 2747 btrfs_set_node_key(t, key, tslot);
1945 btrfs_mark_buffer_dirty(path->nodes[i]); 2748 btrfs_mark_buffer_dirty(path->nodes[i]);
1946 if (tslot != 0) 2749 if (tslot != 0)
@@ -2023,12 +2826,16 @@ static int push_node_left(struct btrfs_trans_handle *trans,
2023 } else 2826 } else
2024 push_items = min(src_nritems - 8, push_items); 2827 push_items = min(src_nritems - 8, push_items);
2025 2828
2829 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
2830 push_items);
2026 copy_extent_buffer(dst, src, 2831 copy_extent_buffer(dst, src,
2027 btrfs_node_key_ptr_offset(dst_nritems), 2832 btrfs_node_key_ptr_offset(dst_nritems),
2028 btrfs_node_key_ptr_offset(0), 2833 btrfs_node_key_ptr_offset(0),
2029 push_items * sizeof(struct btrfs_key_ptr)); 2834 push_items * sizeof(struct btrfs_key_ptr));
2030 2835
2031 if (push_items < src_nritems) { 2836 if (push_items < src_nritems) {
2837 tree_mod_log_eb_move(root->fs_info, src, 0, push_items,
2838 src_nritems - push_items);
2032 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0), 2839 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2033 btrfs_node_key_ptr_offset(push_items), 2840 btrfs_node_key_ptr_offset(push_items),
2034 (src_nritems - push_items) * 2841 (src_nritems - push_items) *
@@ -2082,11 +2889,14 @@ static int balance_node_right(struct btrfs_trans_handle *trans,
2082 if (max_push < push_items) 2889 if (max_push < push_items)
2083 push_items = max_push; 2890 push_items = max_push;
2084 2891
2892 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
2085 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items), 2893 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2086 btrfs_node_key_ptr_offset(0), 2894 btrfs_node_key_ptr_offset(0),
2087 (dst_nritems) * 2895 (dst_nritems) *
2088 sizeof(struct btrfs_key_ptr)); 2896 sizeof(struct btrfs_key_ptr));
2089 2897
2898 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
2899 src_nritems - push_items, push_items);
2090 copy_extent_buffer(dst, src, 2900 copy_extent_buffer(dst, src,
2091 btrfs_node_key_ptr_offset(0), 2901 btrfs_node_key_ptr_offset(0),
2092 btrfs_node_key_ptr_offset(src_nritems - push_items), 2902 btrfs_node_key_ptr_offset(src_nritems - push_items),
@@ -2129,7 +2939,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2129 2939
2130 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 2940 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2131 root->root_key.objectid, &lower_key, 2941 root->root_key.objectid, &lower_key,
2132 level, root->node->start, 0, 0); 2942 level, root->node->start, 0);
2133 if (IS_ERR(c)) 2943 if (IS_ERR(c))
2134 return PTR_ERR(c); 2944 return PTR_ERR(c);
2135 2945
@@ -2161,6 +2971,7 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2161 btrfs_mark_buffer_dirty(c); 2971 btrfs_mark_buffer_dirty(c);
2162 2972
2163 old = root->node; 2973 old = root->node;
2974 tree_mod_log_set_root_pointer(root, c);
2164 rcu_assign_pointer(root->node, c); 2975 rcu_assign_pointer(root->node, c);
2165 2976
2166 /* the super has an extra ref to root->node */ 2977 /* the super has an extra ref to root->node */
@@ -2184,10 +2995,11 @@ static noinline int insert_new_root(struct btrfs_trans_handle *trans,
2184static void insert_ptr(struct btrfs_trans_handle *trans, 2995static void insert_ptr(struct btrfs_trans_handle *trans,
2185 struct btrfs_root *root, struct btrfs_path *path, 2996 struct btrfs_root *root, struct btrfs_path *path,
2186 struct btrfs_disk_key *key, u64 bytenr, 2997 struct btrfs_disk_key *key, u64 bytenr,
2187 int slot, int level) 2998 int slot, int level, int tree_mod_log)
2188{ 2999{
2189 struct extent_buffer *lower; 3000 struct extent_buffer *lower;
2190 int nritems; 3001 int nritems;
3002 int ret;
2191 3003
2192 BUG_ON(!path->nodes[level]); 3004 BUG_ON(!path->nodes[level]);
2193 btrfs_assert_tree_locked(path->nodes[level]); 3005 btrfs_assert_tree_locked(path->nodes[level]);
@@ -2196,11 +3008,19 @@ static void insert_ptr(struct btrfs_trans_handle *trans,
2196 BUG_ON(slot > nritems); 3008 BUG_ON(slot > nritems);
2197 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root)); 3009 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
2198 if (slot != nritems) { 3010 if (slot != nritems) {
3011 if (tree_mod_log && level)
3012 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3013 slot, nritems - slot);
2199 memmove_extent_buffer(lower, 3014 memmove_extent_buffer(lower,
2200 btrfs_node_key_ptr_offset(slot + 1), 3015 btrfs_node_key_ptr_offset(slot + 1),
2201 btrfs_node_key_ptr_offset(slot), 3016 btrfs_node_key_ptr_offset(slot),
2202 (nritems - slot) * sizeof(struct btrfs_key_ptr)); 3017 (nritems - slot) * sizeof(struct btrfs_key_ptr));
2203 } 3018 }
3019 if (tree_mod_log && level) {
3020 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3021 MOD_LOG_KEY_ADD);
3022 BUG_ON(ret < 0);
3023 }
2204 btrfs_set_node_key(lower, key, slot); 3024 btrfs_set_node_key(lower, key, slot);
2205 btrfs_set_node_blockptr(lower, slot, bytenr); 3025 btrfs_set_node_blockptr(lower, slot, bytenr);
2206 WARN_ON(trans->transid == 0); 3026 WARN_ON(trans->transid == 0);
@@ -2252,7 +3072,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2252 3072
2253 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0, 3073 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
2254 root->root_key.objectid, 3074 root->root_key.objectid,
2255 &disk_key, level, c->start, 0, 0); 3075 &disk_key, level, c->start, 0);
2256 if (IS_ERR(split)) 3076 if (IS_ERR(split))
2257 return PTR_ERR(split); 3077 return PTR_ERR(split);
2258 3078
@@ -2271,7 +3091,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2271 (unsigned long)btrfs_header_chunk_tree_uuid(split), 3091 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2272 BTRFS_UUID_SIZE); 3092 BTRFS_UUID_SIZE);
2273 3093
2274 3094 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
2275 copy_extent_buffer(split, c, 3095 copy_extent_buffer(split, c,
2276 btrfs_node_key_ptr_offset(0), 3096 btrfs_node_key_ptr_offset(0),
2277 btrfs_node_key_ptr_offset(mid), 3097 btrfs_node_key_ptr_offset(mid),
@@ -2284,7 +3104,7 @@ static noinline int split_node(struct btrfs_trans_handle *trans,
2284 btrfs_mark_buffer_dirty(split); 3104 btrfs_mark_buffer_dirty(split);
2285 3105
2286 insert_ptr(trans, root, path, &disk_key, split->start, 3106 insert_ptr(trans, root, path, &disk_key, split->start,
2287 path->slots[level + 1] + 1, level + 1); 3107 path->slots[level + 1] + 1, level + 1, 1);
2288 3108
2289 if (path->slots[level] >= mid) { 3109 if (path->slots[level] >= mid) {
2290 path->slots[level] -= mid; 3110 path->slots[level] -= mid;
@@ -2821,7 +3641,7 @@ static noinline void copy_for_split(struct btrfs_trans_handle *trans,
2821 btrfs_set_header_nritems(l, mid); 3641 btrfs_set_header_nritems(l, mid);
2822 btrfs_item_key(right, &disk_key, 0); 3642 btrfs_item_key(right, &disk_key, 0);
2823 insert_ptr(trans, root, path, &disk_key, right->start, 3643 insert_ptr(trans, root, path, &disk_key, right->start,
2824 path->slots[1] + 1, 1); 3644 path->slots[1] + 1, 1, 0);
2825 3645
2826 btrfs_mark_buffer_dirty(right); 3646 btrfs_mark_buffer_dirty(right);
2827 btrfs_mark_buffer_dirty(l); 3647 btrfs_mark_buffer_dirty(l);
@@ -3004,7 +3824,7 @@ again:
3004 3824
3005 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0, 3825 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
3006 root->root_key.objectid, 3826 root->root_key.objectid,
3007 &disk_key, 0, l->start, 0, 0); 3827 &disk_key, 0, l->start, 0);
3008 if (IS_ERR(right)) 3828 if (IS_ERR(right))
3009 return PTR_ERR(right); 3829 return PTR_ERR(right);
3010 3830
@@ -3028,7 +3848,7 @@ again:
3028 if (mid <= slot) { 3848 if (mid <= slot) {
3029 btrfs_set_header_nritems(right, 0); 3849 btrfs_set_header_nritems(right, 0);
3030 insert_ptr(trans, root, path, &disk_key, right->start, 3850 insert_ptr(trans, root, path, &disk_key, right->start,
3031 path->slots[1] + 1, 1); 3851 path->slots[1] + 1, 1, 0);
3032 btrfs_tree_unlock(path->nodes[0]); 3852 btrfs_tree_unlock(path->nodes[0]);
3033 free_extent_buffer(path->nodes[0]); 3853 free_extent_buffer(path->nodes[0]);
3034 path->nodes[0] = right; 3854 path->nodes[0] = right;
@@ -3037,7 +3857,7 @@ again:
3037 } else { 3857 } else {
3038 btrfs_set_header_nritems(right, 0); 3858 btrfs_set_header_nritems(right, 0);
3039 insert_ptr(trans, root, path, &disk_key, right->start, 3859 insert_ptr(trans, root, path, &disk_key, right->start,
3040 path->slots[1], 1); 3860 path->slots[1], 1, 0);
3041 btrfs_tree_unlock(path->nodes[0]); 3861 btrfs_tree_unlock(path->nodes[0]);
3042 free_extent_buffer(path->nodes[0]); 3862 free_extent_buffer(path->nodes[0]);
3043 path->nodes[0] = right; 3863 path->nodes[0] = right;
@@ -3749,19 +4569,29 @@ int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
3749 * empty a node. 4569 * empty a node.
3750 */ 4570 */
3751static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root, 4571static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3752 struct btrfs_path *path, int level, int slot) 4572 struct btrfs_path *path, int level, int slot,
4573 int tree_mod_log)
3753{ 4574{
3754 struct extent_buffer *parent = path->nodes[level]; 4575 struct extent_buffer *parent = path->nodes[level];
3755 u32 nritems; 4576 u32 nritems;
4577 int ret;
3756 4578
3757 nritems = btrfs_header_nritems(parent); 4579 nritems = btrfs_header_nritems(parent);
3758 if (slot != nritems - 1) { 4580 if (slot != nritems - 1) {
4581 if (tree_mod_log && level)
4582 tree_mod_log_eb_move(root->fs_info, parent, slot,
4583 slot + 1, nritems - slot - 1);
3759 memmove_extent_buffer(parent, 4584 memmove_extent_buffer(parent,
3760 btrfs_node_key_ptr_offset(slot), 4585 btrfs_node_key_ptr_offset(slot),
3761 btrfs_node_key_ptr_offset(slot + 1), 4586 btrfs_node_key_ptr_offset(slot + 1),
3762 sizeof(struct btrfs_key_ptr) * 4587 sizeof(struct btrfs_key_ptr) *
3763 (nritems - slot - 1)); 4588 (nritems - slot - 1));
4589 } else if (tree_mod_log && level) {
4590 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4591 MOD_LOG_KEY_REMOVE);
4592 BUG_ON(ret < 0);
3764 } 4593 }
4594
3765 nritems--; 4595 nritems--;
3766 btrfs_set_header_nritems(parent, nritems); 4596 btrfs_set_header_nritems(parent, nritems);
3767 if (nritems == 0 && parent == root->node) { 4597 if (nritems == 0 && parent == root->node) {
@@ -3793,7 +4623,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3793 struct extent_buffer *leaf) 4623 struct extent_buffer *leaf)
3794{ 4624{
3795 WARN_ON(btrfs_header_generation(leaf) != trans->transid); 4625 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
3796 del_ptr(trans, root, path, 1, path->slots[1]); 4626 del_ptr(trans, root, path, 1, path->slots[1], 1);
3797 4627
3798 /* 4628 /*
3799 * btrfs_free_extent is expensive, we want to make sure we 4629 * btrfs_free_extent is expensive, we want to make sure we
@@ -3804,7 +4634,7 @@ static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
3804 root_sub_used(root, leaf->len); 4634 root_sub_used(root, leaf->len);
3805 4635
3806 extent_buffer_get(leaf); 4636 extent_buffer_get(leaf);
3807 btrfs_free_tree_block(trans, root, leaf, 0, 1, 0); 4637 btrfs_free_tree_block(trans, root, leaf, 0, 1);
3808 free_extent_buffer_stale(leaf); 4638 free_extent_buffer_stale(leaf);
3809} 4639}
3810/* 4640/*
@@ -4202,6 +5032,12 @@ next:
4202 */ 5032 */
4203int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path) 5033int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
4204{ 5034{
5035 return btrfs_next_old_leaf(root, path, 0);
5036}
5037
5038int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5039 u64 time_seq)
5040{
4205 int slot; 5041 int slot;
4206 int level; 5042 int level;
4207 struct extent_buffer *c; 5043 struct extent_buffer *c;
@@ -4226,7 +5062,10 @@ again:
4226 path->keep_locks = 1; 5062 path->keep_locks = 1;
4227 path->leave_spinning = 1; 5063 path->leave_spinning = 1;
4228 5064
4229 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0); 5065 if (time_seq)
5066 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5067 else
5068 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4230 path->keep_locks = 0; 5069 path->keep_locks = 0;
4231 5070
4232 if (ret < 0) 5071 if (ret < 0)
@@ -4271,7 +5110,7 @@ again:
4271 next = c; 5110 next = c;
4272 next_rw_lock = path->locks[level]; 5111 next_rw_lock = path->locks[level];
4273 ret = read_block_for_search(NULL, root, path, &next, level, 5112 ret = read_block_for_search(NULL, root, path, &next, level,
4274 slot, &key); 5113 slot, &key, 0);
4275 if (ret == -EAGAIN) 5114 if (ret == -EAGAIN)
4276 goto again; 5115 goto again;
4277 5116
@@ -4308,7 +5147,7 @@ again:
4308 break; 5147 break;
4309 5148
4310 ret = read_block_for_search(NULL, root, path, &next, level, 5149 ret = read_block_for_search(NULL, root, path, &next, level,
4311 0, &key); 5150 0, &key, 0);
4312 if (ret == -EAGAIN) 5151 if (ret == -EAGAIN)
4313 goto again; 5152 goto again;
4314 5153
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 8fd72331d600..fa5c45b39075 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -173,6 +173,9 @@ static int btrfs_csum_sizes[] = { 4, 0 };
173#define BTRFS_FT_XATTR 8 173#define BTRFS_FT_XATTR 8
174#define BTRFS_FT_MAX 9 174#define BTRFS_FT_MAX 9
175 175
176/* ioprio of readahead is set to idle */
177#define BTRFS_IOPRIO_READA (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0))
178
176/* 179/*
177 * The key defines the order in the tree, and so it also defines (optimal) 180 * The key defines the order in the tree, and so it also defines (optimal)
178 * block layout. 181 * block layout.
@@ -823,6 +826,14 @@ struct btrfs_csum_item {
823 u8 csum; 826 u8 csum;
824} __attribute__ ((__packed__)); 827} __attribute__ ((__packed__));
825 828
829struct btrfs_dev_stats_item {
830 /*
831 * grow this item struct at the end for future enhancements and keep
832 * the existing values unchanged
833 */
834 __le64 values[BTRFS_DEV_STAT_VALUES_MAX];
835} __attribute__ ((__packed__));
836
826/* different types of block groups (and chunks) */ 837/* different types of block groups (and chunks) */
827#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0) 838#define BTRFS_BLOCK_GROUP_DATA (1ULL << 0)
828#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1) 839#define BTRFS_BLOCK_GROUP_SYSTEM (1ULL << 1)
@@ -1129,6 +1140,15 @@ struct btrfs_fs_info {
1129 spinlock_t delayed_iput_lock; 1140 spinlock_t delayed_iput_lock;
1130 struct list_head delayed_iputs; 1141 struct list_head delayed_iputs;
1131 1142
1143 /* this protects tree_mod_seq_list */
1144 spinlock_t tree_mod_seq_lock;
1145 atomic_t tree_mod_seq;
1146 struct list_head tree_mod_seq_list;
1147
1148 /* this protects tree_mod_log */
1149 rwlock_t tree_mod_log_lock;
1150 struct rb_root tree_mod_log;
1151
1132 atomic_t nr_async_submits; 1152 atomic_t nr_async_submits;
1133 atomic_t async_submit_draining; 1153 atomic_t async_submit_draining;
1134 atomic_t nr_async_bios; 1154 atomic_t nr_async_bios;
@@ -1375,7 +1395,7 @@ struct btrfs_root {
1375 struct list_head root_list; 1395 struct list_head root_list;
1376 1396
1377 spinlock_t orphan_lock; 1397 spinlock_t orphan_lock;
1378 struct list_head orphan_list; 1398 atomic_t orphan_inodes;
1379 struct btrfs_block_rsv *orphan_block_rsv; 1399 struct btrfs_block_rsv *orphan_block_rsv;
1380 int orphan_item_inserted; 1400 int orphan_item_inserted;
1381 int orphan_cleanup_state; 1401 int orphan_cleanup_state;
@@ -1508,6 +1528,12 @@ struct btrfs_ioctl_defrag_range_args {
1508#define BTRFS_BALANCE_ITEM_KEY 248 1528#define BTRFS_BALANCE_ITEM_KEY 248
1509 1529
1510/* 1530/*
1531 * Persistently stores the io stats in the device tree.
1532 * One key for all stats, (0, BTRFS_DEV_STATS_KEY, devid).
1533 */
1534#define BTRFS_DEV_STATS_KEY 249
1535
1536/*
1511 * string items are for debugging. They just store a short string of 1537 * string items are for debugging. They just store a short string of
1512 * data in the FS 1538 * data in the FS
1513 */ 1539 */
@@ -2415,6 +2441,30 @@ static inline u32 btrfs_file_extent_inline_item_len(struct extent_buffer *eb,
2415 return btrfs_item_size(eb, e) - offset; 2441 return btrfs_item_size(eb, e) - offset;
2416} 2442}
2417 2443
2444/* btrfs_dev_stats_item */
2445static inline u64 btrfs_dev_stats_value(struct extent_buffer *eb,
2446 struct btrfs_dev_stats_item *ptr,
2447 int index)
2448{
2449 u64 val;
2450
2451 read_extent_buffer(eb, &val,
2452 offsetof(struct btrfs_dev_stats_item, values) +
2453 ((unsigned long)ptr) + (index * sizeof(u64)),
2454 sizeof(val));
2455 return val;
2456}
2457
2458static inline void btrfs_set_dev_stats_value(struct extent_buffer *eb,
2459 struct btrfs_dev_stats_item *ptr,
2460 int index, u64 val)
2461{
2462 write_extent_buffer(eb, &val,
2463 offsetof(struct btrfs_dev_stats_item, values) +
2464 ((unsigned long)ptr) + (index * sizeof(u64)),
2465 sizeof(val));
2466}
2467
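The accessor pair reads and writes one u64 slot of the values[] array directly in the backing extent buffer, using the item pointer as a byte offset. A hedged usage sketch (path and slot are assumed to come from an earlier search for a BTRFS_DEV_STATS_KEY item, index 0 is only an example, and the write of course assumes the leaf was COWed for the running transaction):

	struct btrfs_dev_stats_item *ptr;
	struct extent_buffer *leaf = path->nodes[0];
	u64 val;

	ptr = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_dev_stats_item);
	val = btrfs_dev_stats_value(leaf, ptr, 0);		/* read values[0] */
	btrfs_set_dev_stats_value(leaf, ptr, 0, val + 1);	/* write it back */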
2418static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb) 2468static inline struct btrfs_fs_info *btrfs_sb(struct super_block *sb)
2419{ 2469{
2420 return sb->s_fs_info; 2470 return sb->s_fs_info;
@@ -2496,11 +2546,11 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
2496 struct btrfs_root *root, u32 blocksize, 2546 struct btrfs_root *root, u32 blocksize,
2497 u64 parent, u64 root_objectid, 2547 u64 parent, u64 root_objectid,
2498 struct btrfs_disk_key *key, int level, 2548 struct btrfs_disk_key *key, int level,
2499 u64 hint, u64 empty_size, int for_cow); 2549 u64 hint, u64 empty_size);
2500void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 2550void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
2501 struct btrfs_root *root, 2551 struct btrfs_root *root,
2502 struct extent_buffer *buf, 2552 struct extent_buffer *buf,
2503 u64 parent, int last_ref, int for_cow); 2553 u64 parent, int last_ref);
2504struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans, 2554struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
2505 struct btrfs_root *root, 2555 struct btrfs_root *root,
2506 u64 bytenr, u32 blocksize, 2556 u64 bytenr, u32 blocksize,
@@ -2659,6 +2709,8 @@ int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
2659int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root 2709int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2660 *root, struct btrfs_key *key, struct btrfs_path *p, int 2710 *root, struct btrfs_key *key, struct btrfs_path *p, int
2661 ins_len, int cow); 2711 ins_len, int cow);
2712int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2713 struct btrfs_path *p, u64 time_seq);
2662int btrfs_realloc_node(struct btrfs_trans_handle *trans, 2714int btrfs_realloc_node(struct btrfs_trans_handle *trans,
2663 struct btrfs_root *root, struct extent_buffer *parent, 2715 struct btrfs_root *root, struct extent_buffer *parent,
2664 int start_slot, int cache_only, u64 *last_ret, 2716 int start_slot, int cache_only, u64 *last_ret,
@@ -2701,13 +2753,20 @@ static inline int btrfs_insert_empty_item(struct btrfs_trans_handle *trans,
2701} 2753}
2702 2754
2703int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path); 2755int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path);
2704static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p) 2756int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
2757 u64 time_seq);
2758static inline int btrfs_next_old_item(struct btrfs_root *root,
2759 struct btrfs_path *p, u64 time_seq)
2705{ 2760{
2706 ++p->slots[0]; 2761 ++p->slots[0];
2707 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0])) 2762 if (p->slots[0] >= btrfs_header_nritems(p->nodes[0]))
2708 return btrfs_next_leaf(root, p); 2763 return btrfs_next_old_leaf(root, p, time_seq);
2709 return 0; 2764 return 0;
2710} 2765}
2766static inline int btrfs_next_item(struct btrfs_root *root, struct btrfs_path *p)
2767{
2768 return btrfs_next_old_item(root, p, 0);
2769}
2711int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path); 2770int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
2712int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf); 2771int btrfs_leaf_free_space(struct btrfs_root *root, struct extent_buffer *leaf);
2713int __must_check btrfs_drop_snapshot(struct btrfs_root *root, 2772int __must_check btrfs_drop_snapshot(struct btrfs_root *root,
@@ -2922,7 +2981,6 @@ int btrfs_readpage(struct file *file, struct page *page);
2922void btrfs_evict_inode(struct inode *inode); 2981void btrfs_evict_inode(struct inode *inode);
2923int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc); 2982int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc);
2924int btrfs_dirty_inode(struct inode *inode); 2983int btrfs_dirty_inode(struct inode *inode);
2925int btrfs_update_time(struct file *file);
2926struct inode *btrfs_alloc_inode(struct super_block *sb); 2984struct inode *btrfs_alloc_inode(struct super_block *sb);
2927void btrfs_destroy_inode(struct inode *inode); 2985void btrfs_destroy_inode(struct inode *inode);
2928int btrfs_drop_inode(struct inode *inode); 2986int btrfs_drop_inode(struct inode *inode);
@@ -3098,4 +3156,23 @@ void btrfs_reada_detach(void *handle);
3098int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb, 3156int btree_readahead_hook(struct btrfs_root *root, struct extent_buffer *eb,
3099 u64 start, int err); 3157 u64 start, int err);
3100 3158
3159/* delayed seq elem */
3160struct seq_list {
3161 struct list_head list;
3162 u64 seq;
3163 u32 flags;
3164};
3165
3166void btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
3167 struct seq_list *elem);
3168void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
3169 struct seq_list *elem);
3170
3171static inline int is_fstree(u64 rootid)
3172{
3173 if (rootid == BTRFS_FS_TREE_OBJECTID ||
3174 (s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
3175 return 1;
3176 return 0;
3177}
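is_fstree() answers whether a root objectid belongs to a subvolume tree; the delayed-ref.c hunk below uses it to decide which roots get delayed ref sequence numbers. Worked examples using the well-known objectid constants:

	/* is_fstree(BTRFS_FS_TREE_OBJECTID)     -> 1  (top-level subvolume, id 5)
	 * is_fstree(BTRFS_FIRST_FREE_OBJECTID)  -> 1  (256, first created subvolume)
	 * is_fstree(BTRFS_EXTENT_TREE_OBJECTID) -> 0  (internal tree, id 2)
	 * is_fstree(BTRFS_TREE_LOG_OBJECTID)    -> 0  (negative as s64, filtered out) */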
3101#endif 3178#endif
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 03e3748d84d0..2399f4086915 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -669,8 +669,8 @@ static int btrfs_delayed_inode_reserve_metadata(
669 return ret; 669 return ret;
670 } else if (src_rsv == &root->fs_info->delalloc_block_rsv) { 670 } else if (src_rsv == &root->fs_info->delalloc_block_rsv) {
671 spin_lock(&BTRFS_I(inode)->lock); 671 spin_lock(&BTRFS_I(inode)->lock);
672 if (BTRFS_I(inode)->delalloc_meta_reserved) { 672 if (test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
673 BTRFS_I(inode)->delalloc_meta_reserved = 0; 673 &BTRFS_I(inode)->runtime_flags)) {
674 spin_unlock(&BTRFS_I(inode)->lock); 674 spin_unlock(&BTRFS_I(inode)->lock);
675 release = true; 675 release = true;
676 goto migrate; 676 goto migrate;
@@ -1706,7 +1706,7 @@ static void fill_stack_inode_item(struct btrfs_trans_handle *trans,
1706 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode)); 1706 btrfs_set_stack_inode_nbytes(inode_item, inode_get_bytes(inode));
1707 btrfs_set_stack_inode_generation(inode_item, 1707 btrfs_set_stack_inode_generation(inode_item,
1708 BTRFS_I(inode)->generation); 1708 BTRFS_I(inode)->generation);
1709 btrfs_set_stack_inode_sequence(inode_item, BTRFS_I(inode)->sequence); 1709 btrfs_set_stack_inode_sequence(inode_item, inode->i_version);
1710 btrfs_set_stack_inode_transid(inode_item, trans->transid); 1710 btrfs_set_stack_inode_transid(inode_item, trans->transid);
1711 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev); 1711 btrfs_set_stack_inode_rdev(inode_item, inode->i_rdev);
1712 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags); 1712 btrfs_set_stack_inode_flags(inode_item, BTRFS_I(inode)->flags);
@@ -1754,7 +1754,7 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev)
1754 set_nlink(inode, btrfs_stack_inode_nlink(inode_item)); 1754 set_nlink(inode, btrfs_stack_inode_nlink(inode_item));
1755 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item)); 1755 inode_set_bytes(inode, btrfs_stack_inode_nbytes(inode_item));
1756 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item); 1756 BTRFS_I(inode)->generation = btrfs_stack_inode_generation(inode_item);
1757 BTRFS_I(inode)->sequence = btrfs_stack_inode_sequence(inode_item); 1757 inode->i_version = btrfs_stack_inode_sequence(inode_item);
1758 inode->i_rdev = 0; 1758 inode->i_rdev = 0;
1759 *rdev = btrfs_stack_inode_rdev(inode_item); 1759 *rdev = btrfs_stack_inode_rdev(inode_item);
1760 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item); 1760 BTRFS_I(inode)->flags = btrfs_stack_inode_flags(inode_item);
@@ -1879,3 +1879,21 @@ void btrfs_kill_all_delayed_nodes(struct btrfs_root *root)
1879 } 1879 }
1880 } 1880 }
1881} 1881}
1882
1883void btrfs_destroy_delayed_inodes(struct btrfs_root *root)
1884{
1885 struct btrfs_delayed_root *delayed_root;
1886 struct btrfs_delayed_node *curr_node, *prev_node;
1887
1888 delayed_root = btrfs_get_delayed_root(root);
1889
1890 curr_node = btrfs_first_delayed_node(delayed_root);
1891 while (curr_node) {
1892 __btrfs_kill_delayed_node(curr_node);
1893
1894 prev_node = curr_node;
1895 curr_node = btrfs_next_delayed_node(curr_node);
1896 btrfs_release_delayed_node(prev_node);
1897 }
1898}
1899
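The delayed-inode hunks above replace the delalloc_meta_reserved bool with an atomic bit in the inode's runtime_flags. A small sketch of that general conversion pattern, using hypothetical struct and flag names rather than the btrfs ones:

#include <linux/bitops.h>
#include <linux/types.h>

/* several per-inode bool fields become bits in one unsigned long */
enum {
	EX_INODE_META_RESERVED,	/* was a bool like delalloc_meta_reserved */
	EX_INODE_IN_DEFRAG,	/* was a bool like in_defrag */
};

struct ex_inode {
	unsigned long runtime_flags;
};

static bool ex_take_meta_reservation(struct ex_inode *ei)
{
	/*
	 * test_and_clear_bit() is atomic and returns the previous value,
	 * so exactly one caller can "win" the reservation even without a
	 * spinlock around the flag itself.
	 */
	return test_and_clear_bit(EX_INODE_META_RESERVED, &ei->runtime_flags);
}

static void ex_mark_in_defrag(struct ex_inode *ei)
{
	set_bit(EX_INODE_IN_DEFRAG, &ei->runtime_flags);
}

static bool ex_is_in_defrag(struct ex_inode *ei)
{
	return test_bit(EX_INODE_IN_DEFRAG, &ei->runtime_flags);
}

Packing the flags into one word shrinks the inode a little and lets lockless test_bit() checks stand in for spinlock-protected bool reads.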
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index 7083d08b2a21..f5aa4023d3e1 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -124,6 +124,9 @@ int btrfs_fill_inode(struct inode *inode, u32 *rdev);
124/* Used for drop dead root */ 124/* Used for drop dead root */
125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root); 125void btrfs_kill_all_delayed_nodes(struct btrfs_root *root);
126 126
127/* Used to clean up the transaction */
128void btrfs_destroy_delayed_inodes(struct btrfs_root *root);
129
127/* Used for readdir() */ 130/* Used for readdir() */
128void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list, 131void btrfs_get_delayed_items(struct inode *inode, struct list_head *ins_list,
129 struct list_head *del_list); 132 struct list_head *del_list);
diff --git a/fs/btrfs/delayed-ref.c b/fs/btrfs/delayed-ref.c
index 69f22e3ab3bc..13ae7b04790e 100644
--- a/fs/btrfs/delayed-ref.c
+++ b/fs/btrfs/delayed-ref.c
@@ -525,7 +525,7 @@ static noinline void add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
525 ref->is_head = 0; 525 ref->is_head = 0;
526 ref->in_tree = 1; 526 ref->in_tree = 1;
527 527
528 if (need_ref_seq(for_cow, ref_root)) 528 if (is_fstree(ref_root))
529 seq = inc_delayed_seq(delayed_refs); 529 seq = inc_delayed_seq(delayed_refs);
530 ref->seq = seq; 530 ref->seq = seq;
531 531
@@ -584,7 +584,7 @@ static noinline void add_delayed_data_ref(struct btrfs_fs_info *fs_info,
584 ref->is_head = 0; 584 ref->is_head = 0;
585 ref->in_tree = 1; 585 ref->in_tree = 1;
586 586
587 if (need_ref_seq(for_cow, ref_root)) 587 if (is_fstree(ref_root))
588 seq = inc_delayed_seq(delayed_refs); 588 seq = inc_delayed_seq(delayed_refs);
589 ref->seq = seq; 589 ref->seq = seq;
590 590
@@ -658,10 +658,11 @@ int btrfs_add_delayed_tree_ref(struct btrfs_fs_info *fs_info,
658 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr, 658 add_delayed_tree_ref(fs_info, trans, &ref->node, bytenr,
659 num_bytes, parent, ref_root, level, action, 659 num_bytes, parent, ref_root, level, action,
660 for_cow); 660 for_cow);
661 if (!need_ref_seq(for_cow, ref_root) && 661 if (!is_fstree(ref_root) &&
662 waitqueue_active(&delayed_refs->seq_wait)) 662 waitqueue_active(&delayed_refs->seq_wait))
663 wake_up(&delayed_refs->seq_wait); 663 wake_up(&delayed_refs->seq_wait);
664 spin_unlock(&delayed_refs->lock); 664 spin_unlock(&delayed_refs->lock);
665
665 return 0; 666 return 0;
666} 667}
667 668
@@ -706,10 +707,11 @@ int btrfs_add_delayed_data_ref(struct btrfs_fs_info *fs_info,
706 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr, 707 add_delayed_data_ref(fs_info, trans, &ref->node, bytenr,
707 num_bytes, parent, ref_root, owner, offset, 708 num_bytes, parent, ref_root, owner, offset,
708 action, for_cow); 709 action, for_cow);
709 if (!need_ref_seq(for_cow, ref_root) && 710 if (!is_fstree(ref_root) &&
710 waitqueue_active(&delayed_refs->seq_wait)) 711 waitqueue_active(&delayed_refs->seq_wait))
711 wake_up(&delayed_refs->seq_wait); 712 wake_up(&delayed_refs->seq_wait);
712 spin_unlock(&delayed_refs->lock); 713 spin_unlock(&delayed_refs->lock);
714
713 return 0; 715 return 0;
714} 716}
715 717
diff --git a/fs/btrfs/delayed-ref.h b/fs/btrfs/delayed-ref.h
index d8f244d94925..413927fb9957 100644
--- a/fs/btrfs/delayed-ref.h
+++ b/fs/btrfs/delayed-ref.h
@@ -195,11 +195,6 @@ int btrfs_delayed_ref_lock(struct btrfs_trans_handle *trans,
195int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans, 195int btrfs_find_ref_cluster(struct btrfs_trans_handle *trans,
196 struct list_head *cluster, u64 search_start); 196 struct list_head *cluster, u64 search_start);
197 197
198struct seq_list {
199 struct list_head list;
200 u64 seq;
201};
202
203static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs) 198static inline u64 inc_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs)
204{ 199{
205 assert_spin_locked(&delayed_refs->lock); 200 assert_spin_locked(&delayed_refs->lock);
@@ -230,25 +225,6 @@ int btrfs_check_delayed_seq(struct btrfs_delayed_ref_root *delayed_refs,
230 u64 seq); 225 u64 seq);
231 226
232/* 227/*
233 * delayed refs with a ref_seq > 0 must be held back during backref walking.
234 * this only applies to items in one of the fs-trees. for_cow items never need
235 * to be held back, so they won't get a ref_seq number.
236 */
237static inline int need_ref_seq(int for_cow, u64 rootid)
238{
239 if (for_cow)
240 return 0;
241
242 if (rootid == BTRFS_FS_TREE_OBJECTID)
243 return 1;
244
245 if ((s64)rootid >= (s64)BTRFS_FIRST_FREE_OBJECTID)
246 return 1;
247
248 return 0;
249}
250
251/*
252 * a node might live in a head or a regular ref, this lets you 228 * a node might live in a head or a regular ref, this lets you
253 * test for the proper type to use. 229 * test for the proper type to use.
254 */ 230 */
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index e1fe74a2ce16..7b845ff4af99 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -44,6 +44,7 @@
44#include "free-space-cache.h" 44#include "free-space-cache.h"
45#include "inode-map.h" 45#include "inode-map.h"
46#include "check-integrity.h" 46#include "check-integrity.h"
47#include "rcu-string.h"
47 48
48static struct extent_io_ops btree_extent_io_ops; 49static struct extent_io_ops btree_extent_io_ops;
49static void end_workqueue_fn(struct btrfs_work *work); 50static void end_workqueue_fn(struct btrfs_work *work);
@@ -1153,7 +1154,6 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1153 root->orphan_block_rsv = NULL; 1154 root->orphan_block_rsv = NULL;
1154 1155
1155 INIT_LIST_HEAD(&root->dirty_list); 1156 INIT_LIST_HEAD(&root->dirty_list);
1156 INIT_LIST_HEAD(&root->orphan_list);
1157 INIT_LIST_HEAD(&root->root_list); 1157 INIT_LIST_HEAD(&root->root_list);
1158 spin_lock_init(&root->orphan_lock); 1158 spin_lock_init(&root->orphan_lock);
1159 spin_lock_init(&root->inode_lock); 1159 spin_lock_init(&root->inode_lock);
@@ -1166,6 +1166,7 @@ static void __setup_root(u32 nodesize, u32 leafsize, u32 sectorsize,
1166 atomic_set(&root->log_commit[0], 0); 1166 atomic_set(&root->log_commit[0], 0);
1167 atomic_set(&root->log_commit[1], 0); 1167 atomic_set(&root->log_commit[1], 0);
1168 atomic_set(&root->log_writers, 0); 1168 atomic_set(&root->log_writers, 0);
1169 atomic_set(&root->orphan_inodes, 0);
1169 root->log_batch = 0; 1170 root->log_batch = 0;
1170 root->log_transid = 0; 1171 root->log_transid = 0;
1171 root->last_log_commit = 0; 1172 root->last_log_commit = 0;
@@ -1252,7 +1253,7 @@ static struct btrfs_root *alloc_log_tree(struct btrfs_trans_handle *trans,
1252 1253
1253 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0, 1254 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
1254 BTRFS_TREE_LOG_OBJECTID, NULL, 1255 BTRFS_TREE_LOG_OBJECTID, NULL,
1255 0, 0, 0, 0); 1256 0, 0, 0);
1256 if (IS_ERR(leaf)) { 1257 if (IS_ERR(leaf)) {
1257 kfree(root); 1258 kfree(root);
1258 return ERR_CAST(leaf); 1259 return ERR_CAST(leaf);
@@ -1914,11 +1915,14 @@ int open_ctree(struct super_block *sb,
1914 spin_lock_init(&fs_info->delayed_iput_lock); 1915 spin_lock_init(&fs_info->delayed_iput_lock);
1915 spin_lock_init(&fs_info->defrag_inodes_lock); 1916 spin_lock_init(&fs_info->defrag_inodes_lock);
1916 spin_lock_init(&fs_info->free_chunk_lock); 1917 spin_lock_init(&fs_info->free_chunk_lock);
1918 spin_lock_init(&fs_info->tree_mod_seq_lock);
1919 rwlock_init(&fs_info->tree_mod_log_lock);
1917 mutex_init(&fs_info->reloc_mutex); 1920 mutex_init(&fs_info->reloc_mutex);
1918 1921
1919 init_completion(&fs_info->kobj_unregister); 1922 init_completion(&fs_info->kobj_unregister);
1920 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots); 1923 INIT_LIST_HEAD(&fs_info->dirty_cowonly_roots);
1921 INIT_LIST_HEAD(&fs_info->space_info); 1924 INIT_LIST_HEAD(&fs_info->space_info);
1925 INIT_LIST_HEAD(&fs_info->tree_mod_seq_list);
1922 btrfs_mapping_init(&fs_info->mapping_tree); 1926 btrfs_mapping_init(&fs_info->mapping_tree);
1923 btrfs_init_block_rsv(&fs_info->global_block_rsv); 1927 btrfs_init_block_rsv(&fs_info->global_block_rsv);
1924 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv); 1928 btrfs_init_block_rsv(&fs_info->delalloc_block_rsv);
@@ -1931,12 +1935,14 @@ int open_ctree(struct super_block *sb,
1931 atomic_set(&fs_info->async_submit_draining, 0); 1935 atomic_set(&fs_info->async_submit_draining, 0);
1932 atomic_set(&fs_info->nr_async_bios, 0); 1936 atomic_set(&fs_info->nr_async_bios, 0);
1933 atomic_set(&fs_info->defrag_running, 0); 1937 atomic_set(&fs_info->defrag_running, 0);
1938 atomic_set(&fs_info->tree_mod_seq, 0);
1934 fs_info->sb = sb; 1939 fs_info->sb = sb;
1935 fs_info->max_inline = 8192 * 1024; 1940 fs_info->max_inline = 8192 * 1024;
1936 fs_info->metadata_ratio = 0; 1941 fs_info->metadata_ratio = 0;
1937 fs_info->defrag_inodes = RB_ROOT; 1942 fs_info->defrag_inodes = RB_ROOT;
1938 fs_info->trans_no_join = 0; 1943 fs_info->trans_no_join = 0;
1939 fs_info->free_chunk_space = 0; 1944 fs_info->free_chunk_space = 0;
1945 fs_info->tree_mod_log = RB_ROOT;
1940 1946
1941 /* readahead state */ 1947 /* readahead state */
1942 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT); 1948 INIT_RADIX_TREE(&fs_info->reada_tree, GFP_NOFS & ~__GFP_WAIT);
@@ -2001,7 +2007,8 @@ int open_ctree(struct super_block *sb,
2001 BTRFS_I(fs_info->btree_inode)->root = tree_root; 2007 BTRFS_I(fs_info->btree_inode)->root = tree_root;
2002 memset(&BTRFS_I(fs_info->btree_inode)->location, 0, 2008 memset(&BTRFS_I(fs_info->btree_inode)->location, 0,
2003 sizeof(struct btrfs_key)); 2009 sizeof(struct btrfs_key));
2004 BTRFS_I(fs_info->btree_inode)->dummy_inode = 1; 2010 set_bit(BTRFS_INODE_DUMMY,
2011 &BTRFS_I(fs_info->btree_inode)->runtime_flags);
2005 insert_inode_hash(fs_info->btree_inode); 2012 insert_inode_hash(fs_info->btree_inode);
2006 2013
2007 spin_lock_init(&fs_info->block_group_cache_lock); 2014 spin_lock_init(&fs_info->block_group_cache_lock);
@@ -2112,7 +2119,7 @@ int open_ctree(struct super_block *sb,
2112 2119
2113 features = btrfs_super_incompat_flags(disk_super); 2120 features = btrfs_super_incompat_flags(disk_super);
2114 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF; 2121 features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
2115 if (tree_root->fs_info->compress_type & BTRFS_COMPRESS_LZO) 2122 if (tree_root->fs_info->compress_type == BTRFS_COMPRESS_LZO)
2116 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO; 2123 features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
2117 2124
2118 /* 2125 /*
@@ -2353,6 +2360,13 @@ retry_root_backup:
2353 fs_info->generation = generation; 2360 fs_info->generation = generation;
2354 fs_info->last_trans_committed = generation; 2361 fs_info->last_trans_committed = generation;
2355 2362
2363 ret = btrfs_init_dev_stats(fs_info);
2364 if (ret) {
2365 printk(KERN_ERR "btrfs: failed to init dev_stats: %d\n",
2366 ret);
2367 goto fail_block_groups;
2368 }
2369
2356 ret = btrfs_init_space_info(fs_info); 2370 ret = btrfs_init_space_info(fs_info);
2357 if (ret) { 2371 if (ret) {
2358 printk(KERN_ERR "Failed to initialize space info: %d\n", ret); 2372 printk(KERN_ERR "Failed to initialize space info: %d\n", ret);
@@ -2556,18 +2570,20 @@ recovery_tree_root:
2556 2570
2557static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate) 2571static void btrfs_end_buffer_write_sync(struct buffer_head *bh, int uptodate)
2558{ 2572{
2559 char b[BDEVNAME_SIZE];
2560
2561 if (uptodate) { 2573 if (uptodate) {
2562 set_buffer_uptodate(bh); 2574 set_buffer_uptodate(bh);
2563 } else { 2575 } else {
2564 printk_ratelimited(KERN_WARNING "lost page write due to " 2576 struct btrfs_device *device = (struct btrfs_device *)
2565 "I/O error on %s\n", 2577 bh->b_private;
2566 bdevname(bh->b_bdev, b)); 2578
2579 printk_ratelimited_in_rcu(KERN_WARNING "lost page write due to "
2580 "I/O error on %s\n",
2581 rcu_str_deref(device->name));
2567 /* note, we don't set_buffer_write_io_error because we have 2582 /* note, we don't set_buffer_write_io_error because we have
2568 * our own ways of dealing with the IO errors 2583 * our own ways of dealing with the IO errors
2569 */ 2584 */
2570 clear_buffer_uptodate(bh); 2585 clear_buffer_uptodate(bh);
2586 btrfs_dev_stat_inc_and_print(device, BTRFS_DEV_STAT_WRITE_ERRS);
2571 } 2587 }
2572 unlock_buffer(bh); 2588 unlock_buffer(bh);
2573 put_bh(bh); 2589 put_bh(bh);
@@ -2682,6 +2698,7 @@ static int write_dev_supers(struct btrfs_device *device,
2682 set_buffer_uptodate(bh); 2698 set_buffer_uptodate(bh);
2683 lock_buffer(bh); 2699 lock_buffer(bh);
2684 bh->b_end_io = btrfs_end_buffer_write_sync; 2700 bh->b_end_io = btrfs_end_buffer_write_sync;
2701 bh->b_private = device;
2685 } 2702 }
2686 2703
2687 /* 2704 /*
@@ -2734,12 +2751,15 @@ static int write_dev_flush(struct btrfs_device *device, int wait)
2734 wait_for_completion(&device->flush_wait); 2751 wait_for_completion(&device->flush_wait);
2735 2752
2736 if (bio_flagged(bio, BIO_EOPNOTSUPP)) { 2753 if (bio_flagged(bio, BIO_EOPNOTSUPP)) {
2737 printk("btrfs: disabling barriers on dev %s\n", 2754 printk_in_rcu("btrfs: disabling barriers on dev %s\n",
2738 device->name); 2755 rcu_str_deref(device->name));
2739 device->nobarriers = 1; 2756 device->nobarriers = 1;
2740 } 2757 }
2741 if (!bio_flagged(bio, BIO_UPTODATE)) { 2758 if (!bio_flagged(bio, BIO_UPTODATE)) {
2742 ret = -EIO; 2759 ret = -EIO;
2760 if (!bio_flagged(bio, BIO_EOPNOTSUPP))
2761 btrfs_dev_stat_inc_and_print(device,
2762 BTRFS_DEV_STAT_FLUSH_ERRS);
2743 } 2763 }
2744 2764
2745 /* drop the reference from the wait == 0 run */ 2765 /* drop the reference from the wait == 0 run */
@@ -2902,19 +2922,6 @@ int write_ctree_super(struct btrfs_trans_handle *trans,
2902 return ret; 2922 return ret;
2903} 2923}
2904 2924
2905/* Kill all outstanding I/O */
2906void btrfs_abort_devices(struct btrfs_root *root)
2907{
2908 struct list_head *head;
2909 struct btrfs_device *dev;
2910 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
2911 head = &root->fs_info->fs_devices->devices;
2912 list_for_each_entry_rcu(dev, head, dev_list) {
2913 blk_abort_queue(dev->bdev->bd_disk->queue);
2914 }
2915 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
2916}
2917
2918void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root) 2925void btrfs_free_fs_root(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
2919{ 2926{
2920 spin_lock(&fs_info->fs_roots_radix_lock); 2927 spin_lock(&fs_info->fs_roots_radix_lock);
@@ -3395,7 +3402,6 @@ int btrfs_destroy_delayed_refs(struct btrfs_transaction *trans,
3395 3402
3396 delayed_refs = &trans->delayed_refs; 3403 delayed_refs = &trans->delayed_refs;
3397 3404
3398again:
3399 spin_lock(&delayed_refs->lock); 3405 spin_lock(&delayed_refs->lock);
3400 if (delayed_refs->num_entries == 0) { 3406 if (delayed_refs->num_entries == 0) {
3401 spin_unlock(&delayed_refs->lock); 3407 spin_unlock(&delayed_refs->lock);
@@ -3403,31 +3409,37 @@ again:
3403 return ret; 3409 return ret;
3404 } 3410 }
3405 3411
3406 node = rb_first(&delayed_refs->root); 3412 while ((node = rb_first(&delayed_refs->root)) != NULL) {
3407 while (node) {
3408 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node); 3413 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
3409 node = rb_next(node);
3410
3411 ref->in_tree = 0;
3412 rb_erase(&ref->rb_node, &delayed_refs->root);
3413 delayed_refs->num_entries--;
3414 3414
3415 atomic_set(&ref->refs, 1); 3415 atomic_set(&ref->refs, 1);
3416 if (btrfs_delayed_ref_is_head(ref)) { 3416 if (btrfs_delayed_ref_is_head(ref)) {
3417 struct btrfs_delayed_ref_head *head; 3417 struct btrfs_delayed_ref_head *head;
3418 3418
3419 head = btrfs_delayed_node_to_head(ref); 3419 head = btrfs_delayed_node_to_head(ref);
3420 spin_unlock(&delayed_refs->lock); 3420 if (!mutex_trylock(&head->mutex)) {
3421 mutex_lock(&head->mutex); 3421 atomic_inc(&ref->refs);
3422 spin_unlock(&delayed_refs->lock);
3423
3424 /* Need to wait for the delayed ref to run */
3425 mutex_lock(&head->mutex);
3426 mutex_unlock(&head->mutex);
3427 btrfs_put_delayed_ref(ref);
3428
3429 spin_lock(&delayed_refs->lock);
3430 continue;
3431 }
3432
3422 kfree(head->extent_op); 3433 kfree(head->extent_op);
3423 delayed_refs->num_heads--; 3434 delayed_refs->num_heads--;
3424 if (list_empty(&head->cluster)) 3435 if (list_empty(&head->cluster))
3425 delayed_refs->num_heads_ready--; 3436 delayed_refs->num_heads_ready--;
3426 list_del_init(&head->cluster); 3437 list_del_init(&head->cluster);
3427 mutex_unlock(&head->mutex);
3428 btrfs_put_delayed_ref(ref);
3429 goto again;
3430 } 3438 }
3439 ref->in_tree = 0;
3440 rb_erase(&ref->rb_node, &delayed_refs->root);
3441 delayed_refs->num_entries--;
3442
3431 spin_unlock(&delayed_refs->lock); 3443 spin_unlock(&delayed_refs->lock);
3432 btrfs_put_delayed_ref(ref); 3444 btrfs_put_delayed_ref(ref);
3433 3445
@@ -3515,11 +3527,9 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3515 &(&BTRFS_I(page->mapping->host)->io_tree)->buffer, 3527 &(&BTRFS_I(page->mapping->host)->io_tree)->buffer,
3516 offset >> PAGE_CACHE_SHIFT); 3528 offset >> PAGE_CACHE_SHIFT);
3517 spin_unlock(&dirty_pages->buffer_lock); 3529 spin_unlock(&dirty_pages->buffer_lock);
3518 if (eb) { 3530 if (eb)
3519 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY, 3531 ret = test_and_clear_bit(EXTENT_BUFFER_DIRTY,
3520 &eb->bflags); 3532 &eb->bflags);
3521 atomic_set(&eb->refs, 1);
3522 }
3523 if (PageWriteback(page)) 3533 if (PageWriteback(page))
3524 end_page_writeback(page); 3534 end_page_writeback(page);
3525 3535
@@ -3533,8 +3543,8 @@ static int btrfs_destroy_marked_extents(struct btrfs_root *root,
3533 spin_unlock_irq(&page->mapping->tree_lock); 3543 spin_unlock_irq(&page->mapping->tree_lock);
3534 } 3544 }
3535 3545
3536 page->mapping->a_ops->invalidatepage(page, 0);
3537 unlock_page(page); 3546 unlock_page(page);
3547 page_cache_release(page);
3538 } 3548 }
3539 } 3549 }
3540 3550
@@ -3548,8 +3558,10 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3548 u64 start; 3558 u64 start;
3549 u64 end; 3559 u64 end;
3550 int ret; 3560 int ret;
3561 bool loop = true;
3551 3562
3552 unpin = pinned_extents; 3563 unpin = pinned_extents;
3564again:
3553 while (1) { 3565 while (1) {
3554 ret = find_first_extent_bit(unpin, 0, &start, &end, 3566 ret = find_first_extent_bit(unpin, 0, &start, &end,
3555 EXTENT_DIRTY); 3567 EXTENT_DIRTY);
@@ -3567,6 +3579,15 @@ static int btrfs_destroy_pinned_extent(struct btrfs_root *root,
3567 cond_resched(); 3579 cond_resched();
3568 } 3580 }
3569 3581
3582 if (loop) {
3583 if (unpin == &root->fs_info->freed_extents[0])
3584 unpin = &root->fs_info->freed_extents[1];
3585 else
3586 unpin = &root->fs_info->freed_extents[0];
3587 loop = false;
3588 goto again;
3589 }
3590
3570 return 0; 3591 return 0;
3571} 3592}
3572 3593
@@ -3580,21 +3601,23 @@ void btrfs_cleanup_one_transaction(struct btrfs_transaction *cur_trans,
3580 /* FIXME: cleanup wait for commit */ 3601 /* FIXME: cleanup wait for commit */
3581 cur_trans->in_commit = 1; 3602 cur_trans->in_commit = 1;
3582 cur_trans->blocked = 1; 3603 cur_trans->blocked = 1;
3583 if (waitqueue_active(&root->fs_info->transaction_blocked_wait)) 3604 wake_up(&root->fs_info->transaction_blocked_wait);
3584 wake_up(&root->fs_info->transaction_blocked_wait);
3585 3605
3586 cur_trans->blocked = 0; 3606 cur_trans->blocked = 0;
3587 if (waitqueue_active(&root->fs_info->transaction_wait)) 3607 wake_up(&root->fs_info->transaction_wait);
3588 wake_up(&root->fs_info->transaction_wait);
3589 3608
3590 cur_trans->commit_done = 1; 3609 cur_trans->commit_done = 1;
3591 if (waitqueue_active(&cur_trans->commit_wait)) 3610 wake_up(&cur_trans->commit_wait);
3592 wake_up(&cur_trans->commit_wait); 3611
3612 btrfs_destroy_delayed_inodes(root);
3613 btrfs_assert_delayed_root_empty(root);
3593 3614
3594 btrfs_destroy_pending_snapshots(cur_trans); 3615 btrfs_destroy_pending_snapshots(cur_trans);
3595 3616
3596 btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages, 3617 btrfs_destroy_marked_extents(root, &cur_trans->dirty_pages,
3597 EXTENT_DIRTY); 3618 EXTENT_DIRTY);
3619 btrfs_destroy_pinned_extent(root,
3620 root->fs_info->pinned_extents);
3598 3621
3599 /* 3622 /*
3600 memset(cur_trans, 0, sizeof(*cur_trans)); 3623 memset(cur_trans, 0, sizeof(*cur_trans));
@@ -3643,6 +3666,9 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
3643 if (waitqueue_active(&t->commit_wait)) 3666 if (waitqueue_active(&t->commit_wait))
3644 wake_up(&t->commit_wait); 3667 wake_up(&t->commit_wait);
3645 3668
3669 btrfs_destroy_delayed_inodes(root);
3670 btrfs_assert_delayed_root_empty(root);
3671
3646 btrfs_destroy_pending_snapshots(t); 3672 btrfs_destroy_pending_snapshots(t);
3647 3673
3648 btrfs_destroy_delalloc_inodes(root); 3674 btrfs_destroy_delalloc_inodes(root);
@@ -3671,17 +3697,6 @@ int btrfs_cleanup_transaction(struct btrfs_root *root)
3671 return 0; 3697 return 0;
3672} 3698}
3673 3699
3674static int btree_writepage_io_failed_hook(struct bio *bio, struct page *page,
3675 u64 start, u64 end,
3676 struct extent_state *state)
3677{
3678 struct super_block *sb = page->mapping->host->i_sb;
3679 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
3680 btrfs_error(fs_info, -EIO,
3681 "Error occured while writing out btree at %llu", start);
3682 return -EIO;
3683}
3684
3685static struct extent_io_ops btree_extent_io_ops = { 3700static struct extent_io_ops btree_extent_io_ops = {
3686 .write_cache_pages_lock_hook = btree_lock_page_hook, 3701 .write_cache_pages_lock_hook = btree_lock_page_hook,
3687 .readpage_end_io_hook = btree_readpage_end_io_hook, 3702 .readpage_end_io_hook = btree_readpage_end_io_hook,
@@ -3689,5 +3704,4 @@ static struct extent_io_ops btree_extent_io_ops = {
3689 .submit_bio_hook = btree_submit_bio_hook, 3704 .submit_bio_hook = btree_submit_bio_hook,
3690 /* note we're sharing with inode.c for the merge bio hook */ 3705 /* note we're sharing with inode.c for the merge bio hook */
3691 .merge_bio_hook = btrfs_merge_bio_hook, 3706 .merge_bio_hook = btrfs_merge_bio_hook,
3692 .writepage_io_failed_hook = btree_writepage_io_failed_hook,
3693}; 3707};
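Several hunks above switch device name printing to rcu_str_deref()/printk_in_rcu() from the newly added rcu-string.h. A rough sketch of the underlying RCU-protected string idea, using only standard RCU primitives; the names below are hypothetical and the real btrfs helpers may differ:

#include <linux/rcupdate.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/string.h>

struct ex_rcu_string {
	struct rcu_head rcu;
	char str[];
};

static struct ex_rcu_string *ex_rcu_string_strdup(const char *src, gfp_t mask)
{
	size_t len = strlen(src) + 1;
	struct ex_rcu_string *ret = kzalloc(sizeof(*ret) + len, mask);

	if (!ret)
		return NULL;
	memcpy(ret->str, src, len);
	return ret;
}

/* readers dereference under rcu_read_lock(), so a concurrent rename can
 * free the old string with kfree_rcu() without racing with the printk */
static void ex_print_name(struct ex_rcu_string __rcu *name)
{
	rcu_read_lock();
	printk(KERN_INFO "device %s\n", rcu_dereference(name)->str);
	rcu_read_unlock();
}

The point is that a printk can keep using the name without holding device_list_mutex; a rename would publish a new string with rcu_assign_pointer() and free the old one with kfree_rcu().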
diff --git a/fs/btrfs/disk-io.h b/fs/btrfs/disk-io.h
index ab1830aaf0ed..05b3fab39f7e 100644
--- a/fs/btrfs/disk-io.h
+++ b/fs/btrfs/disk-io.h
@@ -89,7 +89,6 @@ int btrfs_add_log_tree(struct btrfs_trans_handle *trans,
89int btrfs_cleanup_transaction(struct btrfs_root *root); 89int btrfs_cleanup_transaction(struct btrfs_root *root);
90void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans, 90void btrfs_cleanup_one_transaction(struct btrfs_transaction *trans,
91 struct btrfs_root *root); 91 struct btrfs_root *root);
92void btrfs_abort_devices(struct btrfs_root *root);
93 92
94#ifdef CONFIG_DEBUG_LOCK_ALLOC 93#ifdef CONFIG_DEBUG_LOCK_ALLOC
95void btrfs_init_lockdep(void); 94void btrfs_init_lockdep(void);
diff --git a/fs/btrfs/export.c b/fs/btrfs/export.c
index e887ee62b6d4..614f34a899c2 100644
--- a/fs/btrfs/export.c
+++ b/fs/btrfs/export.c
@@ -13,15 +13,14 @@
13 parent_root_objectid) / 4) 13 parent_root_objectid) / 4)
14#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4) 14#define BTRFS_FID_SIZE_CONNECTABLE_ROOT (sizeof(struct btrfs_fid) / 4)
15 15
16static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, 16static int btrfs_encode_fh(struct inode *inode, u32 *fh, int *max_len,
17 int connectable) 17 struct inode *parent)
18{ 18{
19 struct btrfs_fid *fid = (struct btrfs_fid *)fh; 19 struct btrfs_fid *fid = (struct btrfs_fid *)fh;
20 struct inode *inode = dentry->d_inode;
21 int len = *max_len; 20 int len = *max_len;
22 int type; 21 int type;
23 22
24 if (connectable && (len < BTRFS_FID_SIZE_CONNECTABLE)) { 23 if (parent && (len < BTRFS_FID_SIZE_CONNECTABLE)) {
25 *max_len = BTRFS_FID_SIZE_CONNECTABLE; 24 *max_len = BTRFS_FID_SIZE_CONNECTABLE;
26 return 255; 25 return 255;
27 } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) { 26 } else if (len < BTRFS_FID_SIZE_NON_CONNECTABLE) {
@@ -36,19 +35,13 @@ static int btrfs_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
36 fid->root_objectid = BTRFS_I(inode)->root->objectid; 35 fid->root_objectid = BTRFS_I(inode)->root->objectid;
37 fid->gen = inode->i_generation; 36 fid->gen = inode->i_generation;
38 37
39 if (connectable && !S_ISDIR(inode->i_mode)) { 38 if (parent) {
40 struct inode *parent;
41 u64 parent_root_id; 39 u64 parent_root_id;
42 40
43 spin_lock(&dentry->d_lock);
44
45 parent = dentry->d_parent->d_inode;
46 fid->parent_objectid = BTRFS_I(parent)->location.objectid; 41 fid->parent_objectid = BTRFS_I(parent)->location.objectid;
47 fid->parent_gen = parent->i_generation; 42 fid->parent_gen = parent->i_generation;
48 parent_root_id = BTRFS_I(parent)->root->objectid; 43 parent_root_id = BTRFS_I(parent)->root->objectid;
49 44
50 spin_unlock(&dentry->d_lock);
51
52 if (parent_root_id != fid->root_objectid) { 45 if (parent_root_id != fid->root_objectid) {
53 fid->parent_root_objectid = parent_root_id; 46 fid->parent_root_objectid = parent_root_id;
54 len = BTRFS_FID_SIZE_CONNECTABLE_ROOT; 47 len = BTRFS_FID_SIZE_CONNECTABLE_ROOT;
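The export.c hunk moves btrfs_encode_fh() to the new hook signature that receives the parent inode directly instead of a dentry plus connectable flag, which is why the d_lock juggling disappears. A hedged sketch of a minimal encode_fh under that signature for a hypothetical filesystem; the handle layout and fid type values are made up:

#include <linux/exportfs.h>
#include <linux/fs.h>
#include <linux/stddef.h>
#include <linux/types.h>

struct ex_fid {
	u64 ino;
	u32 gen;
	u64 parent_ino;
	u32 parent_gen;
} __packed;

#define EX_FID_WORDS		(offsetof(struct ex_fid, parent_ino) / 4)
#define EX_FID_WORDS_CONN	(sizeof(struct ex_fid) / 4)

static int ex_encode_fh(struct inode *inode, u32 *fh, int *max_len,
			struct inode *parent)
{
	struct ex_fid *fid = (struct ex_fid *)fh;

	/* a connectable handle is requested simply by passing a parent inode */
	if (*max_len < (parent ? EX_FID_WORDS_CONN : EX_FID_WORDS)) {
		*max_len = parent ? EX_FID_WORDS_CONN : EX_FID_WORDS;
		return 255;		/* buffer too small, as in the hunk above */
	}

	fid->ino = inode->i_ino;
	fid->gen = inode->i_generation;
	*max_len = EX_FID_WORDS;

	if (parent) {
		fid->parent_ino = parent->i_ino;
		fid->parent_gen = parent->i_generation;
		*max_len = EX_FID_WORDS_CONN;
		return 2;		/* hypothetical "connectable" fid type */
	}
	return 1;			/* hypothetical "plain" fid type */
}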
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index 49fd7b66d57b..4b5a1e1bdefb 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -3578,7 +3578,7 @@ again:
3578 space_info->chunk_alloc = 0; 3578 space_info->chunk_alloc = 0;
3579 spin_unlock(&space_info->lock); 3579 spin_unlock(&space_info->lock);
3580out: 3580out:
3581 mutex_unlock(&extent_root->fs_info->chunk_mutex); 3581 mutex_unlock(&fs_info->chunk_mutex);
3582 return ret; 3582 return ret;
3583} 3583}
3584 3584
@@ -4355,10 +4355,9 @@ static unsigned drop_outstanding_extent(struct inode *inode)
4355 BTRFS_I(inode)->outstanding_extents--; 4355 BTRFS_I(inode)->outstanding_extents--;
4356 4356
4357 if (BTRFS_I(inode)->outstanding_extents == 0 && 4357 if (BTRFS_I(inode)->outstanding_extents == 0 &&
4358 BTRFS_I(inode)->delalloc_meta_reserved) { 4358 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4359 &BTRFS_I(inode)->runtime_flags))
4359 drop_inode_space = 1; 4360 drop_inode_space = 1;
4360 BTRFS_I(inode)->delalloc_meta_reserved = 0;
4361 }
4362 4361
4363 /* 4362 /*
4364 * If we have more or the same amount of outstanding extents than we have 4363 * If we have more or the same amount of outstanding extents than we have
@@ -4465,7 +4464,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4465 * Add an item to reserve for updating the inode when we complete the 4464 * Add an item to reserve for updating the inode when we complete the
4466 * delalloc io. 4465 * delalloc io.
4467 */ 4466 */
4468 if (!BTRFS_I(inode)->delalloc_meta_reserved) { 4467 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4468 &BTRFS_I(inode)->runtime_flags)) {
4469 nr_extents++; 4469 nr_extents++;
4470 extra_reserve = 1; 4470 extra_reserve = 1;
4471 } 4471 }
@@ -4511,7 +4511,8 @@ int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
4511 4511
4512 spin_lock(&BTRFS_I(inode)->lock); 4512 spin_lock(&BTRFS_I(inode)->lock);
4513 if (extra_reserve) { 4513 if (extra_reserve) {
4514 BTRFS_I(inode)->delalloc_meta_reserved = 1; 4514 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
4515 &BTRFS_I(inode)->runtime_flags);
4515 nr_extents--; 4516 nr_extents--;
4516 } 4517 }
4517 BTRFS_I(inode)->reserved_extents += nr_extents; 4518 BTRFS_I(inode)->reserved_extents += nr_extents;
@@ -5217,7 +5218,7 @@ out:
5217void btrfs_free_tree_block(struct btrfs_trans_handle *trans, 5218void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5218 struct btrfs_root *root, 5219 struct btrfs_root *root,
5219 struct extent_buffer *buf, 5220 struct extent_buffer *buf,
5220 u64 parent, int last_ref, int for_cow) 5221 u64 parent, int last_ref)
5221{ 5222{
5222 struct btrfs_block_group_cache *cache = NULL; 5223 struct btrfs_block_group_cache *cache = NULL;
5223 int ret; 5224 int ret;
@@ -5227,7 +5228,7 @@ void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
5227 buf->start, buf->len, 5228 buf->start, buf->len,
5228 parent, root->root_key.objectid, 5229 parent, root->root_key.objectid,
5229 btrfs_header_level(buf), 5230 btrfs_header_level(buf),
5230 BTRFS_DROP_DELAYED_REF, NULL, for_cow); 5231 BTRFS_DROP_DELAYED_REF, NULL, 0);
5231 BUG_ON(ret); /* -ENOMEM */ 5232 BUG_ON(ret); /* -ENOMEM */
5232 } 5233 }
5233 5234
@@ -6249,7 +6250,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6249 struct btrfs_root *root, u32 blocksize, 6250 struct btrfs_root *root, u32 blocksize,
6250 u64 parent, u64 root_objectid, 6251 u64 parent, u64 root_objectid,
6251 struct btrfs_disk_key *key, int level, 6252 struct btrfs_disk_key *key, int level,
6252 u64 hint, u64 empty_size, int for_cow) 6253 u64 hint, u64 empty_size)
6253{ 6254{
6254 struct btrfs_key ins; 6255 struct btrfs_key ins;
6255 struct btrfs_block_rsv *block_rsv; 6256 struct btrfs_block_rsv *block_rsv;
@@ -6297,7 +6298,7 @@ struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
6297 ins.objectid, 6298 ins.objectid,
6298 ins.offset, parent, root_objectid, 6299 ins.offset, parent, root_objectid,
6299 level, BTRFS_ADD_DELAYED_EXTENT, 6300 level, BTRFS_ADD_DELAYED_EXTENT,
6300 extent_op, for_cow); 6301 extent_op, 0);
6301 BUG_ON(ret); /* -ENOMEM */ 6302 BUG_ON(ret); /* -ENOMEM */
6302 } 6303 }
6303 return buf; 6304 return buf;
@@ -6715,7 +6716,7 @@ static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6715 btrfs_header_owner(path->nodes[level + 1])); 6716 btrfs_header_owner(path->nodes[level + 1]));
6716 } 6717 }
6717 6718
6718 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1, 0); 6719 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6719out: 6720out:
6720 wc->refs[level] = 0; 6721 wc->refs[level] = 0;
6721 wc->flags[level] = 0; 6722 wc->flags[level] = 0;
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index c9018a05036e..aaa12c1eb348 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -20,6 +20,7 @@
20#include "volumes.h" 20#include "volumes.h"
21#include "check-integrity.h" 21#include "check-integrity.h"
22#include "locking.h" 22#include "locking.h"
23#include "rcu-string.h"
23 24
24static struct kmem_cache *extent_state_cache; 25static struct kmem_cache *extent_state_cache;
25static struct kmem_cache *extent_buffer_cache; 26static struct kmem_cache *extent_buffer_cache;
@@ -186,7 +187,6 @@ static struct rb_node *tree_insert(struct rb_root *root, u64 offset,
186 return parent; 187 return parent;
187 } 188 }
188 189
189 entry = rb_entry(node, struct tree_entry, rb_node);
190 rb_link_node(node, parent, p); 190 rb_link_node(node, parent, p);
191 rb_insert_color(node, root); 191 rb_insert_color(node, root);
192 return NULL; 192 return NULL;
@@ -413,7 +413,7 @@ static struct extent_state *next_state(struct extent_state *state)
413 413
414/* 414/*
415 * utility function to clear some bits in an extent state struct. 415 * utility function to clear some bits in an extent state struct.
416 * it will optionally wake up any one waiting on this state (wake == 1) 416 * it will optionally wake up any one waiting on this state (wake == 1).
417 * 417 *
418 * If no bits are set on the state struct after clearing things, the 418 * If no bits are set on the state struct after clearing things, the
419 * struct is freed and removed from the tree 419 * struct is freed and removed from the tree
@@ -570,10 +570,8 @@ hit_next:
570 if (err) 570 if (err)
571 goto out; 571 goto out;
572 if (state->end <= end) { 572 if (state->end <= end) {
573 clear_state_bit(tree, state, &bits, wake); 573 state = clear_state_bit(tree, state, &bits, wake);
574 if (last_end == (u64)-1) 574 goto next;
575 goto out;
576 start = last_end + 1;
577 } 575 }
578 goto search_again; 576 goto search_again;
579 } 577 }
@@ -781,7 +779,6 @@ hit_next:
781 * Just lock what we found and keep going 779 * Just lock what we found and keep going
782 */ 780 */
783 if (state->start == start && state->end <= end) { 781 if (state->start == start && state->end <= end) {
784 struct rb_node *next_node;
785 if (state->state & exclusive_bits) { 782 if (state->state & exclusive_bits) {
786 *failed_start = state->start; 783 *failed_start = state->start;
787 err = -EEXIST; 784 err = -EEXIST;
@@ -789,20 +786,15 @@ hit_next:
789 } 786 }
790 787
791 set_state_bits(tree, state, &bits); 788 set_state_bits(tree, state, &bits);
792
793 cache_state(state, cached_state); 789 cache_state(state, cached_state);
794 merge_state(tree, state); 790 merge_state(tree, state);
795 if (last_end == (u64)-1) 791 if (last_end == (u64)-1)
796 goto out; 792 goto out;
797
798 start = last_end + 1; 793 start = last_end + 1;
799 next_node = rb_next(&state->rb_node); 794 state = next_state(state);
800 if (next_node && start < end && prealloc && !need_resched()) { 795 if (start < end && state && state->start == start &&
801 state = rb_entry(next_node, struct extent_state, 796 !need_resched())
802 rb_node); 797 goto hit_next;
803 if (state->start == start)
804 goto hit_next;
805 }
806 goto search_again; 798 goto search_again;
807 } 799 }
808 800
@@ -845,6 +837,10 @@ hit_next:
845 if (last_end == (u64)-1) 837 if (last_end == (u64)-1)
846 goto out; 838 goto out;
847 start = last_end + 1; 839 start = last_end + 1;
840 state = next_state(state);
841 if (start < end && state && state->start == start &&
842 !need_resched())
843 goto hit_next;
848 } 844 }
849 goto search_again; 845 goto search_again;
850 } 846 }
@@ -994,21 +990,14 @@ hit_next:
994 * Just lock what we found and keep going 990 * Just lock what we found and keep going
995 */ 991 */
996 if (state->start == start && state->end <= end) { 992 if (state->start == start && state->end <= end) {
997 struct rb_node *next_node;
998
999 set_state_bits(tree, state, &bits); 993 set_state_bits(tree, state, &bits);
1000 clear_state_bit(tree, state, &clear_bits, 0); 994 state = clear_state_bit(tree, state, &clear_bits, 0);
1001 if (last_end == (u64)-1) 995 if (last_end == (u64)-1)
1002 goto out; 996 goto out;
1003
1004 start = last_end + 1; 997 start = last_end + 1;
1005 next_node = rb_next(&state->rb_node); 998 if (start < end && state && state->start == start &&
1006 if (next_node && start < end && prealloc && !need_resched()) { 999 !need_resched())
1007 state = rb_entry(next_node, struct extent_state, 1000 goto hit_next;
1008 rb_node);
1009 if (state->start == start)
1010 goto hit_next;
1011 }
1012 goto search_again; 1001 goto search_again;
1013 } 1002 }
1014 1003
@@ -1042,10 +1031,13 @@ hit_next:
1042 goto out; 1031 goto out;
1043 if (state->end <= end) { 1032 if (state->end <= end) {
1044 set_state_bits(tree, state, &bits); 1033 set_state_bits(tree, state, &bits);
1045 clear_state_bit(tree, state, &clear_bits, 0); 1034 state = clear_state_bit(tree, state, &clear_bits, 0);
1046 if (last_end == (u64)-1) 1035 if (last_end == (u64)-1)
1047 goto out; 1036 goto out;
1048 start = last_end + 1; 1037 start = last_end + 1;
1038 if (start < end && state && state->start == start &&
1039 !need_resched())
1040 goto hit_next;
1049 } 1041 }
1050 goto search_again; 1042 goto search_again;
1051 } 1043 }
@@ -1173,9 +1165,8 @@ int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1173 cached_state, mask); 1165 cached_state, mask);
1174} 1166}
1175 1167
1176static int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, 1168int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
1177 u64 end, struct extent_state **cached_state, 1169 struct extent_state **cached_state, gfp_t mask)
1178 gfp_t mask)
1179{ 1170{
1180 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0, 1171 return clear_extent_bit(tree, start, end, EXTENT_UPTODATE, 0, 0,
1181 cached_state, mask); 1172 cached_state, mask);
@@ -1293,7 +1284,7 @@ out:
1293 * returned if we find something, and *start_ret and *end_ret are 1284 * returned if we find something, and *start_ret and *end_ret are
1294 * set to reflect the state struct that was found. 1285 * set to reflect the state struct that was found.
1295 * 1286 *
1296 * If nothing was found, 1 is returned, < 0 on error 1287 * If nothing was found, 1 is returned. If something was found, 0 is returned.
1297 */ 1288 */
1298int find_first_extent_bit(struct extent_io_tree *tree, u64 start, 1289int find_first_extent_bit(struct extent_io_tree *tree, u64 start,
1299 u64 *start_ret, u64 *end_ret, int bits) 1290 u64 *start_ret, u64 *end_ret, int bits)
@@ -1923,12 +1914,13 @@ int repair_io_failure(struct btrfs_mapping_tree *map_tree, u64 start,
1923 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) { 1914 if (!test_bit(BIO_UPTODATE, &bio->bi_flags)) {
1924 /* try to remap that extent elsewhere? */ 1915 /* try to remap that extent elsewhere? */
1925 bio_put(bio); 1916 bio_put(bio);
1917 btrfs_dev_stat_inc_and_print(dev, BTRFS_DEV_STAT_WRITE_ERRS);
1926 return -EIO; 1918 return -EIO;
1927 } 1919 }
1928 1920
1929 printk(KERN_INFO "btrfs read error corrected: ino %lu off %llu (dev %s " 1921 printk_in_rcu(KERN_INFO "btrfs read error corrected: ino %lu off %llu "
1930 "sector %llu)\n", page->mapping->host->i_ino, start, 1922 "(dev %s sector %llu)\n", page->mapping->host->i_ino,
1931 dev->name, sector); 1923 start, rcu_str_deref(dev->name), sector);
1932 1924
1933 bio_put(bio); 1925 bio_put(bio);
1934 return 0; 1926 return 0;
@@ -2222,17 +2214,7 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2222 uptodate = 0; 2214 uptodate = 0;
2223 } 2215 }
2224 2216
2225 if (!uptodate && tree->ops &&
2226 tree->ops->writepage_io_failed_hook) {
2227 ret = tree->ops->writepage_io_failed_hook(NULL, page,
2228 start, end, NULL);
2229 /* Writeback already completed */
2230 if (ret == 0)
2231 return 1;
2232 }
2233
2234 if (!uptodate) { 2217 if (!uptodate) {
2235 clear_extent_uptodate(tree, start, end, NULL, GFP_NOFS);
2236 ClearPageUptodate(page); 2218 ClearPageUptodate(page);
2237 SetPageError(page); 2219 SetPageError(page);
2238 } 2220 }
@@ -2347,10 +2329,23 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2347 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) { 2329 if (uptodate && tree->ops && tree->ops->readpage_end_io_hook) {
2348 ret = tree->ops->readpage_end_io_hook(page, start, end, 2330 ret = tree->ops->readpage_end_io_hook(page, start, end,
2349 state, mirror); 2331 state, mirror);
2350 if (ret) 2332 if (ret) {
2333 /* no I/O error was indicated, but software detected errors
2334 * in the block: either checksum errors or
2335 * issues with the contents */
2336 struct btrfs_root *root =
2337 BTRFS_I(page->mapping->host)->root;
2338 struct btrfs_device *device;
2339
2351 uptodate = 0; 2340 uptodate = 0;
2352 else 2341 device = btrfs_find_device_for_logical(
2342 root, start, mirror);
2343 if (device)
2344 btrfs_dev_stat_inc_and_print(device,
2345 BTRFS_DEV_STAT_CORRUPTION_ERRS);
2346 } else {
2353 clean_io_failure(start, page); 2347 clean_io_failure(start, page);
2348 }
2354 } 2349 }
2355 2350
2356 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) { 2351 if (!uptodate && tree->ops && tree->ops->readpage_io_failed_hook) {
@@ -3164,7 +3159,7 @@ static int write_one_eb(struct extent_buffer *eb,
3164 u64 offset = eb->start; 3159 u64 offset = eb->start;
3165 unsigned long i, num_pages; 3160 unsigned long i, num_pages;
3166 int rw = (epd->sync_io ? WRITE_SYNC : WRITE); 3161 int rw = (epd->sync_io ? WRITE_SYNC : WRITE);
3167 int ret; 3162 int ret = 0;
3168 3163
3169 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3164 clear_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3170 num_pages = num_extent_pages(eb->start, eb->len); 3165 num_pages = num_extent_pages(eb->start, eb->len);
@@ -3930,6 +3925,7 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3930 eb->start = start; 3925 eb->start = start;
3931 eb->len = len; 3926 eb->len = len;
3932 eb->tree = tree; 3927 eb->tree = tree;
3928 eb->bflags = 0;
3933 rwlock_init(&eb->lock); 3929 rwlock_init(&eb->lock);
3934 atomic_set(&eb->write_locks, 0); 3930 atomic_set(&eb->write_locks, 0);
3935 atomic_set(&eb->read_locks, 0); 3931 atomic_set(&eb->read_locks, 0);
@@ -3967,6 +3963,60 @@ static struct extent_buffer *__alloc_extent_buffer(struct extent_io_tree *tree,
3967 return eb; 3963 return eb;
3968} 3964}
3969 3965
3966struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src)
3967{
3968 unsigned long i;
3969 struct page *p;
3970 struct extent_buffer *new;
3971 unsigned long num_pages = num_extent_pages(src->start, src->len);
3972
3973 new = __alloc_extent_buffer(NULL, src->start, src->len, GFP_ATOMIC);
3974 if (new == NULL)
3975 return NULL;
3976
3977 for (i = 0; i < num_pages; i++) {
3978 p = alloc_page(GFP_ATOMIC);
3979 BUG_ON(!p);
3980 attach_extent_buffer_page(new, p);
3981 WARN_ON(PageDirty(p));
3982 SetPageUptodate(p);
3983 new->pages[i] = p;
3984 }
3985
3986 copy_extent_buffer(new, src, 0, 0, src->len);
3987 set_bit(EXTENT_BUFFER_UPTODATE, &new->bflags);
3988 set_bit(EXTENT_BUFFER_DUMMY, &new->bflags);
3989
3990 return new;
3991}
3992
3993struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len)
3994{
3995 struct extent_buffer *eb;
3996 unsigned long num_pages = num_extent_pages(0, len);
3997 unsigned long i;
3998
3999 eb = __alloc_extent_buffer(NULL, start, len, GFP_ATOMIC);
4000 if (!eb)
4001 return NULL;
4002
4003 for (i = 0; i < num_pages; i++) {
4004 eb->pages[i] = alloc_page(GFP_ATOMIC);
4005 if (!eb->pages[i])
4006 goto err;
4007 }
4008 set_extent_buffer_uptodate(eb);
4009 btrfs_set_header_nritems(eb, 0);
4010 set_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
4011
4012 return eb;
4013err:
4014 for (i--; i > 0; i--)
4015 __free_page(eb->pages[i]);
4016 __free_extent_buffer(eb);
4017 return NULL;
4018}
4019
3970static int extent_buffer_under_io(struct extent_buffer *eb) 4020static int extent_buffer_under_io(struct extent_buffer *eb)
3971{ 4021{
3972 return (atomic_read(&eb->io_pages) || 4022 return (atomic_read(&eb->io_pages) ||
@@ -3981,18 +4031,21 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
3981 unsigned long start_idx) 4031 unsigned long start_idx)
3982{ 4032{
3983 unsigned long index; 4033 unsigned long index;
4034 unsigned long num_pages;
3984 struct page *page; 4035 struct page *page;
4036 int mapped = !test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags);
3985 4037
3986 BUG_ON(extent_buffer_under_io(eb)); 4038 BUG_ON(extent_buffer_under_io(eb));
3987 4039
3988 index = num_extent_pages(eb->start, eb->len); 4040 num_pages = num_extent_pages(eb->start, eb->len);
4041 index = start_idx + num_pages;
3989 if (start_idx >= index) 4042 if (start_idx >= index)
3990 return; 4043 return;
3991 4044
3992 do { 4045 do {
3993 index--; 4046 index--;
3994 page = extent_buffer_page(eb, index); 4047 page = extent_buffer_page(eb, index);
3995 if (page) { 4048 if (page && mapped) {
3996 spin_lock(&page->mapping->private_lock); 4049 spin_lock(&page->mapping->private_lock);
3997 /* 4050 /*
3998 * We do this since we'll remove the pages after we've 4051 * We do this since we'll remove the pages after we've
@@ -4017,6 +4070,8 @@ static void btrfs_release_extent_buffer_page(struct extent_buffer *eb,
4017 } 4070 }
4018 spin_unlock(&page->mapping->private_lock); 4071 spin_unlock(&page->mapping->private_lock);
4019 4072
4073 }
4074 if (page) {
4020 /* One for when we alloced the page */ 4075 /* One for when we alloced the page */
4021 page_cache_release(page); 4076 page_cache_release(page);
4022 } 4077 }
@@ -4235,14 +4290,18 @@ static void release_extent_buffer(struct extent_buffer *eb, gfp_t mask)
4235{ 4290{
4236 WARN_ON(atomic_read(&eb->refs) == 0); 4291 WARN_ON(atomic_read(&eb->refs) == 0);
4237 if (atomic_dec_and_test(&eb->refs)) { 4292 if (atomic_dec_and_test(&eb->refs)) {
4238 struct extent_io_tree *tree = eb->tree; 4293 if (test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags)) {
4294 spin_unlock(&eb->refs_lock);
4295 } else {
4296 struct extent_io_tree *tree = eb->tree;
4239 4297
4240 spin_unlock(&eb->refs_lock); 4298 spin_unlock(&eb->refs_lock);
4241 4299
4242 spin_lock(&tree->buffer_lock); 4300 spin_lock(&tree->buffer_lock);
4243 radix_tree_delete(&tree->buffer, 4301 radix_tree_delete(&tree->buffer,
4244 eb->start >> PAGE_CACHE_SHIFT); 4302 eb->start >> PAGE_CACHE_SHIFT);
4245 spin_unlock(&tree->buffer_lock); 4303 spin_unlock(&tree->buffer_lock);
4304 }
4246 4305
4247 /* Should be safe to release our pages at this point */ 4306 /* Should be safe to release our pages at this point */
4248 btrfs_release_extent_buffer_page(eb, 0); 4307 btrfs_release_extent_buffer_page(eb, 0);
@@ -4260,6 +4319,10 @@ void free_extent_buffer(struct extent_buffer *eb)
4260 4319
4261 spin_lock(&eb->refs_lock); 4320 spin_lock(&eb->refs_lock);
4262 if (atomic_read(&eb->refs) == 2 && 4321 if (atomic_read(&eb->refs) == 2 &&
4322 test_bit(EXTENT_BUFFER_DUMMY, &eb->bflags))
4323 atomic_dec(&eb->refs);
4324
4325 if (atomic_read(&eb->refs) == 2 &&
4263 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) && 4326 test_bit(EXTENT_BUFFER_STALE, &eb->bflags) &&
4264 !extent_buffer_under_io(eb) && 4327 !extent_buffer_under_io(eb) &&
4265 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags)) 4328 test_and_clear_bit(EXTENT_BUFFER_TREE_REF, &eb->bflags))
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index b516c3b8dec6..25900af5b15d 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -39,6 +39,7 @@
39#define EXTENT_BUFFER_STALE 6 39#define EXTENT_BUFFER_STALE 6
40#define EXTENT_BUFFER_WRITEBACK 7 40#define EXTENT_BUFFER_WRITEBACK 7
41#define EXTENT_BUFFER_IOERR 8 41#define EXTENT_BUFFER_IOERR 8
42#define EXTENT_BUFFER_DUMMY 9
42 43
43/* these are flags for extent_clear_unlock_delalloc */ 44/* these are flags for extent_clear_unlock_delalloc */
44#define EXTENT_CLEAR_UNLOCK_PAGE 0x1 45#define EXTENT_CLEAR_UNLOCK_PAGE 0x1
@@ -75,9 +76,6 @@ struct extent_io_ops {
75 unsigned long bio_flags); 76 unsigned long bio_flags);
76 int (*readpage_io_hook)(struct page *page, u64 start, u64 end); 77 int (*readpage_io_hook)(struct page *page, u64 start, u64 end);
77 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror); 78 int (*readpage_io_failed_hook)(struct page *page, int failed_mirror);
78 int (*writepage_io_failed_hook)(struct bio *bio, struct page *page,
79 u64 start, u64 end,
80 struct extent_state *state);
81 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end, 79 int (*readpage_end_io_hook)(struct page *page, u64 start, u64 end,
82 struct extent_state *state, int mirror); 80 struct extent_state *state, int mirror);
83 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, 81 int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end,
@@ -225,6 +223,8 @@ int set_extent_bit(struct extent_io_tree *tree, u64 start, u64 end,
225 struct extent_state **cached_state, gfp_t mask); 223 struct extent_state **cached_state, gfp_t mask);
226int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end, 224int set_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
227 struct extent_state **cached_state, gfp_t mask); 225 struct extent_state **cached_state, gfp_t mask);
226int clear_extent_uptodate(struct extent_io_tree *tree, u64 start, u64 end,
227 struct extent_state **cached_state, gfp_t mask);
228int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end, 228int set_extent_new(struct extent_io_tree *tree, u64 start, u64 end,
229 gfp_t mask); 229 gfp_t mask);
230int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end, 230int set_extent_dirty(struct extent_io_tree *tree, u64 start, u64 end,
@@ -265,6 +265,8 @@ void set_page_extent_mapped(struct page *page);
265 265
266struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree, 266struct extent_buffer *alloc_extent_buffer(struct extent_io_tree *tree,
267 u64 start, unsigned long len); 267 u64 start, unsigned long len);
268struct extent_buffer *alloc_dummy_extent_buffer(u64 start, unsigned long len);
269struct extent_buffer *btrfs_clone_extent_buffer(struct extent_buffer *src);
268struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree, 270struct extent_buffer *find_extent_buffer(struct extent_io_tree *tree,
269 u64 start, unsigned long len); 271 u64 start, unsigned long len);
270void free_extent_buffer(struct extent_buffer *eb); 272void free_extent_buffer(struct extent_buffer *eb);
diff --git a/fs/btrfs/file.c b/fs/btrfs/file.c
index 53bf2d764bbc..70dc8ca73e25 100644
--- a/fs/btrfs/file.c
+++ b/fs/btrfs/file.c
@@ -65,6 +65,21 @@ struct inode_defrag {
65 int cycled; 65 int cycled;
66}; 66};
67 67
68static int __compare_inode_defrag(struct inode_defrag *defrag1,
69 struct inode_defrag *defrag2)
70{
71 if (defrag1->root > defrag2->root)
72 return 1;
73 else if (defrag1->root < defrag2->root)
74 return -1;
75 else if (defrag1->ino > defrag2->ino)
76 return 1;
77 else if (defrag1->ino < defrag2->ino)
78 return -1;
79 else
80 return 0;
81}
82
68/* pop a record for an inode into the defrag tree. The lock 83/* pop a record for an inode into the defrag tree. The lock
69 * must be held already 84 * must be held already
70 * 85 *
@@ -81,15 +96,17 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
81 struct inode_defrag *entry; 96 struct inode_defrag *entry;
82 struct rb_node **p; 97 struct rb_node **p;
83 struct rb_node *parent = NULL; 98 struct rb_node *parent = NULL;
99 int ret;
84 100
85 p = &root->fs_info->defrag_inodes.rb_node; 101 p = &root->fs_info->defrag_inodes.rb_node;
86 while (*p) { 102 while (*p) {
87 parent = *p; 103 parent = *p;
88 entry = rb_entry(parent, struct inode_defrag, rb_node); 104 entry = rb_entry(parent, struct inode_defrag, rb_node);
89 105
90 if (defrag->ino < entry->ino) 106 ret = __compare_inode_defrag(defrag, entry);
107 if (ret < 0)
91 p = &parent->rb_left; 108 p = &parent->rb_left;
92 else if (defrag->ino > entry->ino) 109 else if (ret > 0)
93 p = &parent->rb_right; 110 p = &parent->rb_right;
94 else { 111 else {
95 /* if we're reinserting an entry for 112 /* if we're reinserting an entry for
@@ -103,7 +120,7 @@ static void __btrfs_add_inode_defrag(struct inode *inode,
103 goto exists; 120 goto exists;
104 } 121 }
105 } 122 }
106 BTRFS_I(inode)->in_defrag = 1; 123 set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
107 rb_link_node(&defrag->rb_node, parent, p); 124 rb_link_node(&defrag->rb_node, parent, p);
108 rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes); 125 rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
109 return; 126 return;
@@ -131,7 +148,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
131 if (btrfs_fs_closing(root->fs_info)) 148 if (btrfs_fs_closing(root->fs_info))
132 return 0; 149 return 0;
133 150
134 if (BTRFS_I(inode)->in_defrag) 151 if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
135 return 0; 152 return 0;
136 153
137 if (trans) 154 if (trans)
@@ -148,7 +165,7 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
148 defrag->root = root->root_key.objectid; 165 defrag->root = root->root_key.objectid;
149 166
150 spin_lock(&root->fs_info->defrag_inodes_lock); 167 spin_lock(&root->fs_info->defrag_inodes_lock);
151 if (!BTRFS_I(inode)->in_defrag) 168 if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
152 __btrfs_add_inode_defrag(inode, defrag); 169 __btrfs_add_inode_defrag(inode, defrag);
153 else 170 else
154 kfree(defrag); 171 kfree(defrag);
@@ -159,28 +176,35 @@ int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
159/* 176/*
160 * must be called with the defrag_inodes lock held 177 * must be called with the defrag_inodes lock held
161 */ 178 */
162struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info, u64 ino, 179struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
180 u64 root, u64 ino,
163 struct rb_node **next) 181 struct rb_node **next)
164{ 182{
165 struct inode_defrag *entry = NULL; 183 struct inode_defrag *entry = NULL;
184 struct inode_defrag tmp;
166 struct rb_node *p; 185 struct rb_node *p;
167 struct rb_node *parent = NULL; 186 struct rb_node *parent = NULL;
187 int ret;
188
189 tmp.ino = ino;
190 tmp.root = root;
168 191
169 p = info->defrag_inodes.rb_node; 192 p = info->defrag_inodes.rb_node;
170 while (p) { 193 while (p) {
171 parent = p; 194 parent = p;
172 entry = rb_entry(parent, struct inode_defrag, rb_node); 195 entry = rb_entry(parent, struct inode_defrag, rb_node);
173 196
174 if (ino < entry->ino) 197 ret = __compare_inode_defrag(&tmp, entry);
198 if (ret < 0)
175 p = parent->rb_left; 199 p = parent->rb_left;
176 else if (ino > entry->ino) 200 else if (ret > 0)
177 p = parent->rb_right; 201 p = parent->rb_right;
178 else 202 else
179 return entry; 203 return entry;
180 } 204 }
181 205
182 if (next) { 206 if (next) {
183 while (parent && ino > entry->ino) { 207 while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
184 parent = rb_next(parent); 208 parent = rb_next(parent);
185 entry = rb_entry(parent, struct inode_defrag, rb_node); 209 entry = rb_entry(parent, struct inode_defrag, rb_node);
186 } 210 }
@@ -202,6 +226,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
202 struct btrfs_key key; 226 struct btrfs_key key;
203 struct btrfs_ioctl_defrag_range_args range; 227 struct btrfs_ioctl_defrag_range_args range;
204 u64 first_ino = 0; 228 u64 first_ino = 0;
229 u64 root_objectid = 0;
205 int num_defrag; 230 int num_defrag;
206 int defrag_batch = 1024; 231 int defrag_batch = 1024;
207 232
@@ -214,11 +239,14 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
214 n = NULL; 239 n = NULL;
215 240
216 /* find an inode to defrag */ 241 /* find an inode to defrag */
217 defrag = btrfs_find_defrag_inode(fs_info, first_ino, &n); 242 defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
243 first_ino, &n);
218 if (!defrag) { 244 if (!defrag) {
219 if (n) 245 if (n) {
220 defrag = rb_entry(n, struct inode_defrag, rb_node); 246 defrag = rb_entry(n, struct inode_defrag,
221 else if (first_ino) { 247 rb_node);
248 } else if (root_objectid || first_ino) {
249 root_objectid = 0;
222 first_ino = 0; 250 first_ino = 0;
223 continue; 251 continue;
224 } else { 252 } else {
@@ -228,6 +256,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
228 256
229 /* remove it from the rbtree */ 257 /* remove it from the rbtree */
230 first_ino = defrag->ino + 1; 258 first_ino = defrag->ino + 1;
259 root_objectid = defrag->root;
231 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes); 260 rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);
232 261
233 if (btrfs_fs_closing(fs_info)) 262 if (btrfs_fs_closing(fs_info))
@@ -252,7 +281,7 @@ int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
252 goto next; 281 goto next;
253 282
254 /* do a chunk of defrag */ 283 /* do a chunk of defrag */
255 BTRFS_I(inode)->in_defrag = 0; 284 clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
256 range.start = defrag->last_offset; 285 range.start = defrag->last_offset;
257 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid, 286 num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
258 defrag_batch); 287 defrag_batch);
@@ -1404,12 +1433,11 @@ static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
1404 goto out; 1433 goto out;
1405 } 1434 }
1406 1435
1407 err = btrfs_update_time(file); 1436 err = file_update_time(file);
1408 if (err) { 1437 if (err) {
1409 mutex_unlock(&inode->i_mutex); 1438 mutex_unlock(&inode->i_mutex);
1410 goto out; 1439 goto out;
1411 } 1440 }
1412 BTRFS_I(inode)->sequence++;
1413 1441
1414 start_pos = round_down(pos, root->sectorsize); 1442 start_pos = round_down(pos, root->sectorsize);
1415 if (start_pos > i_size_read(inode)) { 1443 if (start_pos > i_size_read(inode)) {
@@ -1466,8 +1494,8 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
1466 * flush down new bytes that may have been written if the 1494 * flush down new bytes that may have been written if the
1467 * application were using truncate to replace a file in place. 1495 * application were using truncate to replace a file in place.
1468 */ 1496 */
1469 if (BTRFS_I(inode)->ordered_data_close) { 1497 if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
1470 BTRFS_I(inode)->ordered_data_close = 0; 1498 &BTRFS_I(inode)->runtime_flags)) {
1471 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode); 1499 btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
1472 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT) 1500 if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
1473 filemap_flush(inode->i_mapping); 1501 filemap_flush(inode->i_mapping);
@@ -1498,14 +1526,15 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1498 1526
1499 trace_btrfs_sync_file(file, datasync); 1527 trace_btrfs_sync_file(file, datasync);
1500 1528
1501 ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
1502 if (ret)
1503 return ret;
1504 mutex_lock(&inode->i_mutex); 1529 mutex_lock(&inode->i_mutex);
1505 1530
1506 /* we wait first, since the writeback may change the inode */ 1531 /*
1532 * we wait first, since the writeback may change the inode; also, the
1533 * wait on the ordered range does a filemap_write_and_wait_range, which
1534 * is why we don't do it above like other file systems.
1535 */
1507 root->log_batch++; 1536 root->log_batch++;
1508 btrfs_wait_ordered_range(inode, 0, (u64)-1); 1537 btrfs_wait_ordered_range(inode, start, end);
1509 root->log_batch++; 1538 root->log_batch++;
1510 1539
1511 /* 1540 /*
@@ -1523,7 +1552,8 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
1523 * syncing 1552 * syncing
1524 */ 1553 */
1525 smp_mb(); 1554 smp_mb();
1526 if (BTRFS_I(inode)->last_trans <= 1555 if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
1556 BTRFS_I(inode)->last_trans <=
1527 root->fs_info->last_trans_committed) { 1557 root->fs_info->last_trans_committed) {
1528 BTRFS_I(inode)->last_trans = 0; 1558 BTRFS_I(inode)->last_trans = 0;
1529 mutex_unlock(&inode->i_mutex); 1559 mutex_unlock(&inode->i_mutex);
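The file.c hunks above re-key the defrag rbtree on (root objectid, inode number) and route every ordering decision through __compare_inode_defrag(), whose body is not part of this hunk. A minimal sketch of such a comparator, assuming only the root and ino fields used above, would be:

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	/*
	 * Order by root objectid first, then by inode number, so entries
	 * from every subvolume can share a single rbtree.
	 */
	if (defrag1->root > defrag2->root)
		return 1;
	if (defrag1->root < defrag2->root)
		return -1;
	if (defrag1->ino > defrag2->ino)
		return 1;
	if (defrag1->ino < defrag2->ino)
		return -1;
	return 0;
}

With that ordering in place, btrfs_find_defrag_inode() walks the tree exactly as before, it merely compares two fields instead of one.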
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index 202008ec367d..81296c57405a 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -33,6 +33,8 @@
33 33
34static int link_free_space(struct btrfs_free_space_ctl *ctl, 34static int link_free_space(struct btrfs_free_space_ctl *ctl,
35 struct btrfs_free_space *info); 35 struct btrfs_free_space *info);
36static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
37 struct btrfs_free_space *info);
36 38
37static struct inode *__lookup_free_space_inode(struct btrfs_root *root, 39static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
38 struct btrfs_path *path, 40 struct btrfs_path *path,
@@ -75,7 +77,8 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
75 return ERR_PTR(-ENOENT); 77 return ERR_PTR(-ENOENT);
76 } 78 }
77 79
78 inode->i_mapping->flags &= ~__GFP_FS; 80 mapping_set_gfp_mask(inode->i_mapping,
81 mapping_gfp_mask(inode->i_mapping) & ~__GFP_FS);
79 82
80 return inode; 83 return inode;
81} 84}
@@ -365,7 +368,7 @@ static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
365 368
366static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation) 369static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
367{ 370{
368 u64 *val; 371 __le64 *val;
369 372
370 io_ctl_map_page(io_ctl, 1); 373 io_ctl_map_page(io_ctl, 1);
371 374
@@ -388,7 +391,7 @@ static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
388 391
389static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation) 392static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
390{ 393{
391 u64 *gen; 394 __le64 *gen;
392 395
393 /* 396 /*
394 * Skip the crc area. If we don't check crcs then we just have a 64bit 397 * Skip the crc area. If we don't check crcs then we just have a 64bit
@@ -584,6 +587,44 @@ static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
584 return 0; 587 return 0;
585} 588}
586 589
590/*
591 * Since we attach pinned extents after the fact we can have contiguous sections
592 * of free space that are split up in entries. This poses a problem with the
593 * tree logging stuff since it could have allocated across what appears to be 2
 594 * entries, because we would have merged the entries when adding the pinned extents
595 * back to the free space cache. So run through the space cache that we just
596 * loaded and merge contiguous entries. This will make the log replay stuff not
597 * blow up and it will make for nicer allocator behavior.
598 */
599static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
600{
601 struct btrfs_free_space *e, *prev = NULL;
602 struct rb_node *n;
603
604again:
605 spin_lock(&ctl->tree_lock);
606 for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
607 e = rb_entry(n, struct btrfs_free_space, offset_index);
608 if (!prev)
609 goto next;
610 if (e->bitmap || prev->bitmap)
611 goto next;
612 if (prev->offset + prev->bytes == e->offset) {
613 unlink_free_space(ctl, prev);
614 unlink_free_space(ctl, e);
615 prev->bytes += e->bytes;
616 kmem_cache_free(btrfs_free_space_cachep, e);
617 link_free_space(ctl, prev);
618 prev = NULL;
619 spin_unlock(&ctl->tree_lock);
620 goto again;
621 }
622next:
623 prev = e;
624 }
625 spin_unlock(&ctl->tree_lock);
626}
627
587int __load_free_space_cache(struct btrfs_root *root, struct inode *inode, 628int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
588 struct btrfs_free_space_ctl *ctl, 629 struct btrfs_free_space_ctl *ctl,
589 struct btrfs_path *path, u64 offset) 630 struct btrfs_path *path, u64 offset)
@@ -726,6 +767,7 @@ int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
726 } 767 }
727 768
728 io_ctl_drop_pages(&io_ctl); 769 io_ctl_drop_pages(&io_ctl);
770 merge_space_tree(ctl);
729 ret = 1; 771 ret = 1;
730out: 772out:
731 io_ctl_free(&io_ctl); 773 io_ctl_free(&io_ctl);
@@ -972,9 +1014,7 @@ int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
972 goto out; 1014 goto out;
973 1015
974 1016
975 ret = filemap_write_and_wait(inode->i_mapping); 1017 btrfs_wait_ordered_range(inode, 0, (u64)-1);
976 if (ret)
977 goto out;
978 1018
979 key.objectid = BTRFS_FREE_SPACE_OBJECTID; 1019 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
980 key.offset = offset; 1020 key.offset = offset;
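In the free-space-cache.c hunks, the cached generation pointer changes from u64 * to __le64 *. The value was already stored little-endian on disk; the annotation simply lets sparse flag any access that skips the byte-order conversion. A small illustration of the pattern the annotation enforces (the helper names here are invented for the example, they are not btrfs code):

#include <linux/types.h>
#include <asm/byteorder.h>

static void stamp_generation(__le64 *slot, u64 generation)
{
	*slot = cpu_to_le64(generation);	/* CPU order -> on-disk LE */
}

static bool generation_matches(const __le64 *slot, u64 expected)
{
	return le64_to_cpu(*slot) == expected;	/* on-disk LE -> CPU order */
}

Assigning a plain u64 to *slot, or comparing *slot directly against a CPU-order value, now produces a sparse warning instead of a silent bug on big-endian machines.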
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index ceb7b9c9edcc..d8bb0dbc4941 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -89,7 +89,7 @@ static unsigned char btrfs_type_by_mode[S_IFMT >> S_SHIFT] = {
89 89
90static int btrfs_setsize(struct inode *inode, loff_t newsize); 90static int btrfs_setsize(struct inode *inode, loff_t newsize);
91static int btrfs_truncate(struct inode *inode); 91static int btrfs_truncate(struct inode *inode);
92static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end); 92static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent);
93static noinline int cow_file_range(struct inode *inode, 93static noinline int cow_file_range(struct inode *inode,
94 struct page *locked_page, 94 struct page *locked_page,
95 u64 start, u64 end, int *page_started, 95 u64 start, u64 end, int *page_started,
@@ -257,10 +257,13 @@ static noinline int cow_file_range_inline(struct btrfs_trans_handle *trans,
257 ret = insert_inline_extent(trans, root, inode, start, 257 ret = insert_inline_extent(trans, root, inode, start,
258 inline_len, compressed_size, 258 inline_len, compressed_size,
259 compress_type, compressed_pages); 259 compress_type, compressed_pages);
260 if (ret) { 260 if (ret && ret != -ENOSPC) {
261 btrfs_abort_transaction(trans, root, ret); 261 btrfs_abort_transaction(trans, root, ret);
262 return ret; 262 return ret;
263 } else if (ret == -ENOSPC) {
264 return 1;
263 } 265 }
266
264 btrfs_delalloc_release_metadata(inode, end + 1 - start); 267 btrfs_delalloc_release_metadata(inode, end + 1 - start);
265 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0); 268 btrfs_drop_extent_cache(inode, start, aligned_end - 1, 0);
266 return 0; 269 return 0;
@@ -827,7 +830,7 @@ static noinline int cow_file_range(struct inode *inode,
827 if (IS_ERR(trans)) { 830 if (IS_ERR(trans)) {
828 extent_clear_unlock_delalloc(inode, 831 extent_clear_unlock_delalloc(inode,
829 &BTRFS_I(inode)->io_tree, 832 &BTRFS_I(inode)->io_tree,
830 start, end, NULL, 833 start, end, locked_page,
831 EXTENT_CLEAR_UNLOCK_PAGE | 834 EXTENT_CLEAR_UNLOCK_PAGE |
832 EXTENT_CLEAR_UNLOCK | 835 EXTENT_CLEAR_UNLOCK |
833 EXTENT_CLEAR_DELALLOC | 836 EXTENT_CLEAR_DELALLOC |
@@ -960,7 +963,7 @@ out:
960out_unlock: 963out_unlock:
961 extent_clear_unlock_delalloc(inode, 964 extent_clear_unlock_delalloc(inode,
962 &BTRFS_I(inode)->io_tree, 965 &BTRFS_I(inode)->io_tree,
963 start, end, NULL, 966 start, end, locked_page,
964 EXTENT_CLEAR_UNLOCK_PAGE | 967 EXTENT_CLEAR_UNLOCK_PAGE |
965 EXTENT_CLEAR_UNLOCK | 968 EXTENT_CLEAR_UNLOCK |
966 EXTENT_CLEAR_DELALLOC | 969 EXTENT_CLEAR_DELALLOC |
@@ -983,8 +986,10 @@ static noinline void async_cow_start(struct btrfs_work *work)
983 compress_file_range(async_cow->inode, async_cow->locked_page, 986 compress_file_range(async_cow->inode, async_cow->locked_page,
984 async_cow->start, async_cow->end, async_cow, 987 async_cow->start, async_cow->end, async_cow,
985 &num_added); 988 &num_added);
986 if (num_added == 0) 989 if (num_added == 0) {
990 btrfs_add_delayed_iput(async_cow->inode);
987 async_cow->inode = NULL; 991 async_cow->inode = NULL;
992 }
988} 993}
989 994
990/* 995/*
@@ -1017,6 +1022,8 @@ static noinline void async_cow_free(struct btrfs_work *work)
1017{ 1022{
1018 struct async_cow *async_cow; 1023 struct async_cow *async_cow;
1019 async_cow = container_of(work, struct async_cow, work); 1024 async_cow = container_of(work, struct async_cow, work);
1025 if (async_cow->inode)
1026 btrfs_add_delayed_iput(async_cow->inode);
1020 kfree(async_cow); 1027 kfree(async_cow);
1021} 1028}
1022 1029
@@ -1035,7 +1042,7 @@ static int cow_file_range_async(struct inode *inode, struct page *locked_page,
1035 while (start < end) { 1042 while (start < end) {
1036 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS); 1043 async_cow = kmalloc(sizeof(*async_cow), GFP_NOFS);
1037 BUG_ON(!async_cow); /* -ENOMEM */ 1044 BUG_ON(!async_cow); /* -ENOMEM */
1038 async_cow->inode = inode; 1045 async_cow->inode = igrab(inode);
1039 async_cow->root = root; 1046 async_cow->root = root;
1040 async_cow->locked_page = locked_page; 1047 async_cow->locked_page = locked_page;
1041 async_cow->start = start; 1048 async_cow->start = start;
@@ -1133,8 +1140,18 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1133 u64 ino = btrfs_ino(inode); 1140 u64 ino = btrfs_ino(inode);
1134 1141
1135 path = btrfs_alloc_path(); 1142 path = btrfs_alloc_path();
1136 if (!path) 1143 if (!path) {
1144 extent_clear_unlock_delalloc(inode,
1145 &BTRFS_I(inode)->io_tree,
1146 start, end, locked_page,
1147 EXTENT_CLEAR_UNLOCK_PAGE |
1148 EXTENT_CLEAR_UNLOCK |
1149 EXTENT_CLEAR_DELALLOC |
1150 EXTENT_CLEAR_DIRTY |
1151 EXTENT_SET_WRITEBACK |
1152 EXTENT_END_WRITEBACK);
1137 return -ENOMEM; 1153 return -ENOMEM;
1154 }
1138 1155
1139 nolock = btrfs_is_free_space_inode(root, inode); 1156 nolock = btrfs_is_free_space_inode(root, inode);
1140 1157
@@ -1144,6 +1161,15 @@ static noinline int run_delalloc_nocow(struct inode *inode,
1144 trans = btrfs_join_transaction(root); 1161 trans = btrfs_join_transaction(root);
1145 1162
1146 if (IS_ERR(trans)) { 1163 if (IS_ERR(trans)) {
1164 extent_clear_unlock_delalloc(inode,
1165 &BTRFS_I(inode)->io_tree,
1166 start, end, locked_page,
1167 EXTENT_CLEAR_UNLOCK_PAGE |
1168 EXTENT_CLEAR_UNLOCK |
1169 EXTENT_CLEAR_DELALLOC |
1170 EXTENT_CLEAR_DIRTY |
1171 EXTENT_SET_WRITEBACK |
1172 EXTENT_END_WRITEBACK);
1147 btrfs_free_path(path); 1173 btrfs_free_path(path);
1148 return PTR_ERR(trans); 1174 return PTR_ERR(trans);
1149 } 1175 }
@@ -1324,8 +1350,11 @@ out_check:
1324 } 1350 }
1325 btrfs_release_path(path); 1351 btrfs_release_path(path);
1326 1352
1327 if (cur_offset <= end && cow_start == (u64)-1) 1353 if (cur_offset <= end && cow_start == (u64)-1) {
1328 cow_start = cur_offset; 1354 cow_start = cur_offset;
1355 cur_offset = end;
1356 }
1357
1329 if (cow_start != (u64)-1) { 1358 if (cow_start != (u64)-1) {
1330 ret = cow_file_range(inode, locked_page, cow_start, end, 1359 ret = cow_file_range(inode, locked_page, cow_start, end,
1331 page_started, nr_written, 1); 1360 page_started, nr_written, 1);
@@ -1344,6 +1373,17 @@ error:
1344 if (!ret) 1373 if (!ret)
1345 ret = err; 1374 ret = err;
1346 1375
1376 if (ret && cur_offset < end)
1377 extent_clear_unlock_delalloc(inode,
1378 &BTRFS_I(inode)->io_tree,
1379 cur_offset, end, locked_page,
1380 EXTENT_CLEAR_UNLOCK_PAGE |
1381 EXTENT_CLEAR_UNLOCK |
1382 EXTENT_CLEAR_DELALLOC |
1383 EXTENT_CLEAR_DIRTY |
1384 EXTENT_SET_WRITEBACK |
1385 EXTENT_END_WRITEBACK);
1386
1347 btrfs_free_path(path); 1387 btrfs_free_path(path);
1348 return ret; 1388 return ret;
1349} 1389}
@@ -1358,20 +1398,23 @@ static int run_delalloc_range(struct inode *inode, struct page *locked_page,
1358 int ret; 1398 int ret;
1359 struct btrfs_root *root = BTRFS_I(inode)->root; 1399 struct btrfs_root *root = BTRFS_I(inode)->root;
1360 1400
1361 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) 1401 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATACOW) {
1362 ret = run_delalloc_nocow(inode, locked_page, start, end, 1402 ret = run_delalloc_nocow(inode, locked_page, start, end,
1363 page_started, 1, nr_written); 1403 page_started, 1, nr_written);
1364 else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) 1404 } else if (BTRFS_I(inode)->flags & BTRFS_INODE_PREALLOC) {
1365 ret = run_delalloc_nocow(inode, locked_page, start, end, 1405 ret = run_delalloc_nocow(inode, locked_page, start, end,
1366 page_started, 0, nr_written); 1406 page_started, 0, nr_written);
1367 else if (!btrfs_test_opt(root, COMPRESS) && 1407 } else if (!btrfs_test_opt(root, COMPRESS) &&
1368 !(BTRFS_I(inode)->force_compress) && 1408 !(BTRFS_I(inode)->force_compress) &&
1369 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) 1409 !(BTRFS_I(inode)->flags & BTRFS_INODE_COMPRESS)) {
1370 ret = cow_file_range(inode, locked_page, start, end, 1410 ret = cow_file_range(inode, locked_page, start, end,
1371 page_started, nr_written, 1); 1411 page_started, nr_written, 1);
1372 else 1412 } else {
1413 set_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
1414 &BTRFS_I(inode)->runtime_flags);
1373 ret = cow_file_range_async(inode, locked_page, start, end, 1415 ret = cow_file_range_async(inode, locked_page, start, end,
1374 page_started, nr_written); 1416 page_started, nr_written);
1417 }
1375 return ret; 1418 return ret;
1376} 1419}
1377 1420
@@ -1572,11 +1615,11 @@ static int btrfs_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
1572 if (btrfs_is_free_space_inode(root, inode)) 1615 if (btrfs_is_free_space_inode(root, inode))
1573 metadata = 2; 1616 metadata = 2;
1574 1617
1575 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1576 if (ret)
1577 return ret;
1578
1579 if (!(rw & REQ_WRITE)) { 1618 if (!(rw & REQ_WRITE)) {
1619 ret = btrfs_bio_wq_end_io(root->fs_info, bio, metadata);
1620 if (ret)
1621 return ret;
1622
1580 if (bio_flags & EXTENT_BIO_COMPRESSED) { 1623 if (bio_flags & EXTENT_BIO_COMPRESSED) {
1581 return btrfs_submit_compressed_read(inode, bio, 1624 return btrfs_submit_compressed_read(inode, bio,
1582 mirror_num, bio_flags); 1625 mirror_num, bio_flags);
@@ -1815,25 +1858,24 @@ out:
1815 * an ordered extent if the range of bytes in the file it covers are 1858 * an ordered extent if the range of bytes in the file it covers are
1816 * fully written. 1859 * fully written.
1817 */ 1860 */
1818static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end) 1861static int btrfs_finish_ordered_io(struct btrfs_ordered_extent *ordered_extent)
1819{ 1862{
1863 struct inode *inode = ordered_extent->inode;
1820 struct btrfs_root *root = BTRFS_I(inode)->root; 1864 struct btrfs_root *root = BTRFS_I(inode)->root;
1821 struct btrfs_trans_handle *trans = NULL; 1865 struct btrfs_trans_handle *trans = NULL;
1822 struct btrfs_ordered_extent *ordered_extent = NULL;
1823 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 1866 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
1824 struct extent_state *cached_state = NULL; 1867 struct extent_state *cached_state = NULL;
1825 int compress_type = 0; 1868 int compress_type = 0;
1826 int ret; 1869 int ret;
1827 bool nolock; 1870 bool nolock;
1828 1871
1829 ret = btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
1830 end - start + 1);
1831 if (!ret)
1832 return 0;
1833 BUG_ON(!ordered_extent); /* Logic error */
1834
1835 nolock = btrfs_is_free_space_inode(root, inode); 1872 nolock = btrfs_is_free_space_inode(root, inode);
1836 1873
1874 if (test_bit(BTRFS_ORDERED_IOERR, &ordered_extent->flags)) {
1875 ret = -EIO;
1876 goto out;
1877 }
1878
1837 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) { 1879 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered_extent->flags)) {
1838 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */ 1880 BUG_ON(!list_empty(&ordered_extent->list)); /* Logic error */
1839 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent); 1881 ret = btrfs_ordered_update_i_size(inode, 0, ordered_extent);
@@ -1889,12 +1931,10 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1889 ordered_extent->file_offset, 1931 ordered_extent->file_offset,
1890 ordered_extent->len); 1932 ordered_extent->len);
1891 } 1933 }
1892 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1934
1893 ordered_extent->file_offset +
1894 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1895 if (ret < 0) { 1935 if (ret < 0) {
1896 btrfs_abort_transaction(trans, root, ret); 1936 btrfs_abort_transaction(trans, root, ret);
1897 goto out; 1937 goto out_unlock;
1898 } 1938 }
1899 1939
1900 add_pending_csums(trans, inode, ordered_extent->file_offset, 1940 add_pending_csums(trans, inode, ordered_extent->file_offset,
@@ -1905,10 +1945,14 @@ static int btrfs_finish_ordered_io(struct inode *inode, u64 start, u64 end)
1905 ret = btrfs_update_inode_fallback(trans, root, inode); 1945 ret = btrfs_update_inode_fallback(trans, root, inode);
1906 if (ret) { /* -ENOMEM or corruption */ 1946 if (ret) { /* -ENOMEM or corruption */
1907 btrfs_abort_transaction(trans, root, ret); 1947 btrfs_abort_transaction(trans, root, ret);
1908 goto out; 1948 goto out_unlock;
1909 } 1949 }
1910 } 1950 }
1911 ret = 0; 1951 ret = 0;
1952out_unlock:
1953 unlock_extent_cached(io_tree, ordered_extent->file_offset,
1954 ordered_extent->file_offset +
1955 ordered_extent->len - 1, &cached_state, GFP_NOFS);
1912out: 1956out:
1913 if (root != root->fs_info->tree_root) 1957 if (root != root->fs_info->tree_root)
1914 btrfs_delalloc_release_metadata(inode, ordered_extent->len); 1958 btrfs_delalloc_release_metadata(inode, ordered_extent->len);
@@ -1919,26 +1963,57 @@ out:
1919 btrfs_end_transaction(trans, root); 1963 btrfs_end_transaction(trans, root);
1920 } 1964 }
1921 1965
1966 if (ret)
1967 clear_extent_uptodate(io_tree, ordered_extent->file_offset,
1968 ordered_extent->file_offset +
1969 ordered_extent->len - 1, NULL, GFP_NOFS);
1970
1971 /*
1972 * This needs to be dont to make sure anybody waiting knows we are done
1973 * upating everything for this ordered extent.
1974 */
1975 btrfs_remove_ordered_extent(inode, ordered_extent);
1976
1922 /* once for us */ 1977 /* once for us */
1923 btrfs_put_ordered_extent(ordered_extent); 1978 btrfs_put_ordered_extent(ordered_extent);
1924 /* once for the tree */ 1979 /* once for the tree */
1925 btrfs_put_ordered_extent(ordered_extent); 1980 btrfs_put_ordered_extent(ordered_extent);
1926 1981
1927 return 0; 1982 return ret;
1928out_unlock: 1983}
1929 unlock_extent_cached(io_tree, ordered_extent->file_offset, 1984
1930 ordered_extent->file_offset + 1985static void finish_ordered_fn(struct btrfs_work *work)
1931 ordered_extent->len - 1, &cached_state, GFP_NOFS); 1986{
1932 goto out; 1987 struct btrfs_ordered_extent *ordered_extent;
1988 ordered_extent = container_of(work, struct btrfs_ordered_extent, work);
1989 btrfs_finish_ordered_io(ordered_extent);
1933} 1990}
1934 1991
1935static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end, 1992static int btrfs_writepage_end_io_hook(struct page *page, u64 start, u64 end,
1936 struct extent_state *state, int uptodate) 1993 struct extent_state *state, int uptodate)
1937{ 1994{
1995 struct inode *inode = page->mapping->host;
1996 struct btrfs_root *root = BTRFS_I(inode)->root;
1997 struct btrfs_ordered_extent *ordered_extent = NULL;
1998 struct btrfs_workers *workers;
1999
1938 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate); 2000 trace_btrfs_writepage_end_io_hook(page, start, end, uptodate);
1939 2001
1940 ClearPagePrivate2(page); 2002 ClearPagePrivate2(page);
1941 return btrfs_finish_ordered_io(page->mapping->host, start, end); 2003 if (!btrfs_dec_test_ordered_pending(inode, &ordered_extent, start,
2004 end - start + 1, uptodate))
2005 return 0;
2006
2007 ordered_extent->work.func = finish_ordered_fn;
2008 ordered_extent->work.flags = 0;
2009
2010 if (btrfs_is_free_space_inode(root, inode))
2011 workers = &root->fs_info->endio_freespace_worker;
2012 else
2013 workers = &root->fs_info->endio_write_workers;
2014 btrfs_queue_worker(workers, &ordered_extent->work);
2015
2016 return 0;
1942} 2017}
1943 2018
1944/* 2019/*
@@ -2072,12 +2147,12 @@ void btrfs_orphan_commit_root(struct btrfs_trans_handle *trans,
2072 struct btrfs_block_rsv *block_rsv; 2147 struct btrfs_block_rsv *block_rsv;
2073 int ret; 2148 int ret;
2074 2149
2075 if (!list_empty(&root->orphan_list) || 2150 if (atomic_read(&root->orphan_inodes) ||
2076 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE) 2151 root->orphan_cleanup_state != ORPHAN_CLEANUP_DONE)
2077 return; 2152 return;
2078 2153
2079 spin_lock(&root->orphan_lock); 2154 spin_lock(&root->orphan_lock);
2080 if (!list_empty(&root->orphan_list)) { 2155 if (atomic_read(&root->orphan_inodes)) {
2081 spin_unlock(&root->orphan_lock); 2156 spin_unlock(&root->orphan_lock);
2082 return; 2157 return;
2083 } 2158 }
@@ -2134,8 +2209,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2134 block_rsv = NULL; 2209 block_rsv = NULL;
2135 } 2210 }
2136 2211
2137 if (list_empty(&BTRFS_I(inode)->i_orphan)) { 2212 if (!test_and_set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2138 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2213 &BTRFS_I(inode)->runtime_flags)) {
2139#if 0 2214#if 0
2140 /* 2215 /*
2141 * For proper ENOSPC handling, we should do orphan 2216 * For proper ENOSPC handling, we should do orphan
@@ -2148,12 +2223,12 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2148 insert = 1; 2223 insert = 1;
2149#endif 2224#endif
2150 insert = 1; 2225 insert = 1;
2226 atomic_dec(&root->orphan_inodes);
2151 } 2227 }
2152 2228
2153 if (!BTRFS_I(inode)->orphan_meta_reserved) { 2229 if (!test_and_set_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2154 BTRFS_I(inode)->orphan_meta_reserved = 1; 2230 &BTRFS_I(inode)->runtime_flags))
2155 reserve = 1; 2231 reserve = 1;
2156 }
2157 spin_unlock(&root->orphan_lock); 2232 spin_unlock(&root->orphan_lock);
2158 2233
2159 /* grab metadata reservation from transaction handle */ 2234 /* grab metadata reservation from transaction handle */
@@ -2166,6 +2241,8 @@ int btrfs_orphan_add(struct btrfs_trans_handle *trans, struct inode *inode)
2166 if (insert >= 1) { 2241 if (insert >= 1) {
2167 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode)); 2242 ret = btrfs_insert_orphan_item(trans, root, btrfs_ino(inode));
2168 if (ret && ret != -EEXIST) { 2243 if (ret && ret != -EEXIST) {
2244 clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2245 &BTRFS_I(inode)->runtime_flags);
2169 btrfs_abort_transaction(trans, root, ret); 2246 btrfs_abort_transaction(trans, root, ret);
2170 return ret; 2247 return ret;
2171 } 2248 }
@@ -2196,15 +2273,13 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2196 int ret = 0; 2273 int ret = 0;
2197 2274
2198 spin_lock(&root->orphan_lock); 2275 spin_lock(&root->orphan_lock);
2199 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 2276 if (test_and_clear_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2200 list_del_init(&BTRFS_I(inode)->i_orphan); 2277 &BTRFS_I(inode)->runtime_flags))
2201 delete_item = 1; 2278 delete_item = 1;
2202 }
2203 2279
2204 if (BTRFS_I(inode)->orphan_meta_reserved) { 2280 if (test_and_clear_bit(BTRFS_INODE_ORPHAN_META_RESERVED,
2205 BTRFS_I(inode)->orphan_meta_reserved = 0; 2281 &BTRFS_I(inode)->runtime_flags))
2206 release_rsv = 1; 2282 release_rsv = 1;
2207 }
2208 spin_unlock(&root->orphan_lock); 2283 spin_unlock(&root->orphan_lock);
2209 2284
2210 if (trans && delete_item) { 2285 if (trans && delete_item) {
@@ -2212,8 +2287,10 @@ int btrfs_orphan_del(struct btrfs_trans_handle *trans, struct inode *inode)
2212 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2287 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
2213 } 2288 }
2214 2289
2215 if (release_rsv) 2290 if (release_rsv) {
2216 btrfs_orphan_release_metadata(inode); 2291 btrfs_orphan_release_metadata(inode);
2292 atomic_dec(&root->orphan_inodes);
2293 }
2217 2294
2218 return 0; 2295 return 0;
2219} 2296}
@@ -2341,6 +2418,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2341 ret = PTR_ERR(trans); 2418 ret = PTR_ERR(trans);
2342 goto out; 2419 goto out;
2343 } 2420 }
2421 printk(KERN_ERR "auto deleting %Lu\n",
2422 found_key.objectid);
2344 ret = btrfs_del_orphan_item(trans, root, 2423 ret = btrfs_del_orphan_item(trans, root,
2345 found_key.objectid); 2424 found_key.objectid);
2346 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */ 2425 BUG_ON(ret); /* -ENOMEM or corruption (JDM: Recheck) */
@@ -2352,9 +2431,8 @@ int btrfs_orphan_cleanup(struct btrfs_root *root)
2352 * add this inode to the orphan list so btrfs_orphan_del does 2431 * add this inode to the orphan list so btrfs_orphan_del does
2353 * the proper thing when we hit it 2432 * the proper thing when we hit it
2354 */ 2433 */
2355 spin_lock(&root->orphan_lock); 2434 set_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
2356 list_add(&BTRFS_I(inode)->i_orphan, &root->orphan_list); 2435 &BTRFS_I(inode)->runtime_flags);
2357 spin_unlock(&root->orphan_lock);
2358 2436
2359 /* if we have links, this was a truncate, lets do that */ 2437 /* if we have links, this was a truncate, lets do that */
2360 if (inode->i_nlink) { 2438 if (inode->i_nlink) {
@@ -2510,7 +2588,7 @@ static void btrfs_read_locked_inode(struct inode *inode)
2510 2588
2511 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item)); 2589 inode_set_bytes(inode, btrfs_inode_nbytes(leaf, inode_item));
2512 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item); 2590 BTRFS_I(inode)->generation = btrfs_inode_generation(leaf, inode_item);
2513 BTRFS_I(inode)->sequence = btrfs_inode_sequence(leaf, inode_item); 2591 inode->i_version = btrfs_inode_sequence(leaf, inode_item);
2514 inode->i_generation = BTRFS_I(inode)->generation; 2592 inode->i_generation = BTRFS_I(inode)->generation;
2515 inode->i_rdev = 0; 2593 inode->i_rdev = 0;
2516 rdev = btrfs_inode_rdev(leaf, inode_item); 2594 rdev = btrfs_inode_rdev(leaf, inode_item);
@@ -2594,7 +2672,7 @@ static void fill_inode_item(struct btrfs_trans_handle *trans,
2594 2672
2595 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode)); 2673 btrfs_set_inode_nbytes(leaf, item, inode_get_bytes(inode));
2596 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation); 2674 btrfs_set_inode_generation(leaf, item, BTRFS_I(inode)->generation);
2597 btrfs_set_inode_sequence(leaf, item, BTRFS_I(inode)->sequence); 2675 btrfs_set_inode_sequence(leaf, item, inode->i_version);
2598 btrfs_set_inode_transid(leaf, item, trans->transid); 2676 btrfs_set_inode_transid(leaf, item, trans->transid);
2599 btrfs_set_inode_rdev(leaf, item, inode->i_rdev); 2677 btrfs_set_inode_rdev(leaf, item, inode->i_rdev);
2600 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags); 2678 btrfs_set_inode_flags(leaf, item, BTRFS_I(inode)->flags);
@@ -2752,6 +2830,8 @@ err:
2752 goto out; 2830 goto out;
2753 2831
2754 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 2832 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
2833 inode_inc_iversion(inode);
2834 inode_inc_iversion(dir);
2755 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME; 2835 inode->i_ctime = dir->i_mtime = dir->i_ctime = CURRENT_TIME;
2756 btrfs_update_inode(trans, root, dir); 2836 btrfs_update_inode(trans, root, dir);
2757out: 2837out:
@@ -3089,6 +3169,7 @@ int btrfs_unlink_subvol(struct btrfs_trans_handle *trans,
3089 } 3169 }
3090 3170
3091 btrfs_i_size_write(dir, dir->i_size - name_len * 2); 3171 btrfs_i_size_write(dir, dir->i_size - name_len * 2);
3172 inode_inc_iversion(dir);
3092 dir->i_mtime = dir->i_ctime = CURRENT_TIME; 3173 dir->i_mtime = dir->i_ctime = CURRENT_TIME;
3093 ret = btrfs_update_inode(trans, root, dir); 3174 ret = btrfs_update_inode(trans, root, dir);
3094 if (ret) 3175 if (ret)
@@ -3607,7 +3688,8 @@ static int btrfs_setsize(struct inode *inode, loff_t newsize)
3607 * any new writes get down to disk quickly. 3688 * any new writes get down to disk quickly.
3608 */ 3689 */
3609 if (newsize == 0) 3690 if (newsize == 0)
3610 BTRFS_I(inode)->ordered_data_close = 1; 3691 set_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
3692 &BTRFS_I(inode)->runtime_flags);
3611 3693
3612 /* we don't support swapfiles, so vmtruncate shouldn't fail */ 3694 /* we don't support swapfiles, so vmtruncate shouldn't fail */
3613 truncate_setsize(inode, newsize); 3695 truncate_setsize(inode, newsize);
@@ -3638,6 +3720,7 @@ static int btrfs_setattr(struct dentry *dentry, struct iattr *attr)
3638 3720
3639 if (attr->ia_valid) { 3721 if (attr->ia_valid) {
3640 setattr_copy(inode, attr); 3722 setattr_copy(inode, attr);
3723 inode_inc_iversion(inode);
3641 err = btrfs_dirty_inode(inode); 3724 err = btrfs_dirty_inode(inode);
3642 3725
3643 if (!err && attr->ia_valid & ATTR_MODE) 3726 if (!err && attr->ia_valid & ATTR_MODE)
@@ -3671,7 +3754,8 @@ void btrfs_evict_inode(struct inode *inode)
3671 btrfs_wait_ordered_range(inode, 0, (u64)-1); 3754 btrfs_wait_ordered_range(inode, 0, (u64)-1);
3672 3755
3673 if (root->fs_info->log_root_recovering) { 3756 if (root->fs_info->log_root_recovering) {
3674 BUG_ON(!list_empty(&BTRFS_I(inode)->i_orphan)); 3757 BUG_ON(!test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
3758 &BTRFS_I(inode)->runtime_flags));
3675 goto no_delete; 3759 goto no_delete;
3676 } 3760 }
3677 3761
@@ -4066,7 +4150,7 @@ static struct inode *new_simple_dir(struct super_block *s,
4066 4150
4067 BTRFS_I(inode)->root = root; 4151 BTRFS_I(inode)->root = root;
4068 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key)); 4152 memcpy(&BTRFS_I(inode)->location, key, sizeof(*key));
4069 BTRFS_I(inode)->dummy_inode = 1; 4153 set_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags);
4070 4154
4071 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID; 4155 inode->i_ino = BTRFS_EMPTY_SUBVOL_DIR_OBJECTID;
4072 inode->i_op = &btrfs_dir_ro_inode_operations; 4156 inode->i_op = &btrfs_dir_ro_inode_operations;
@@ -4370,7 +4454,7 @@ int btrfs_write_inode(struct inode *inode, struct writeback_control *wbc)
4370 int ret = 0; 4454 int ret = 0;
4371 bool nolock = false; 4455 bool nolock = false;
4372 4456
4373 if (BTRFS_I(inode)->dummy_inode) 4457 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4374 return 0; 4458 return 0;
4375 4459
4376 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode)) 4460 if (btrfs_fs_closing(root->fs_info) && btrfs_is_free_space_inode(root, inode))
@@ -4403,7 +4487,7 @@ int btrfs_dirty_inode(struct inode *inode)
4403 struct btrfs_trans_handle *trans; 4487 struct btrfs_trans_handle *trans;
4404 int ret; 4488 int ret;
4405 4489
4406 if (BTRFS_I(inode)->dummy_inode) 4490 if (test_bit(BTRFS_INODE_DUMMY, &BTRFS_I(inode)->runtime_flags))
4407 return 0; 4491 return 0;
4408 4492
4409 trans = btrfs_join_transaction(root); 4493 trans = btrfs_join_transaction(root);
@@ -4431,46 +4515,18 @@ int btrfs_dirty_inode(struct inode *inode)
4431 * This is a copy of file_update_time. We need this so we can return error on 4515 * This is a copy of file_update_time. We need this so we can return error on
4432 * ENOSPC for updating the inode in the case of file write and mmap writes. 4516 * ENOSPC for updating the inode in the case of file write and mmap writes.
4433 */ 4517 */
4434int btrfs_update_time(struct file *file) 4518static int btrfs_update_time(struct inode *inode, struct timespec *now,
4519 int flags)
4435{ 4520{
4436 struct inode *inode = file->f_path.dentry->d_inode; 4521 if (flags & S_VERSION)
4437 struct timespec now;
4438 int ret;
4439 enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0;
4440
4441 /* First try to exhaust all avenues to not sync */
4442 if (IS_NOCMTIME(inode))
4443 return 0;
4444
4445 now = current_fs_time(inode->i_sb);
4446 if (!timespec_equal(&inode->i_mtime, &now))
4447 sync_it = S_MTIME;
4448
4449 if (!timespec_equal(&inode->i_ctime, &now))
4450 sync_it |= S_CTIME;
4451
4452 if (IS_I_VERSION(inode))
4453 sync_it |= S_VERSION;
4454
4455 if (!sync_it)
4456 return 0;
4457
4458 /* Finally allowed to write? Takes lock. */
4459 if (mnt_want_write_file(file))
4460 return 0;
4461
4462 /* Only change inode inside the lock region */
4463 if (sync_it & S_VERSION)
4464 inode_inc_iversion(inode); 4522 inode_inc_iversion(inode);
4465 if (sync_it & S_CTIME) 4523 if (flags & S_CTIME)
4466 inode->i_ctime = now; 4524 inode->i_ctime = *now;
4467 if (sync_it & S_MTIME) 4525 if (flags & S_MTIME)
4468 inode->i_mtime = now; 4526 inode->i_mtime = *now;
4469 ret = btrfs_dirty_inode(inode); 4527 if (flags & S_ATIME)
4470 if (!ret) 4528 inode->i_atime = *now;
4471 mark_inode_dirty_sync(inode); 4529 return btrfs_dirty_inode(inode);
4472 mnt_drop_write(file->f_path.mnt);
4473 return ret;
4474} 4530}
4475 4531
4476/* 4532/*
@@ -4730,6 +4786,7 @@ int btrfs_add_link(struct btrfs_trans_handle *trans,
4730 4786
4731 btrfs_i_size_write(parent_inode, parent_inode->i_size + 4787 btrfs_i_size_write(parent_inode, parent_inode->i_size +
4732 name_len * 2); 4788 name_len * 2);
4789 inode_inc_iversion(parent_inode);
4733 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME; 4790 parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
4734 ret = btrfs_update_inode(trans, root, parent_inode); 4791 ret = btrfs_update_inode(trans, root, parent_inode);
4735 if (ret) 4792 if (ret)
@@ -4937,6 +4994,7 @@ static int btrfs_link(struct dentry *old_dentry, struct inode *dir,
4937 } 4994 }
4938 4995
4939 btrfs_inc_nlink(inode); 4996 btrfs_inc_nlink(inode);
4997 inode_inc_iversion(inode);
4940 inode->i_ctime = CURRENT_TIME; 4998 inode->i_ctime = CURRENT_TIME;
4941 ihold(inode); 4999 ihold(inode);
4942 5000
@@ -5903,9 +5961,7 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
5903 struct btrfs_dio_private *dip = bio->bi_private; 5961 struct btrfs_dio_private *dip = bio->bi_private;
5904 struct inode *inode = dip->inode; 5962 struct inode *inode = dip->inode;
5905 struct btrfs_root *root = BTRFS_I(inode)->root; 5963 struct btrfs_root *root = BTRFS_I(inode)->root;
5906 struct btrfs_trans_handle *trans;
5907 struct btrfs_ordered_extent *ordered = NULL; 5964 struct btrfs_ordered_extent *ordered = NULL;
5908 struct extent_state *cached_state = NULL;
5909 u64 ordered_offset = dip->logical_offset; 5965 u64 ordered_offset = dip->logical_offset;
5910 u64 ordered_bytes = dip->bytes; 5966 u64 ordered_bytes = dip->bytes;
5911 int ret; 5967 int ret;
@@ -5915,73 +5971,14 @@ static void btrfs_endio_direct_write(struct bio *bio, int err)
5915again: 5971again:
5916 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered, 5972 ret = btrfs_dec_test_first_ordered_pending(inode, &ordered,
5917 &ordered_offset, 5973 &ordered_offset,
5918 ordered_bytes); 5974 ordered_bytes, !err);
5919 if (!ret) 5975 if (!ret)
5920 goto out_test; 5976 goto out_test;
5921 5977
5922 BUG_ON(!ordered); 5978 ordered->work.func = finish_ordered_fn;
5923 5979 ordered->work.flags = 0;
5924 trans = btrfs_join_transaction(root); 5980 btrfs_queue_worker(&root->fs_info->endio_write_workers,
5925 if (IS_ERR(trans)) { 5981 &ordered->work);
5926 err = -ENOMEM;
5927 goto out;
5928 }
5929 trans->block_rsv = &root->fs_info->delalloc_block_rsv;
5930
5931 if (test_bit(BTRFS_ORDERED_NOCOW, &ordered->flags)) {
5932 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5933 if (!ret)
5934 err = btrfs_update_inode_fallback(trans, root, inode);
5935 goto out;
5936 }
5937
5938 lock_extent_bits(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5939 ordered->file_offset + ordered->len - 1, 0,
5940 &cached_state);
5941
5942 if (test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags)) {
5943 ret = btrfs_mark_extent_written(trans, inode,
5944 ordered->file_offset,
5945 ordered->file_offset +
5946 ordered->len);
5947 if (ret) {
5948 err = ret;
5949 goto out_unlock;
5950 }
5951 } else {
5952 ret = insert_reserved_file_extent(trans, inode,
5953 ordered->file_offset,
5954 ordered->start,
5955 ordered->disk_len,
5956 ordered->len,
5957 ordered->len,
5958 0, 0, 0,
5959 BTRFS_FILE_EXTENT_REG);
5960 unpin_extent_cache(&BTRFS_I(inode)->extent_tree,
5961 ordered->file_offset, ordered->len);
5962 if (ret) {
5963 err = ret;
5964 WARN_ON(1);
5965 goto out_unlock;
5966 }
5967 }
5968
5969 add_pending_csums(trans, inode, ordered->file_offset, &ordered->list);
5970 ret = btrfs_ordered_update_i_size(inode, 0, ordered);
5971 if (!ret || !test_bit(BTRFS_ORDERED_PREALLOC, &ordered->flags))
5972 btrfs_update_inode_fallback(trans, root, inode);
5973 ret = 0;
5974out_unlock:
5975 unlock_extent_cached(&BTRFS_I(inode)->io_tree, ordered->file_offset,
5976 ordered->file_offset + ordered->len - 1,
5977 &cached_state, GFP_NOFS);
5978out:
5979 btrfs_delalloc_release_metadata(inode, ordered->len);
5980 btrfs_end_transaction(trans, root);
5981 ordered_offset = ordered->file_offset + ordered->len;
5982 btrfs_put_ordered_extent(ordered);
5983 btrfs_put_ordered_extent(ordered);
5984
5985out_test: 5982out_test:
5986 /* 5983 /*
5987 * our bio might span multiple ordered extents. If we haven't 5984 * our bio might span multiple ordered extents. If we haven't
@@ -5990,12 +5987,12 @@ out_test:
5990 if (ordered_offset < dip->logical_offset + dip->bytes) { 5987 if (ordered_offset < dip->logical_offset + dip->bytes) {
5991 ordered_bytes = dip->logical_offset + dip->bytes - 5988 ordered_bytes = dip->logical_offset + dip->bytes -
5992 ordered_offset; 5989 ordered_offset;
5990 ordered = NULL;
5993 goto again; 5991 goto again;
5994 } 5992 }
5995out_done: 5993out_done:
5996 bio->bi_private = dip->private; 5994 bio->bi_private = dip->private;
5997 5995
5998 kfree(dip->csums);
5999 kfree(dip); 5996 kfree(dip);
6000 5997
6001 /* If we had an error make sure to clear the uptodate flag */ 5998 /* If we had an error make sure to clear the uptodate flag */
@@ -6063,9 +6060,12 @@ static inline int __btrfs_submit_dio_bio(struct bio *bio, struct inode *inode,
6063 int ret; 6060 int ret;
6064 6061
6065 bio_get(bio); 6062 bio_get(bio);
6066 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0); 6063
6067 if (ret) 6064 if (!write) {
6068 goto err; 6065 ret = btrfs_bio_wq_end_io(root->fs_info, bio, 0);
6066 if (ret)
6067 goto err;
6068 }
6069 6069
6070 if (skip_sum) 6070 if (skip_sum)
6071 goto map; 6071 goto map;
@@ -6485,13 +6485,13 @@ static int btrfs_releasepage(struct page *page, gfp_t gfp_flags)
6485 6485
6486static void btrfs_invalidatepage(struct page *page, unsigned long offset) 6486static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6487{ 6487{
6488 struct inode *inode = page->mapping->host;
6488 struct extent_io_tree *tree; 6489 struct extent_io_tree *tree;
6489 struct btrfs_ordered_extent *ordered; 6490 struct btrfs_ordered_extent *ordered;
6490 struct extent_state *cached_state = NULL; 6491 struct extent_state *cached_state = NULL;
6491 u64 page_start = page_offset(page); 6492 u64 page_start = page_offset(page);
6492 u64 page_end = page_start + PAGE_CACHE_SIZE - 1; 6493 u64 page_end = page_start + PAGE_CACHE_SIZE - 1;
6493 6494
6494
6495 /* 6495 /*
6496 * we have the page locked, so new writeback can't start, 6496 * we have the page locked, so new writeback can't start,
6497 * and the dirty bit won't be cleared while we are here. 6497 * and the dirty bit won't be cleared while we are here.
@@ -6501,13 +6501,13 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6501 */ 6501 */
6502 wait_on_page_writeback(page); 6502 wait_on_page_writeback(page);
6503 6503
6504 tree = &BTRFS_I(page->mapping->host)->io_tree; 6504 tree = &BTRFS_I(inode)->io_tree;
6505 if (offset) { 6505 if (offset) {
6506 btrfs_releasepage(page, GFP_NOFS); 6506 btrfs_releasepage(page, GFP_NOFS);
6507 return; 6507 return;
6508 } 6508 }
6509 lock_extent_bits(tree, page_start, page_end, 0, &cached_state); 6509 lock_extent_bits(tree, page_start, page_end, 0, &cached_state);
6510 ordered = btrfs_lookup_ordered_extent(page->mapping->host, 6510 ordered = btrfs_lookup_ordered_extent(inode,
6511 page_offset(page)); 6511 page_offset(page));
6512 if (ordered) { 6512 if (ordered) {
6513 /* 6513 /*
@@ -6522,9 +6522,10 @@ static void btrfs_invalidatepage(struct page *page, unsigned long offset)
6522 * whoever cleared the private bit is responsible 6522 * whoever cleared the private bit is responsible
6523 * for the finish_ordered_io 6523 * for the finish_ordered_io
6524 */ 6524 */
6525 if (TestClearPagePrivate2(page)) { 6525 if (TestClearPagePrivate2(page) &&
6526 btrfs_finish_ordered_io(page->mapping->host, 6526 btrfs_dec_test_ordered_pending(inode, &ordered, page_start,
6527 page_start, page_end); 6527 PAGE_CACHE_SIZE, 1)) {
6528 btrfs_finish_ordered_io(ordered);
6528 } 6529 }
6529 btrfs_put_ordered_extent(ordered); 6530 btrfs_put_ordered_extent(ordered);
6530 cached_state = NULL; 6531 cached_state = NULL;
@@ -6576,7 +6577,7 @@ int btrfs_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
6576 6577
6577 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE); 6578 ret = btrfs_delalloc_reserve_space(inode, PAGE_CACHE_SIZE);
6578 if (!ret) { 6579 if (!ret) {
6579 ret = btrfs_update_time(vma->vm_file); 6580 ret = file_update_time(vma->vm_file);
6580 reserved = 1; 6581 reserved = 1;
6581 } 6582 }
6582 if (ret) { 6583 if (ret) {
@@ -6771,7 +6772,8 @@ static int btrfs_truncate(struct inode *inode)
6771 * using truncate to replace the contents of the file will 6772 * using truncate to replace the contents of the file will
6772 * end up with a zero length file after a crash. 6773 * end up with a zero length file after a crash.
6773 */ 6774 */
6774 if (inode->i_size == 0 && BTRFS_I(inode)->ordered_data_close) 6775 if (inode->i_size == 0 && test_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
6776 &BTRFS_I(inode)->runtime_flags))
6775 btrfs_add_ordered_operation(trans, root, inode); 6777 btrfs_add_ordered_operation(trans, root, inode);
6776 6778
6777 while (1) { 6779 while (1) {
@@ -6894,7 +6896,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6894 ei->root = NULL; 6896 ei->root = NULL;
6895 ei->space_info = NULL; 6897 ei->space_info = NULL;
6896 ei->generation = 0; 6898 ei->generation = 0;
6897 ei->sequence = 0;
6898 ei->last_trans = 0; 6899 ei->last_trans = 0;
6899 ei->last_sub_trans = 0; 6900 ei->last_sub_trans = 0;
6900 ei->logged_trans = 0; 6901 ei->logged_trans = 0;
@@ -6909,11 +6910,7 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6909 ei->outstanding_extents = 0; 6910 ei->outstanding_extents = 0;
6910 ei->reserved_extents = 0; 6911 ei->reserved_extents = 0;
6911 6912
6912 ei->ordered_data_close = 0; 6913 ei->runtime_flags = 0;
6913 ei->orphan_meta_reserved = 0;
6914 ei->dummy_inode = 0;
6915 ei->in_defrag = 0;
6916 ei->delalloc_meta_reserved = 0;
6917 ei->force_compress = BTRFS_COMPRESS_NONE; 6914 ei->force_compress = BTRFS_COMPRESS_NONE;
6918 6915
6919 ei->delayed_node = NULL; 6916 ei->delayed_node = NULL;
@@ -6927,7 +6924,6 @@ struct inode *btrfs_alloc_inode(struct super_block *sb)
6927 mutex_init(&ei->log_mutex); 6924 mutex_init(&ei->log_mutex);
6928 mutex_init(&ei->delalloc_mutex); 6925 mutex_init(&ei->delalloc_mutex);
6929 btrfs_ordered_inode_tree_init(&ei->ordered_tree); 6926 btrfs_ordered_inode_tree_init(&ei->ordered_tree);
6930 INIT_LIST_HEAD(&ei->i_orphan);
6931 INIT_LIST_HEAD(&ei->delalloc_inodes); 6927 INIT_LIST_HEAD(&ei->delalloc_inodes);
6932 INIT_LIST_HEAD(&ei->ordered_operations); 6928 INIT_LIST_HEAD(&ei->ordered_operations);
6933 RB_CLEAR_NODE(&ei->rb_node); 6929 RB_CLEAR_NODE(&ei->rb_node);
@@ -6972,13 +6968,12 @@ void btrfs_destroy_inode(struct inode *inode)
6972 spin_unlock(&root->fs_info->ordered_extent_lock); 6968 spin_unlock(&root->fs_info->ordered_extent_lock);
6973 } 6969 }
6974 6970
6975 spin_lock(&root->orphan_lock); 6971 if (test_bit(BTRFS_INODE_HAS_ORPHAN_ITEM,
6976 if (!list_empty(&BTRFS_I(inode)->i_orphan)) { 6972 &BTRFS_I(inode)->runtime_flags)) {
6977 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n", 6973 printk(KERN_INFO "BTRFS: inode %llu still on the orphan list\n",
6978 (unsigned long long)btrfs_ino(inode)); 6974 (unsigned long long)btrfs_ino(inode));
6979 list_del_init(&BTRFS_I(inode)->i_orphan); 6975 atomic_dec(&root->orphan_inodes);
6980 } 6976 }
6981 spin_unlock(&root->orphan_lock);
6982 6977
6983 while (1) { 6978 while (1) {
6984 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1); 6979 ordered = btrfs_lookup_first_ordered_extent(inode, (u64)-1);
@@ -7099,10 +7094,13 @@ static void fixup_inode_flags(struct inode *dir, struct inode *inode)
7099 else 7094 else
7100 b_inode->flags &= ~BTRFS_INODE_NODATACOW; 7095 b_inode->flags &= ~BTRFS_INODE_NODATACOW;
7101 7096
7102 if (b_dir->flags & BTRFS_INODE_COMPRESS) 7097 if (b_dir->flags & BTRFS_INODE_COMPRESS) {
7103 b_inode->flags |= BTRFS_INODE_COMPRESS; 7098 b_inode->flags |= BTRFS_INODE_COMPRESS;
7104 else 7099 b_inode->flags &= ~BTRFS_INODE_NOCOMPRESS;
7105 b_inode->flags &= ~BTRFS_INODE_COMPRESS; 7100 } else {
7101 b_inode->flags &= ~(BTRFS_INODE_COMPRESS |
7102 BTRFS_INODE_NOCOMPRESS);
7103 }
7106} 7104}
7107 7105
7108static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry, 7106static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -7193,6 +7191,9 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7193 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode)) 7191 if (new_inode && new_inode->i_size && S_ISREG(old_inode->i_mode))
7194 btrfs_add_ordered_operation(trans, root, old_inode); 7192 btrfs_add_ordered_operation(trans, root, old_inode);
7195 7193
7194 inode_inc_iversion(old_dir);
7195 inode_inc_iversion(new_dir);
7196 inode_inc_iversion(old_inode);
7196 old_dir->i_ctime = old_dir->i_mtime = ctime; 7197 old_dir->i_ctime = old_dir->i_mtime = ctime;
7197 new_dir->i_ctime = new_dir->i_mtime = ctime; 7198 new_dir->i_ctime = new_dir->i_mtime = ctime;
7198 old_inode->i_ctime = ctime; 7199 old_inode->i_ctime = ctime;
@@ -7219,6 +7220,7 @@ static int btrfs_rename(struct inode *old_dir, struct dentry *old_dentry,
7219 } 7220 }
7220 7221
7221 if (new_inode) { 7222 if (new_inode) {
7223 inode_inc_iversion(new_inode);
7222 new_inode->i_ctime = CURRENT_TIME; 7224 new_inode->i_ctime = CURRENT_TIME;
7223 if (unlikely(btrfs_ino(new_inode) == 7225 if (unlikely(btrfs_ino(new_inode) ==
7224 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) { 7226 BTRFS_EMPTY_SUBVOL_DIR_OBJECTID)) {
@@ -7490,6 +7492,7 @@ static int __btrfs_prealloc_file_range(struct inode *inode, int mode,
7490 cur_offset += ins.offset; 7492 cur_offset += ins.offset;
7491 *alloc_hint = ins.objectid + ins.offset; 7493 *alloc_hint = ins.objectid + ins.offset;
7492 7494
7495 inode_inc_iversion(inode);
7493 inode->i_ctime = CURRENT_TIME; 7496 inode->i_ctime = CURRENT_TIME;
7494 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC; 7497 BTRFS_I(inode)->flags |= BTRFS_INODE_PREALLOC;
7495 if (!(mode & FALLOC_FL_KEEP_SIZE) && 7498 if (!(mode & FALLOC_FL_KEEP_SIZE) &&
@@ -7647,6 +7650,7 @@ static const struct inode_operations btrfs_file_inode_operations = {
7647 .permission = btrfs_permission, 7650 .permission = btrfs_permission,
7648 .fiemap = btrfs_fiemap, 7651 .fiemap = btrfs_fiemap,
7649 .get_acl = btrfs_get_acl, 7652 .get_acl = btrfs_get_acl,
7653 .update_time = btrfs_update_time,
7650}; 7654};
7651static const struct inode_operations btrfs_special_inode_operations = { 7655static const struct inode_operations btrfs_special_inode_operations = {
7652 .getattr = btrfs_getattr, 7656 .getattr = btrfs_getattr,
@@ -7657,6 +7661,7 @@ static const struct inode_operations btrfs_special_inode_operations = {
7657 .listxattr = btrfs_listxattr, 7661 .listxattr = btrfs_listxattr,
7658 .removexattr = btrfs_removexattr, 7662 .removexattr = btrfs_removexattr,
7659 .get_acl = btrfs_get_acl, 7663 .get_acl = btrfs_get_acl,
7664 .update_time = btrfs_update_time,
7660}; 7665};
7661static const struct inode_operations btrfs_symlink_inode_operations = { 7666static const struct inode_operations btrfs_symlink_inode_operations = {
7662 .readlink = generic_readlink, 7667 .readlink = generic_readlink,
@@ -7670,6 +7675,7 @@ static const struct inode_operations btrfs_symlink_inode_operations = {
7670 .listxattr = btrfs_listxattr, 7675 .listxattr = btrfs_listxattr,
7671 .removexattr = btrfs_removexattr, 7676 .removexattr = btrfs_removexattr,
7672 .get_acl = btrfs_get_acl, 7677 .get_acl = btrfs_get_acl,
7678 .update_time = btrfs_update_time,
7673}; 7679};
7674 7680
7675const struct dentry_operations btrfs_dentry_operations = { 7681const struct dentry_operations btrfs_dentry_operations = {
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 14f8e1faa46e..0e92e5763005 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -52,6 +52,7 @@
52#include "locking.h" 52#include "locking.h"
53#include "inode-map.h" 53#include "inode-map.h"
54#include "backref.h" 54#include "backref.h"
55#include "rcu-string.h"
55 56
56/* Mask out flags that are inappropriate for the given type of inode. */ 57/* Mask out flags that are inappropriate for the given type of inode. */
57static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags) 58static inline __u32 btrfs_mask_flags(umode_t mode, __u32 flags)
@@ -261,6 +262,7 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
261 } 262 }
262 263
263 btrfs_update_iflags(inode); 264 btrfs_update_iflags(inode);
265 inode_inc_iversion(inode);
264 inode->i_ctime = CURRENT_TIME; 266 inode->i_ctime = CURRENT_TIME;
265 ret = btrfs_update_inode(trans, root, inode); 267 ret = btrfs_update_inode(trans, root, inode);
266 268
@@ -367,7 +369,7 @@ static noinline int create_subvol(struct btrfs_root *root,
367 return PTR_ERR(trans); 369 return PTR_ERR(trans);
368 370
369 leaf = btrfs_alloc_free_block(trans, root, root->leafsize, 371 leaf = btrfs_alloc_free_block(trans, root, root->leafsize,
370 0, objectid, NULL, 0, 0, 0, 0); 372 0, objectid, NULL, 0, 0, 0);
371 if (IS_ERR(leaf)) { 373 if (IS_ERR(leaf)) {
372 ret = PTR_ERR(leaf); 374 ret = PTR_ERR(leaf);
373 goto fail; 375 goto fail;
@@ -784,39 +786,57 @@ none:
784 return -ENOENT; 786 return -ENOENT;
785} 787}
786 788
787/* 789static struct extent_map *defrag_lookup_extent(struct inode *inode, u64 start)
788 * Validaty check of prev em and next em:
789 * 1) no prev/next em
790 * 2) prev/next em is an hole/inline extent
791 */
792static int check_adjacent_extents(struct inode *inode, struct extent_map *em)
793{ 790{
794 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree; 791 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
795 struct extent_map *prev = NULL, *next = NULL; 792 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
796 int ret = 0; 793 struct extent_map *em;
794 u64 len = PAGE_CACHE_SIZE;
797 795
796 /*
797 * hopefully we have this extent in the tree already, try without
798 * the full extent lock
799 */
798 read_lock(&em_tree->lock); 800 read_lock(&em_tree->lock);
799 prev = lookup_extent_mapping(em_tree, em->start - 1, (u64)-1); 801 em = lookup_extent_mapping(em_tree, start, len);
800 next = lookup_extent_mapping(em_tree, em->start + em->len, (u64)-1);
801 read_unlock(&em_tree->lock); 802 read_unlock(&em_tree->lock);
802 803
803 if ((!prev || prev->block_start >= EXTENT_MAP_LAST_BYTE) && 804 if (!em) {
804 (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)) 805 /* get the big lock and read metadata off disk */
805 ret = 1; 806 lock_extent(io_tree, start, start + len - 1);
806 free_extent_map(prev); 807 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
807 free_extent_map(next); 808 unlock_extent(io_tree, start, start + len - 1);
809
810 if (IS_ERR(em))
811 return NULL;
812 }
813
814 return em;
815}
808 816
817static bool defrag_check_next_extent(struct inode *inode, struct extent_map *em)
818{
819 struct extent_map *next;
820 bool ret = true;
821
822 /* this is the last extent */
823 if (em->start + em->len >= i_size_read(inode))
824 return false;
825
826 next = defrag_lookup_extent(inode, em->start + em->len);
827 if (!next || next->block_start >= EXTENT_MAP_LAST_BYTE)
828 ret = false;
829
830 free_extent_map(next);
809 return ret; 831 return ret;
810} 832}
811 833
812static int should_defrag_range(struct inode *inode, u64 start, u64 len, 834static int should_defrag_range(struct inode *inode, u64 start, int thresh,
813 int thresh, u64 *last_len, u64 *skip, 835 u64 *last_len, u64 *skip, u64 *defrag_end)
814 u64 *defrag_end)
815{ 836{
816 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree; 837 struct extent_map *em;
817 struct extent_map *em = NULL;
818 struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
819 int ret = 1; 838 int ret = 1;
839 bool next_mergeable = true;
820 840
821 /* 841 /*
822 * make sure that once we start defragging an extent, we keep on 842 * make sure that once we start defragging an extent, we keep on
@@ -827,23 +847,9 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
827 847
828 *skip = 0; 848 *skip = 0;
829 849
830 /* 850 em = defrag_lookup_extent(inode, start);
831 * hopefully we have this extent in the tree already, try without 851 if (!em)
832 * the full extent lock 852 return 0;
833 */
834 read_lock(&em_tree->lock);
835 em = lookup_extent_mapping(em_tree, start, len);
836 read_unlock(&em_tree->lock);
837
838 if (!em) {
839 /* get the big lock and read metadata off disk */
840 lock_extent(io_tree, start, start + len - 1);
841 em = btrfs_get_extent(inode, NULL, 0, start, len, 0);
842 unlock_extent(io_tree, start, start + len - 1);
843
844 if (IS_ERR(em))
845 return 0;
846 }
847 853
848 /* this will cover holes, and inline extents */ 854 /* this will cover holes, and inline extents */
849 if (em->block_start >= EXTENT_MAP_LAST_BYTE) { 855 if (em->block_start >= EXTENT_MAP_LAST_BYTE) {
@@ -851,18 +857,15 @@ static int should_defrag_range(struct inode *inode, u64 start, u64 len,
851 goto out; 857 goto out;
852 } 858 }
853 859
854 /* If we have nothing to merge with us, just skip. */ 860 next_mergeable = defrag_check_next_extent(inode, em);
855 if (check_adjacent_extents(inode, em)) {
856 ret = 0;
857 goto out;
858 }
859 861
860 /* 862 /*
861 * we hit a real extent, if it is big don't bother defragging it again 863 * we hit a real extent, if it is big or the next extent is not a
864 * real extent, don't bother defragging it
862 */ 865 */
863 if ((*last_len == 0 || *last_len >= thresh) && em->len >= thresh) 866 if ((*last_len == 0 || *last_len >= thresh) &&
867 (em->len >= thresh || !next_mergeable))
864 ret = 0; 868 ret = 0;
865
866out: 869out:
867 /* 870 /*
868 * last_len ends up being a counter of how many bytes we've defragged. 871 * last_len ends up being a counter of how many bytes we've defragged.
@@ -1141,8 +1144,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
1141 break; 1144 break;
1142 1145
1143 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT, 1146 if (!should_defrag_range(inode, (u64)i << PAGE_CACHE_SHIFT,
1144 PAGE_CACHE_SIZE, extent_thresh, 1147 extent_thresh, &last_len, &skip,
1145 &last_len, &skip, &defrag_end)) { 1148 &defrag_end)) {
1146 unsigned long next; 1149 unsigned long next;
1147 /* 1150 /*
1148 * the should_defrag function tells us how much to skip 1151 * the should_defrag function tells us how much to skip
@@ -1303,6 +1306,14 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1303 ret = -EINVAL; 1306 ret = -EINVAL;
1304 goto out_free; 1307 goto out_free;
1305 } 1308 }
1309 if (device->fs_devices && device->fs_devices->seeding) {
1310 printk(KERN_INFO "btrfs: resizer unable to apply on "
1311 "seeding device %llu\n",
1312 (unsigned long long)devid);
1313 ret = -EINVAL;
1314 goto out_free;
1315 }
1316
1306 if (!strcmp(sizestr, "max")) 1317 if (!strcmp(sizestr, "max"))
1307 new_size = device->bdev->bd_inode->i_size; 1318 new_size = device->bdev->bd_inode->i_size;
1308 else { 1319 else {
@@ -1344,8 +1355,9 @@ static noinline int btrfs_ioctl_resize(struct btrfs_root *root,
1344 do_div(new_size, root->sectorsize); 1355 do_div(new_size, root->sectorsize);
1345 new_size *= root->sectorsize; 1356 new_size *= root->sectorsize;
1346 1357
1347 printk(KERN_INFO "btrfs: new size for %s is %llu\n", 1358 printk_in_rcu(KERN_INFO "btrfs: new size for %s is %llu\n",
1348 device->name, (unsigned long long)new_size); 1359 rcu_str_deref(device->name),
1360 (unsigned long long)new_size);
1349 1361
1350 if (new_size > old_size) { 1362 if (new_size > old_size) {
1351 trans = btrfs_start_transaction(root, 0); 1363 trans = btrfs_start_transaction(root, 0);
@@ -2262,10 +2274,17 @@ static long btrfs_ioctl_dev_info(struct btrfs_root *root, void __user *arg)
2262 di_args->bytes_used = dev->bytes_used; 2274 di_args->bytes_used = dev->bytes_used;
2263 di_args->total_bytes = dev->total_bytes; 2275 di_args->total_bytes = dev->total_bytes;
2264 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid)); 2276 memcpy(di_args->uuid, dev->uuid, sizeof(di_args->uuid));
2265 if (dev->name) 2277 if (dev->name) {
2266 strncpy(di_args->path, dev->name, sizeof(di_args->path)); 2278 struct rcu_string *name;
2267 else 2279
2280 rcu_read_lock();
2281 name = rcu_dereference(dev->name);
2282 strncpy(di_args->path, name->str, sizeof(di_args->path));
2283 rcu_read_unlock();
2284 di_args->path[sizeof(di_args->path) - 1] = 0;
2285 } else {
2268 di_args->path[0] = '\0'; 2286 di_args->path[0] = '\0';
2287 }
2269 2288
2270out: 2289out:
2271 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args))) 2290 if (ret == 0 && copy_to_user(arg, di_args, sizeof(*di_args)))
@@ -2622,6 +2641,7 @@ static noinline long btrfs_ioctl_clone(struct file *file, unsigned long srcfd,
2622 btrfs_mark_buffer_dirty(leaf); 2641 btrfs_mark_buffer_dirty(leaf);
2623 btrfs_release_path(path); 2642 btrfs_release_path(path);
2624 2643
2644 inode_inc_iversion(inode);
2625 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 2645 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2626 2646
2627 /* 2647 /*
@@ -2914,7 +2934,7 @@ long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
2914 up_read(&info->groups_sem); 2934 up_read(&info->groups_sem);
2915 } 2935 }
2916 2936
2917 user_dest = (struct btrfs_ioctl_space_info *) 2937 user_dest = (struct btrfs_ioctl_space_info __user *)
2918 (arg + sizeof(struct btrfs_ioctl_space_args)); 2938 (arg + sizeof(struct btrfs_ioctl_space_args));
2919 2939
2920 if (copy_to_user(user_dest, dest_orig, alloc_size)) 2940 if (copy_to_user(user_dest, dest_orig, alloc_size))
@@ -3042,6 +3062,28 @@ static long btrfs_ioctl_scrub_progress(struct btrfs_root *root,
3042 return ret; 3062 return ret;
3043} 3063}
3044 3064
3065static long btrfs_ioctl_get_dev_stats(struct btrfs_root *root,
3066 void __user *arg, int reset_after_read)
3067{
3068 struct btrfs_ioctl_get_dev_stats *sa;
3069 int ret;
3070
3071 if (reset_after_read && !capable(CAP_SYS_ADMIN))
3072 return -EPERM;
3073
3074 sa = memdup_user(arg, sizeof(*sa));
3075 if (IS_ERR(sa))
3076 return PTR_ERR(sa);
3077
3078 ret = btrfs_get_dev_stats(root, sa, reset_after_read);
3079
3080 if (copy_to_user(arg, sa, sizeof(*sa)))
3081 ret = -EFAULT;
3082
3083 kfree(sa);
3084 return ret;
3085}
3086
3045static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg) 3087static long btrfs_ioctl_ino_to_path(struct btrfs_root *root, void __user *arg)
3046{ 3088{
3047 int ret = 0; 3089 int ret = 0;
@@ -3212,8 +3254,9 @@ void update_ioctl_balance_args(struct btrfs_fs_info *fs_info, int lock,
3212 } 3254 }
3213} 3255}
3214 3256
3215static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg) 3257static long btrfs_ioctl_balance(struct file *file, void __user *arg)
3216{ 3258{
3259 struct btrfs_root *root = BTRFS_I(fdentry(file)->d_inode)->root;
3217 struct btrfs_fs_info *fs_info = root->fs_info; 3260 struct btrfs_fs_info *fs_info = root->fs_info;
3218 struct btrfs_ioctl_balance_args *bargs; 3261 struct btrfs_ioctl_balance_args *bargs;
3219 struct btrfs_balance_control *bctl; 3262 struct btrfs_balance_control *bctl;
@@ -3225,6 +3268,10 @@ static long btrfs_ioctl_balance(struct btrfs_root *root, void __user *arg)
3225 if (fs_info->sb->s_flags & MS_RDONLY) 3268 if (fs_info->sb->s_flags & MS_RDONLY)
3226 return -EROFS; 3269 return -EROFS;
3227 3270
3271 ret = mnt_want_write(file->f_path.mnt);
3272 if (ret)
3273 return ret;
3274
3228 mutex_lock(&fs_info->volume_mutex); 3275 mutex_lock(&fs_info->volume_mutex);
3229 mutex_lock(&fs_info->balance_mutex); 3276 mutex_lock(&fs_info->balance_mutex);
3230 3277
@@ -3291,6 +3338,7 @@ out_bargs:
3291out: 3338out:
3292 mutex_unlock(&fs_info->balance_mutex); 3339 mutex_unlock(&fs_info->balance_mutex);
3293 mutex_unlock(&fs_info->volume_mutex); 3340 mutex_unlock(&fs_info->volume_mutex);
3341 mnt_drop_write(file->f_path.mnt);
3294 return ret; 3342 return ret;
3295} 3343}
3296 3344
@@ -3386,7 +3434,7 @@ long btrfs_ioctl(struct file *file, unsigned int
3386 case BTRFS_IOC_DEV_INFO: 3434 case BTRFS_IOC_DEV_INFO:
3387 return btrfs_ioctl_dev_info(root, argp); 3435 return btrfs_ioctl_dev_info(root, argp);
3388 case BTRFS_IOC_BALANCE: 3436 case BTRFS_IOC_BALANCE:
3389 return btrfs_ioctl_balance(root, NULL); 3437 return btrfs_ioctl_balance(file, NULL);
3390 case BTRFS_IOC_CLONE: 3438 case BTRFS_IOC_CLONE:
3391 return btrfs_ioctl_clone(file, arg, 0, 0, 0); 3439 return btrfs_ioctl_clone(file, arg, 0, 0, 0);
3392 case BTRFS_IOC_CLONE_RANGE: 3440 case BTRFS_IOC_CLONE_RANGE:
@@ -3419,11 +3467,15 @@ long btrfs_ioctl(struct file *file, unsigned int
3419 case BTRFS_IOC_SCRUB_PROGRESS: 3467 case BTRFS_IOC_SCRUB_PROGRESS:
3420 return btrfs_ioctl_scrub_progress(root, argp); 3468 return btrfs_ioctl_scrub_progress(root, argp);
3421 case BTRFS_IOC_BALANCE_V2: 3469 case BTRFS_IOC_BALANCE_V2:
3422 return btrfs_ioctl_balance(root, argp); 3470 return btrfs_ioctl_balance(file, argp);
3423 case BTRFS_IOC_BALANCE_CTL: 3471 case BTRFS_IOC_BALANCE_CTL:
3424 return btrfs_ioctl_balance_ctl(root, arg); 3472 return btrfs_ioctl_balance_ctl(root, arg);
3425 case BTRFS_IOC_BALANCE_PROGRESS: 3473 case BTRFS_IOC_BALANCE_PROGRESS:
3426 return btrfs_ioctl_balance_progress(root, argp); 3474 return btrfs_ioctl_balance_progress(root, argp);
3475 case BTRFS_IOC_GET_DEV_STATS:
3476 return btrfs_ioctl_get_dev_stats(root, argp, 0);
3477 case BTRFS_IOC_GET_AND_RESET_DEV_STATS:
3478 return btrfs_ioctl_get_dev_stats(root, argp, 1);
3427 } 3479 }
3428 3480
3429 return -ENOTTY; 3481 return -ENOTTY;
diff --git a/fs/btrfs/ioctl.h b/fs/btrfs/ioctl.h
index 086e6bdae1c4..497c530724cf 100644
--- a/fs/btrfs/ioctl.h
+++ b/fs/btrfs/ioctl.h
@@ -266,6 +266,35 @@ struct btrfs_ioctl_logical_ino_args {
266 __u64 inodes; 266 __u64 inodes;
267}; 267};
268 268
269enum btrfs_dev_stat_values {
270 /* disk I/O failure stats */
271 BTRFS_DEV_STAT_WRITE_ERRS, /* EIO or EREMOTEIO from lower layers */
272 BTRFS_DEV_STAT_READ_ERRS, /* EIO or EREMOTEIO from lower layers */
273 BTRFS_DEV_STAT_FLUSH_ERRS, /* EIO or EREMOTEIO from lower layers */
274
275 /* stats for indirect indications for I/O failures */
276 BTRFS_DEV_STAT_CORRUPTION_ERRS, /* checksum error, bytenr error or
277 * contents are illegal: this is an
278 * indication that the block was damaged
279 * during read or write, or written to
280 * wrong location or read from wrong
281 * location */
282 BTRFS_DEV_STAT_GENERATION_ERRS, /* an indication that blocks have not
283 * been written */
284
285 BTRFS_DEV_STAT_VALUES_MAX
286};
287
288struct btrfs_ioctl_get_dev_stats {
289 __u64 devid; /* in */
290 __u64 nr_items; /* in/out */
291
292 /* out values: */
293 __u64 values[BTRFS_DEV_STAT_VALUES_MAX];
294
295 __u64 unused[128 - 2 - BTRFS_DEV_STAT_VALUES_MAX]; /* pad to 1k */
296};
297
269#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \ 298#define BTRFS_IOC_SNAP_CREATE _IOW(BTRFS_IOCTL_MAGIC, 1, \
270 struct btrfs_ioctl_vol_args) 299 struct btrfs_ioctl_vol_args)
271#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \ 300#define BTRFS_IOC_DEFRAG _IOW(BTRFS_IOCTL_MAGIC, 2, \
@@ -330,5 +359,9 @@ struct btrfs_ioctl_logical_ino_args {
330 struct btrfs_ioctl_ino_path_args) 359 struct btrfs_ioctl_ino_path_args)
331#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \ 360#define BTRFS_IOC_LOGICAL_INO _IOWR(BTRFS_IOCTL_MAGIC, 36, \
332 struct btrfs_ioctl_ino_path_args) 361 struct btrfs_ioctl_ino_path_args)
362#define BTRFS_IOC_GET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 52, \
363 struct btrfs_ioctl_get_dev_stats)
364#define BTRFS_IOC_GET_AND_RESET_DEV_STATS _IOWR(BTRFS_IOCTL_MAGIC, 53, \
365 struct btrfs_ioctl_get_dev_stats)
333 366
334#endif 367#endif
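
For reference, a hedged userspace sketch of how a tool might consume the dev-stats ABI declared above. Only the struct layout, the BTRFS_DEV_STAT_* indices, and the two ioctl numbers come from this diff; the open()-on-mount-point pattern, the header path, and the error handling are assumptions for illustration (the reset variant additionally needs CAP_SYS_ADMIN per the kernel-side handler).

/* Hedged example, not part of the kernel patch. Assumes the ioctl.h shown
 * above is usable from userspace and that any fd on the mounted btrfs
 * filesystem is an acceptable target for the ioctl. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/types.h>
#include "ioctl.h"		/* btrfs ioctl ABI from the hunk above */

static int print_dev_stats(const char *mnt, __u64 devid, int reset)
{
	struct btrfs_ioctl_get_dev_stats sa;
	int fd, i;

	fd = open(mnt, O_RDONLY);
	if (fd < 0)
		return -1;

	memset(&sa, 0, sizeof(sa));
	sa.devid = devid;
	sa.nr_items = BTRFS_DEV_STAT_VALUES_MAX;	/* in/out per the struct comment */

	if (ioctl(fd, reset ? BTRFS_IOC_GET_AND_RESET_DEV_STATS
			    : BTRFS_IOC_GET_DEV_STATS, &sa) < 0) {
		close(fd);
		return -1;
	}

	for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
		printf("devid %llu stat[%d] = %llu\n",
		       (unsigned long long)sa.devid, i,
		       (unsigned long long)sa.values[i]);
	close(fd);
	return 0;
}
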
diff --git a/fs/btrfs/ordered-data.c b/fs/btrfs/ordered-data.c
index bbf6d0d9aebe..643335a4fe3c 100644
--- a/fs/btrfs/ordered-data.c
+++ b/fs/btrfs/ordered-data.c
@@ -196,7 +196,7 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
196 entry->len = len; 196 entry->len = len;
197 entry->disk_len = disk_len; 197 entry->disk_len = disk_len;
198 entry->bytes_left = len; 198 entry->bytes_left = len;
199 entry->inode = inode; 199 entry->inode = igrab(inode);
200 entry->compress_type = compress_type; 200 entry->compress_type = compress_type;
201 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE) 201 if (type != BTRFS_ORDERED_IO_DONE && type != BTRFS_ORDERED_COMPLETE)
202 set_bit(type, &entry->flags); 202 set_bit(type, &entry->flags);
@@ -212,12 +212,12 @@ static int __btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
212 212
213 trace_btrfs_ordered_extent_add(inode, entry); 213 trace_btrfs_ordered_extent_add(inode, entry);
214 214
215 spin_lock(&tree->lock); 215 spin_lock_irq(&tree->lock);
216 node = tree_insert(&tree->tree, file_offset, 216 node = tree_insert(&tree->tree, file_offset,
217 &entry->rb_node); 217 &entry->rb_node);
218 if (node) 218 if (node)
219 ordered_data_tree_panic(inode, -EEXIST, file_offset); 219 ordered_data_tree_panic(inode, -EEXIST, file_offset);
220 spin_unlock(&tree->lock); 220 spin_unlock_irq(&tree->lock);
221 221
222 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock); 222 spin_lock(&BTRFS_I(inode)->root->fs_info->ordered_extent_lock);
223 list_add_tail(&entry->root_extent_list, 223 list_add_tail(&entry->root_extent_list,
@@ -264,9 +264,9 @@ void btrfs_add_ordered_sum(struct inode *inode,
264 struct btrfs_ordered_inode_tree *tree; 264 struct btrfs_ordered_inode_tree *tree;
265 265
266 tree = &BTRFS_I(inode)->ordered_tree; 266 tree = &BTRFS_I(inode)->ordered_tree;
267 spin_lock(&tree->lock); 267 spin_lock_irq(&tree->lock);
268 list_add_tail(&sum->list, &entry->list); 268 list_add_tail(&sum->list, &entry->list);
269 spin_unlock(&tree->lock); 269 spin_unlock_irq(&tree->lock);
270} 270}
271 271
272/* 272/*
@@ -283,18 +283,19 @@ void btrfs_add_ordered_sum(struct inode *inode,
283 */ 283 */
284int btrfs_dec_test_first_ordered_pending(struct inode *inode, 284int btrfs_dec_test_first_ordered_pending(struct inode *inode,
285 struct btrfs_ordered_extent **cached, 285 struct btrfs_ordered_extent **cached,
286 u64 *file_offset, u64 io_size) 286 u64 *file_offset, u64 io_size, int uptodate)
287{ 287{
288 struct btrfs_ordered_inode_tree *tree; 288 struct btrfs_ordered_inode_tree *tree;
289 struct rb_node *node; 289 struct rb_node *node;
290 struct btrfs_ordered_extent *entry = NULL; 290 struct btrfs_ordered_extent *entry = NULL;
291 int ret; 291 int ret;
292 unsigned long flags;
292 u64 dec_end; 293 u64 dec_end;
293 u64 dec_start; 294 u64 dec_start;
294 u64 to_dec; 295 u64 to_dec;
295 296
296 tree = &BTRFS_I(inode)->ordered_tree; 297 tree = &BTRFS_I(inode)->ordered_tree;
297 spin_lock(&tree->lock); 298 spin_lock_irqsave(&tree->lock, flags);
298 node = tree_search(tree, *file_offset); 299 node = tree_search(tree, *file_offset);
299 if (!node) { 300 if (!node) {
300 ret = 1; 301 ret = 1;
@@ -323,6 +324,9 @@ int btrfs_dec_test_first_ordered_pending(struct inode *inode,
323 (unsigned long long)to_dec); 324 (unsigned long long)to_dec);
324 } 325 }
325 entry->bytes_left -= to_dec; 326 entry->bytes_left -= to_dec;
327 if (!uptodate)
328 set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
329
326 if (entry->bytes_left == 0) 330 if (entry->bytes_left == 0)
327 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); 331 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
328 else 332 else
@@ -332,7 +336,7 @@ out:
332 *cached = entry; 336 *cached = entry;
333 atomic_inc(&entry->refs); 337 atomic_inc(&entry->refs);
334 } 338 }
335 spin_unlock(&tree->lock); 339 spin_unlock_irqrestore(&tree->lock, flags);
336 return ret == 0; 340 return ret == 0;
337} 341}
338 342
@@ -347,15 +351,21 @@ out:
347 */ 351 */
348int btrfs_dec_test_ordered_pending(struct inode *inode, 352int btrfs_dec_test_ordered_pending(struct inode *inode,
349 struct btrfs_ordered_extent **cached, 353 struct btrfs_ordered_extent **cached,
350 u64 file_offset, u64 io_size) 354 u64 file_offset, u64 io_size, int uptodate)
351{ 355{
352 struct btrfs_ordered_inode_tree *tree; 356 struct btrfs_ordered_inode_tree *tree;
353 struct rb_node *node; 357 struct rb_node *node;
354 struct btrfs_ordered_extent *entry = NULL; 358 struct btrfs_ordered_extent *entry = NULL;
359 unsigned long flags;
355 int ret; 360 int ret;
356 361
357 tree = &BTRFS_I(inode)->ordered_tree; 362 tree = &BTRFS_I(inode)->ordered_tree;
358 spin_lock(&tree->lock); 363 spin_lock_irqsave(&tree->lock, flags);
364 if (cached && *cached) {
365 entry = *cached;
366 goto have_entry;
367 }
368
359 node = tree_search(tree, file_offset); 369 node = tree_search(tree, file_offset);
360 if (!node) { 370 if (!node) {
361 ret = 1; 371 ret = 1;
@@ -363,6 +373,7 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
363 } 373 }
364 374
365 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); 375 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
376have_entry:
366 if (!offset_in_entry(entry, file_offset)) { 377 if (!offset_in_entry(entry, file_offset)) {
367 ret = 1; 378 ret = 1;
368 goto out; 379 goto out;
@@ -374,6 +385,9 @@ int btrfs_dec_test_ordered_pending(struct inode *inode,
374 (unsigned long long)io_size); 385 (unsigned long long)io_size);
375 } 386 }
376 entry->bytes_left -= io_size; 387 entry->bytes_left -= io_size;
388 if (!uptodate)
389 set_bit(BTRFS_ORDERED_IOERR, &entry->flags);
390
377 if (entry->bytes_left == 0) 391 if (entry->bytes_left == 0)
378 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags); 392 ret = test_and_set_bit(BTRFS_ORDERED_IO_DONE, &entry->flags);
379 else 393 else
@@ -383,7 +397,7 @@ out:
383 *cached = entry; 397 *cached = entry;
384 atomic_inc(&entry->refs); 398 atomic_inc(&entry->refs);
385 } 399 }
386 spin_unlock(&tree->lock); 400 spin_unlock_irqrestore(&tree->lock, flags);
387 return ret == 0; 401 return ret == 0;
388} 402}
389 403
@@ -399,6 +413,8 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
399 trace_btrfs_ordered_extent_put(entry->inode, entry); 413 trace_btrfs_ordered_extent_put(entry->inode, entry);
400 414
401 if (atomic_dec_and_test(&entry->refs)) { 415 if (atomic_dec_and_test(&entry->refs)) {
416 if (entry->inode)
417 btrfs_add_delayed_iput(entry->inode);
402 while (!list_empty(&entry->list)) { 418 while (!list_empty(&entry->list)) {
403 cur = entry->list.next; 419 cur = entry->list.next;
404 sum = list_entry(cur, struct btrfs_ordered_sum, list); 420 sum = list_entry(cur, struct btrfs_ordered_sum, list);
@@ -411,21 +427,22 @@ void btrfs_put_ordered_extent(struct btrfs_ordered_extent *entry)
411 427
412/* 428/*
413 * remove an ordered extent from the tree. No references are dropped 429 * remove an ordered extent from the tree. No references are dropped
414 * and you must wake_up entry->wait. You must hold the tree lock 430 * and waiters are woken up.
415 * while you call this function.
416 */ 431 */
417static void __btrfs_remove_ordered_extent(struct inode *inode, 432void btrfs_remove_ordered_extent(struct inode *inode,
418 struct btrfs_ordered_extent *entry) 433 struct btrfs_ordered_extent *entry)
419{ 434{
420 struct btrfs_ordered_inode_tree *tree; 435 struct btrfs_ordered_inode_tree *tree;
421 struct btrfs_root *root = BTRFS_I(inode)->root; 436 struct btrfs_root *root = BTRFS_I(inode)->root;
422 struct rb_node *node; 437 struct rb_node *node;
423 438
424 tree = &BTRFS_I(inode)->ordered_tree; 439 tree = &BTRFS_I(inode)->ordered_tree;
440 spin_lock_irq(&tree->lock);
425 node = &entry->rb_node; 441 node = &entry->rb_node;
426 rb_erase(node, &tree->tree); 442 rb_erase(node, &tree->tree);
427 tree->last = NULL; 443 tree->last = NULL;
428 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags); 444 set_bit(BTRFS_ORDERED_COMPLETE, &entry->flags);
445 spin_unlock_irq(&tree->lock);
429 446
430 spin_lock(&root->fs_info->ordered_extent_lock); 447 spin_lock(&root->fs_info->ordered_extent_lock);
431 list_del_init(&entry->root_extent_list); 448 list_del_init(&entry->root_extent_list);
@@ -442,21 +459,6 @@ static void __btrfs_remove_ordered_extent(struct inode *inode,
442 list_del_init(&BTRFS_I(inode)->ordered_operations); 459 list_del_init(&BTRFS_I(inode)->ordered_operations);
443 } 460 }
444 spin_unlock(&root->fs_info->ordered_extent_lock); 461 spin_unlock(&root->fs_info->ordered_extent_lock);
445}
446
447/*
448 * remove an ordered extent from the tree. No references are dropped
449 * but any waiters are woken.
450 */
451void btrfs_remove_ordered_extent(struct inode *inode,
452 struct btrfs_ordered_extent *entry)
453{
454 struct btrfs_ordered_inode_tree *tree;
455
456 tree = &BTRFS_I(inode)->ordered_tree;
457 spin_lock(&tree->lock);
458 __btrfs_remove_ordered_extent(inode, entry);
459 spin_unlock(&tree->lock);
460 wake_up(&entry->wait); 462 wake_up(&entry->wait);
461} 463}
462 464
@@ -621,17 +623,29 @@ void btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
621 if (orig_end > INT_LIMIT(loff_t)) 623 if (orig_end > INT_LIMIT(loff_t))
622 orig_end = INT_LIMIT(loff_t); 624 orig_end = INT_LIMIT(loff_t);
623 } 625 }
624again: 626
625 /* start IO across the range first to instantiate any delalloc 627 /* start IO across the range first to instantiate any delalloc
626 * extents 628 * extents
627 */ 629 */
628 filemap_fdatawrite_range(inode->i_mapping, start, orig_end); 630 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
629 631
630 /* The compression code will leave pages locked but return from 632 /*
631 * writepage without setting the page writeback. Starting again 633 * So with compression we will find and lock a dirty page and clear the
632 * with WB_SYNC_ALL will end up waiting for the IO to actually start. 634 * first one as dirty, setup an async extent, and immediately return
635 * with the entire range locked but with nobody actually marked with
636 * writeback. So we can't just filemap_write_and_wait_range() and
637 * expect it to work since it will just kick off a thread to do the
638 * actual work. So we need to call filemap_fdatawrite_range _again_
639 * since it will wait on the page lock, which won't be unlocked until
640 * after the pages have been marked as writeback and so we're good to go
641 * from there. We have to do this otherwise we'll miss the ordered
642 * extents and that results in badness. Please Josef, do not think you
643 * know better and pull this out at some point in the future, it is
644 * right and you are wrong.
633 */ 645 */
634 filemap_fdatawrite_range(inode->i_mapping, start, orig_end); 646 if (test_bit(BTRFS_INODE_HAS_ASYNC_EXTENT,
647 &BTRFS_I(inode)->runtime_flags))
648 filemap_fdatawrite_range(inode->i_mapping, start, orig_end);
635 649
636 filemap_fdatawait_range(inode->i_mapping, start, orig_end); 650 filemap_fdatawait_range(inode->i_mapping, start, orig_end);
637 651
@@ -657,11 +671,6 @@ again:
657 break; 671 break;
658 end--; 672 end--;
659 } 673 }
660 if (found || test_range_bit(&BTRFS_I(inode)->io_tree, start, orig_end,
661 EXTENT_DELALLOC, 0, NULL)) {
662 schedule_timeout(1);
663 goto again;
664 }
665} 674}
666 675
667/* 676/*
@@ -676,7 +685,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
676 struct btrfs_ordered_extent *entry = NULL; 685 struct btrfs_ordered_extent *entry = NULL;
677 686
678 tree = &BTRFS_I(inode)->ordered_tree; 687 tree = &BTRFS_I(inode)->ordered_tree;
679 spin_lock(&tree->lock); 688 spin_lock_irq(&tree->lock);
680 node = tree_search(tree, file_offset); 689 node = tree_search(tree, file_offset);
681 if (!node) 690 if (!node)
682 goto out; 691 goto out;
@@ -687,7 +696,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_extent(struct inode *inode,
687 if (entry) 696 if (entry)
688 atomic_inc(&entry->refs); 697 atomic_inc(&entry->refs);
689out: 698out:
690 spin_unlock(&tree->lock); 699 spin_unlock_irq(&tree->lock);
691 return entry; 700 return entry;
692} 701}
693 702
@@ -703,7 +712,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
703 struct btrfs_ordered_extent *entry = NULL; 712 struct btrfs_ordered_extent *entry = NULL;
704 713
705 tree = &BTRFS_I(inode)->ordered_tree; 714 tree = &BTRFS_I(inode)->ordered_tree;
706 spin_lock(&tree->lock); 715 spin_lock_irq(&tree->lock);
707 node = tree_search(tree, file_offset); 716 node = tree_search(tree, file_offset);
708 if (!node) { 717 if (!node) {
709 node = tree_search(tree, file_offset + len); 718 node = tree_search(tree, file_offset + len);
@@ -728,7 +737,7 @@ struct btrfs_ordered_extent *btrfs_lookup_ordered_range(struct inode *inode,
728out: 737out:
729 if (entry) 738 if (entry)
730 atomic_inc(&entry->refs); 739 atomic_inc(&entry->refs);
731 spin_unlock(&tree->lock); 740 spin_unlock_irq(&tree->lock);
732 return entry; 741 return entry;
733} 742}
734 743
@@ -744,7 +753,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
744 struct btrfs_ordered_extent *entry = NULL; 753 struct btrfs_ordered_extent *entry = NULL;
745 754
746 tree = &BTRFS_I(inode)->ordered_tree; 755 tree = &BTRFS_I(inode)->ordered_tree;
747 spin_lock(&tree->lock); 756 spin_lock_irq(&tree->lock);
748 node = tree_search(tree, file_offset); 757 node = tree_search(tree, file_offset);
749 if (!node) 758 if (!node)
750 goto out; 759 goto out;
@@ -752,7 +761,7 @@ btrfs_lookup_first_ordered_extent(struct inode *inode, u64 file_offset)
752 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node); 761 entry = rb_entry(node, struct btrfs_ordered_extent, rb_node);
753 atomic_inc(&entry->refs); 762 atomic_inc(&entry->refs);
754out: 763out:
755 spin_unlock(&tree->lock); 764 spin_unlock_irq(&tree->lock);
756 return entry; 765 return entry;
757} 766}
758 767
@@ -764,7 +773,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
764 struct btrfs_ordered_extent *ordered) 773 struct btrfs_ordered_extent *ordered)
765{ 774{
766 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree; 775 struct btrfs_ordered_inode_tree *tree = &BTRFS_I(inode)->ordered_tree;
767 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
768 u64 disk_i_size; 776 u64 disk_i_size;
769 u64 new_i_size; 777 u64 new_i_size;
770 u64 i_size_test; 778 u64 i_size_test;
@@ -779,7 +787,7 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
779 else 787 else
780 offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize); 788 offset = ALIGN(offset, BTRFS_I(inode)->root->sectorsize);
781 789
782 spin_lock(&tree->lock); 790 spin_lock_irq(&tree->lock);
783 disk_i_size = BTRFS_I(inode)->disk_i_size; 791 disk_i_size = BTRFS_I(inode)->disk_i_size;
784 792
785 /* truncate file */ 793 /* truncate file */
@@ -798,14 +806,6 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
798 } 806 }
799 807
800 /* 808 /*
801 * we can't update the disk_isize if there are delalloc bytes
802 * between disk_i_size and this ordered extent
803 */
804 if (test_range_bit(io_tree, disk_i_size, offset - 1,
805 EXTENT_DELALLOC, 0, NULL)) {
806 goto out;
807 }
808 /*
809 * walk backward from this ordered extent to disk_i_size. 809 * walk backward from this ordered extent to disk_i_size.
810 * if we find an ordered extent then we can't update disk i_size 810 * if we find an ordered extent then we can't update disk i_size
811 * yet 811 * yet
@@ -825,15 +825,18 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
825 } 825 }
826 node = prev; 826 node = prev;
827 } 827 }
828 while (node) { 828 for (; node; node = rb_prev(node)) {
829 test = rb_entry(node, struct btrfs_ordered_extent, rb_node); 829 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
830
831 /* We treat this entry as if it doesn't exist */
832 if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
833 continue;
830 if (test->file_offset + test->len <= disk_i_size) 834 if (test->file_offset + test->len <= disk_i_size)
831 break; 835 break;
832 if (test->file_offset >= i_size) 836 if (test->file_offset >= i_size)
833 break; 837 break;
834 if (test->file_offset >= disk_i_size) 838 if (test->file_offset >= disk_i_size)
835 goto out; 839 goto out;
836 node = rb_prev(node);
837 } 840 }
838 new_i_size = min_t(u64, offset, i_size); 841 new_i_size = min_t(u64, offset, i_size);
839 842
@@ -851,43 +854,49 @@ int btrfs_ordered_update_i_size(struct inode *inode, u64 offset,
851 else 854 else
852 node = rb_first(&tree->tree); 855 node = rb_first(&tree->tree);
853 } 856 }
854 i_size_test = 0; 857
855 if (node) { 858 /*
856 /* 859 * We are looking for an area between our current extent and the next
857 * do we have an area where IO might have finished 860 * ordered extent to update the i_size to. There are 3 cases here
858 * between our ordered extent and the next one. 861 *
859 */ 862 * 1) We don't actually have anything and we can update to i_size.
863 * 2) We have stuff but they already did their i_size update so again we
864 * can just update to i_size.
865 * 3) We have an outstanding ordered extent so the most we can update
866 * our disk_i_size to is the start of the next offset.
867 */
868 i_size_test = i_size;
869 for (; node; node = rb_next(node)) {
860 test = rb_entry(node, struct btrfs_ordered_extent, rb_node); 870 test = rb_entry(node, struct btrfs_ordered_extent, rb_node);
861 if (test->file_offset > offset) 871
872 if (test_bit(BTRFS_ORDERED_UPDATED_ISIZE, &test->flags))
873 continue;
874 if (test->file_offset > offset) {
862 i_size_test = test->file_offset; 875 i_size_test = test->file_offset;
863 } else { 876 break;
864 i_size_test = i_size; 877 }
865 } 878 }
866 879
867 /* 880 /*
868 * i_size_test is the end of a region after this ordered 881 * i_size_test is the end of a region after this ordered
869 * extent where there are no ordered extents. As long as there 882 * extent where there are no ordered extents, we can safely set
870 * are no delalloc bytes in this area, it is safe to update 883 * disk_i_size to this.
871 * disk_i_size to the end of the region.
872 */ 884 */
873 if (i_size_test > offset && 885 if (i_size_test > offset)
874 !test_range_bit(io_tree, offset, i_size_test - 1,
875 EXTENT_DELALLOC, 0, NULL)) {
876 new_i_size = min_t(u64, i_size_test, i_size); 886 new_i_size = min_t(u64, i_size_test, i_size);
877 }
878 BTRFS_I(inode)->disk_i_size = new_i_size; 887 BTRFS_I(inode)->disk_i_size = new_i_size;
879 ret = 0; 888 ret = 0;
880out: 889out:
881 /* 890 /*
882 * we need to remove the ordered extent with the tree lock held 891 * We need to do this because we can't remove ordered extents until
883 * so that other people calling this function don't find our fully 892 * after the disk_i_size has been updated and then the inode has been
884 * processed ordered entry and skip updating the i_size 893 * updated to reflect the change, so we need to tell anybody who finds
894 * this ordered extent that we've already done all the real work, we
895 * just haven't completed all the other work.
885 */ 896 */
886 if (ordered) 897 if (ordered)
887 __btrfs_remove_ordered_extent(inode, ordered); 898 set_bit(BTRFS_ORDERED_UPDATED_ISIZE, &ordered->flags);
888 spin_unlock(&tree->lock); 899 spin_unlock_irq(&tree->lock);
889 if (ordered)
890 wake_up(&ordered->wait);
891 return ret; 900 return ret;
892} 901}
893 902
@@ -912,7 +921,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
912 if (!ordered) 921 if (!ordered)
913 return 1; 922 return 1;
914 923
915 spin_lock(&tree->lock); 924 spin_lock_irq(&tree->lock);
916 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) { 925 list_for_each_entry_reverse(ordered_sum, &ordered->list, list) {
917 if (disk_bytenr >= ordered_sum->bytenr) { 926 if (disk_bytenr >= ordered_sum->bytenr) {
918 num_sectors = ordered_sum->len / sectorsize; 927 num_sectors = ordered_sum->len / sectorsize;
@@ -927,7 +936,7 @@ int btrfs_find_ordered_sum(struct inode *inode, u64 offset, u64 disk_bytenr,
927 } 936 }
928 } 937 }
929out: 938out:
930 spin_unlock(&tree->lock); 939 spin_unlock_irq(&tree->lock);
931 btrfs_put_ordered_extent(ordered); 940 btrfs_put_ordered_extent(ordered);
932 return ret; 941 return ret;
933} 942}
diff --git a/fs/btrfs/ordered-data.h b/fs/btrfs/ordered-data.h
index c355ad4dc1a6..e03c560d2997 100644
--- a/fs/btrfs/ordered-data.h
+++ b/fs/btrfs/ordered-data.h
@@ -74,6 +74,12 @@ struct btrfs_ordered_sum {
74 74
75#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */ 75#define BTRFS_ORDERED_DIRECT 5 /* set when we're doing DIO with this extent */
76 76
77#define BTRFS_ORDERED_IOERR 6 /* We had an io error when writing this out */
78
79#define BTRFS_ORDERED_UPDATED_ISIZE 7 /* indicates whether this ordered extent
80 * has done its due diligence in updating
81 * the isize. */
82
77struct btrfs_ordered_extent { 83struct btrfs_ordered_extent {
78 /* logical offset in the file */ 84 /* logical offset in the file */
79 u64 file_offset; 85 u64 file_offset;
@@ -113,6 +119,8 @@ struct btrfs_ordered_extent {
113 119
114 /* a per root list of all the pending ordered extents */ 120 /* a per root list of all the pending ordered extents */
115 struct list_head root_extent_list; 121 struct list_head root_extent_list;
122
123 struct btrfs_work work;
116}; 124};
117 125
118 126
@@ -143,10 +151,11 @@ void btrfs_remove_ordered_extent(struct inode *inode,
143 struct btrfs_ordered_extent *entry); 151 struct btrfs_ordered_extent *entry);
144int btrfs_dec_test_ordered_pending(struct inode *inode, 152int btrfs_dec_test_ordered_pending(struct inode *inode,
145 struct btrfs_ordered_extent **cached, 153 struct btrfs_ordered_extent **cached,
146 u64 file_offset, u64 io_size); 154 u64 file_offset, u64 io_size, int uptodate);
147int btrfs_dec_test_first_ordered_pending(struct inode *inode, 155int btrfs_dec_test_first_ordered_pending(struct inode *inode,
148 struct btrfs_ordered_extent **cached, 156 struct btrfs_ordered_extent **cached,
149 u64 *file_offset, u64 io_size); 157 u64 *file_offset, u64 io_size,
158 int uptodate);
150int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset, 159int btrfs_add_ordered_extent(struct inode *inode, u64 file_offset,
151 u64 start, u64 len, u64 disk_len, int type); 160 u64 start, u64 len, u64 disk_len, int type);
152int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset, 161int btrfs_add_ordered_extent_dio(struct inode *inode, u64 file_offset,
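
The new int uptodate argument on the two dec_test helpers lets the I/O completion path record failed writes directly on the ordered extent (BTRFS_ORDERED_IOERR) instead of relying on page state. A minimal sketch of how a completion path might pass it through; the helper name and the simplified teardown below are illustrative assumptions, the real callers live in fs/btrfs/inode.c and do considerably more work before removing the extent.

/* Illustrative only: simplified end-io style caller of the new interface. */
static void example_complete_ordered_range(struct inode *inode,
					   u64 start, u64 len, int err)
{
	struct btrfs_ordered_extent *ordered = NULL;

	/* uptodate == 0 marks the matching extent with BTRFS_ORDERED_IOERR */
	if (btrfs_dec_test_ordered_pending(inode, &ordered, start, len, !err)) {
		/* the last outstanding bytes completed; with the reworked
		 * locking no tree lock is needed around the removal */
		btrfs_remove_ordered_extent(inode, ordered);
	}
	if (ordered)
		btrfs_put_ordered_extent(ordered);	/* drop the ref taken via 'cached' */
}
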
diff --git a/fs/btrfs/print-tree.c b/fs/btrfs/print-tree.c
index f38e452486b8..5e23684887eb 100644
--- a/fs/btrfs/print-tree.c
+++ b/fs/btrfs/print-tree.c
@@ -294,6 +294,9 @@ void btrfs_print_leaf(struct btrfs_root *root, struct extent_buffer *l)
294 btrfs_dev_extent_chunk_offset(l, dev_extent), 294 btrfs_dev_extent_chunk_offset(l, dev_extent),
295 (unsigned long long) 295 (unsigned long long)
296 btrfs_dev_extent_length(l, dev_extent)); 296 btrfs_dev_extent_length(l, dev_extent));
297 case BTRFS_DEV_STATS_KEY:
298 printk(KERN_INFO "\t\tdevice stats\n");
299 break;
297 }; 300 };
298 } 301 }
299} 302}
diff --git a/fs/btrfs/rcu-string.h b/fs/btrfs/rcu-string.h
new file mode 100644
index 000000000000..9e111e4576d4
--- /dev/null
+++ b/fs/btrfs/rcu-string.h
@@ -0,0 +1,56 @@
1/*
2 * Copyright (C) 2012 Red Hat. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19struct rcu_string {
20 struct rcu_head rcu;
21 char str[0];
22};
23
24static inline struct rcu_string *rcu_string_strdup(const char *src, gfp_t mask)
25{
26 size_t len = strlen(src) + 1;
27 struct rcu_string *ret = kzalloc(sizeof(struct rcu_string) +
28 (len * sizeof(char)), mask);
29 if (!ret)
30 return ret;
31 strncpy(ret->str, src, len);
32 return ret;
33}
34
35static inline void rcu_string_free(struct rcu_string *str)
36{
37 if (str)
38 kfree_rcu(str, rcu);
39}
40
41#define printk_in_rcu(fmt, ...) do { \
42 rcu_read_lock(); \
43 printk(fmt, __VA_ARGS__); \
44 rcu_read_unlock(); \
45} while (0)
46
47#define printk_ratelimited_in_rcu(fmt, ...) do { \
48 rcu_read_lock(); \
49 printk_ratelimited(fmt, __VA_ARGS__); \
50 rcu_read_unlock(); \
51} while (0)
52
53#define rcu_str_deref(rcu_str) ({ \
54 struct rcu_string *__str = rcu_dereference(rcu_str); \
55 __str->str; \
56})
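
The helpers above exist so that device name strings can be replaced and freed while concurrent readers are still printing them. A minimal usage sketch under stated assumptions: the struct, field, and function names outside rcu-string.h are examples only (btrfs_device gains an equivalent "struct rcu_string __rcu *name" field in this series), and updates are assumed to be serialized by the caller.

/* Sketch of the intended publish/read/free pattern, not actual btrfs code. */
struct example_dev {
	struct rcu_string __rcu *name;
};

static int example_rename(struct example_dev *dev, const char *path)
{
	struct rcu_string *new_name, *old_name;

	new_name = rcu_string_strdup(path, GFP_NOFS);
	if (!new_name)
		return -ENOMEM;

	old_name = rcu_dereference_protected(dev->name, 1); /* updates serialized by caller */
	rcu_assign_pointer(dev->name, new_name);
	rcu_string_free(old_name);	/* freed after a grace period via kfree_rcu */

	/* readers only need rcu_read_lock(); printk_in_rcu() wraps that */
	printk_in_rcu(KERN_INFO "example: device renamed to %s\n",
		      rcu_str_deref(dev->name));
	return 0;
}
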
diff --git a/fs/btrfs/reada.c b/fs/btrfs/reada.c
index ac5d01085884..48a4882d8ad5 100644
--- a/fs/btrfs/reada.c
+++ b/fs/btrfs/reada.c
@@ -718,13 +718,18 @@ static void reada_start_machine_worker(struct btrfs_work *work)
718{ 718{
719 struct reada_machine_work *rmw; 719 struct reada_machine_work *rmw;
720 struct btrfs_fs_info *fs_info; 720 struct btrfs_fs_info *fs_info;
721 int old_ioprio;
721 722
722 rmw = container_of(work, struct reada_machine_work, work); 723 rmw = container_of(work, struct reada_machine_work, work);
723 fs_info = rmw->fs_info; 724 fs_info = rmw->fs_info;
724 725
725 kfree(rmw); 726 kfree(rmw);
726 727
728 old_ioprio = IOPRIO_PRIO_VALUE(task_nice_ioclass(current),
729 task_nice_ioprio(current));
730 set_task_ioprio(current, BTRFS_IOPRIO_READA);
727 __reada_start_machine(fs_info); 731 __reada_start_machine(fs_info);
732 set_task_ioprio(current, old_ioprio);
728} 733}
729 734
730static void __reada_start_machine(struct btrfs_fs_info *fs_info) 735static void __reada_start_machine(struct btrfs_fs_info *fs_info)
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 2f3d6f917fb3..b223620cd5a6 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -26,6 +26,7 @@
26#include "backref.h" 26#include "backref.h"
27#include "extent_io.h" 27#include "extent_io.h"
28#include "check-integrity.h" 28#include "check-integrity.h"
29#include "rcu-string.h"
29 30
30/* 31/*
31 * This is only the first step towards a full-features scrub. It reads all 32 * This is only the first step towards a full-features scrub. It reads all
@@ -50,7 +51,7 @@ struct scrub_dev;
50struct scrub_page { 51struct scrub_page {
51 struct scrub_block *sblock; 52 struct scrub_block *sblock;
52 struct page *page; 53 struct page *page;
53 struct block_device *bdev; 54 struct btrfs_device *dev;
54 u64 flags; /* extent flags */ 55 u64 flags; /* extent flags */
55 u64 generation; 56 u64 generation;
56 u64 logical; 57 u64 logical;
@@ -86,6 +87,7 @@ struct scrub_block {
86 unsigned int header_error:1; 87 unsigned int header_error:1;
87 unsigned int checksum_error:1; 88 unsigned int checksum_error:1;
88 unsigned int no_io_error_seen:1; 89 unsigned int no_io_error_seen:1;
90 unsigned int generation_error:1; /* also sets header_error */
89 }; 91 };
90}; 92};
91 93
@@ -319,10 +321,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
319 * hold all of the paths here 321 * hold all of the paths here
320 */ 322 */
321 for (i = 0; i < ipath->fspath->elem_cnt; ++i) 323 for (i = 0; i < ipath->fspath->elem_cnt; ++i)
322 printk(KERN_WARNING "btrfs: %s at logical %llu on dev " 324 printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
323 "%s, sector %llu, root %llu, inode %llu, offset %llu, " 325 "%s, sector %llu, root %llu, inode %llu, offset %llu, "
324 "length %llu, links %u (path: %s)\n", swarn->errstr, 326 "length %llu, links %u (path: %s)\n", swarn->errstr,
325 swarn->logical, swarn->dev->name, 327 swarn->logical, rcu_str_deref(swarn->dev->name),
326 (unsigned long long)swarn->sector, root, inum, offset, 328 (unsigned long long)swarn->sector, root, inum, offset,
327 min(isize - offset, (u64)PAGE_SIZE), nlink, 329 min(isize - offset, (u64)PAGE_SIZE), nlink,
328 (char *)(unsigned long)ipath->fspath->val[i]); 330 (char *)(unsigned long)ipath->fspath->val[i]);
@@ -331,10 +333,10 @@ static int scrub_print_warning_inode(u64 inum, u64 offset, u64 root, void *ctx)
331 return 0; 333 return 0;
332 334
333err: 335err:
334 printk(KERN_WARNING "btrfs: %s at logical %llu on dev " 336 printk_in_rcu(KERN_WARNING "btrfs: %s at logical %llu on dev "
335 "%s, sector %llu, root %llu, inode %llu, offset %llu: path " 337 "%s, sector %llu, root %llu, inode %llu, offset %llu: path "
336 "resolving failed with ret=%d\n", swarn->errstr, 338 "resolving failed with ret=%d\n", swarn->errstr,
337 swarn->logical, swarn->dev->name, 339 swarn->logical, rcu_str_deref(swarn->dev->name),
338 (unsigned long long)swarn->sector, root, inum, offset, ret); 340 (unsigned long long)swarn->sector, root, inum, offset, ret);
339 341
340 free_ipath(ipath); 342 free_ipath(ipath);
@@ -389,10 +391,11 @@ static void scrub_print_warning(const char *errstr, struct scrub_block *sblock)
389 do { 391 do {
390 ret = tree_backref_for_extent(&ptr, eb, ei, item_size, 392 ret = tree_backref_for_extent(&ptr, eb, ei, item_size,
391 &ref_root, &ref_level); 393 &ref_root, &ref_level);
392 printk(KERN_WARNING 394 printk_in_rcu(KERN_WARNING
393 "btrfs: %s at logical %llu on dev %s, " 395 "btrfs: %s at logical %llu on dev %s, "
394 "sector %llu: metadata %s (level %d) in tree " 396 "sector %llu: metadata %s (level %d) in tree "
395 "%llu\n", errstr, swarn.logical, dev->name, 397 "%llu\n", errstr, swarn.logical,
398 rcu_str_deref(dev->name),
396 (unsigned long long)swarn.sector, 399 (unsigned long long)swarn.sector,
397 ref_level ? "node" : "leaf", 400 ref_level ? "node" : "leaf",
398 ret < 0 ? -1 : ref_level, 401 ret < 0 ? -1 : ref_level,
@@ -579,9 +582,11 @@ out:
579 spin_lock(&sdev->stat_lock); 582 spin_lock(&sdev->stat_lock);
580 ++sdev->stat.uncorrectable_errors; 583 ++sdev->stat.uncorrectable_errors;
581 spin_unlock(&sdev->stat_lock); 584 spin_unlock(&sdev->stat_lock);
582 printk_ratelimited(KERN_ERR 585
586 printk_ratelimited_in_rcu(KERN_ERR
583 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n", 587 "btrfs: unable to fixup (nodatasum) error at logical %llu on dev %s\n",
584 (unsigned long long)fixup->logical, sdev->dev->name); 588 (unsigned long long)fixup->logical,
589 rcu_str_deref(sdev->dev->name));
585 } 590 }
586 591
587 btrfs_free_path(path); 592 btrfs_free_path(path);
@@ -675,6 +680,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
675 sdev->stat.read_errors++; 680 sdev->stat.read_errors++;
676 sdev->stat.uncorrectable_errors++; 681 sdev->stat.uncorrectable_errors++;
677 spin_unlock(&sdev->stat_lock); 682 spin_unlock(&sdev->stat_lock);
683 btrfs_dev_stat_inc_and_print(sdev->dev,
684 BTRFS_DEV_STAT_READ_ERRS);
678 goto out; 685 goto out;
679 } 686 }
680 687
@@ -686,6 +693,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
686 sdev->stat.read_errors++; 693 sdev->stat.read_errors++;
687 sdev->stat.uncorrectable_errors++; 694 sdev->stat.uncorrectable_errors++;
688 spin_unlock(&sdev->stat_lock); 695 spin_unlock(&sdev->stat_lock);
696 btrfs_dev_stat_inc_and_print(sdev->dev,
697 BTRFS_DEV_STAT_READ_ERRS);
689 goto out; 698 goto out;
690 } 699 }
691 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS); 700 BUG_ON(failed_mirror_index >= BTRFS_MAX_MIRRORS);
@@ -699,6 +708,8 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
699 sdev->stat.read_errors++; 708 sdev->stat.read_errors++;
700 sdev->stat.uncorrectable_errors++; 709 sdev->stat.uncorrectable_errors++;
701 spin_unlock(&sdev->stat_lock); 710 spin_unlock(&sdev->stat_lock);
711 btrfs_dev_stat_inc_and_print(sdev->dev,
712 BTRFS_DEV_STAT_READ_ERRS);
702 goto out; 713 goto out;
703 } 714 }
704 715
@@ -725,12 +736,16 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
725 spin_unlock(&sdev->stat_lock); 736 spin_unlock(&sdev->stat_lock);
726 if (__ratelimit(&_rs)) 737 if (__ratelimit(&_rs))
727 scrub_print_warning("i/o error", sblock_to_check); 738 scrub_print_warning("i/o error", sblock_to_check);
739 btrfs_dev_stat_inc_and_print(sdev->dev,
740 BTRFS_DEV_STAT_READ_ERRS);
728 } else if (sblock_bad->checksum_error) { 741 } else if (sblock_bad->checksum_error) {
729 spin_lock(&sdev->stat_lock); 742 spin_lock(&sdev->stat_lock);
730 sdev->stat.csum_errors++; 743 sdev->stat.csum_errors++;
731 spin_unlock(&sdev->stat_lock); 744 spin_unlock(&sdev->stat_lock);
732 if (__ratelimit(&_rs)) 745 if (__ratelimit(&_rs))
733 scrub_print_warning("checksum error", sblock_to_check); 746 scrub_print_warning("checksum error", sblock_to_check);
747 btrfs_dev_stat_inc_and_print(sdev->dev,
748 BTRFS_DEV_STAT_CORRUPTION_ERRS);
734 } else if (sblock_bad->header_error) { 749 } else if (sblock_bad->header_error) {
735 spin_lock(&sdev->stat_lock); 750 spin_lock(&sdev->stat_lock);
736 sdev->stat.verify_errors++; 751 sdev->stat.verify_errors++;
@@ -738,6 +753,12 @@ static int scrub_handle_errored_block(struct scrub_block *sblock_to_check)
738 if (__ratelimit(&_rs)) 753 if (__ratelimit(&_rs))
739 scrub_print_warning("checksum/header error", 754 scrub_print_warning("checksum/header error",
740 sblock_to_check); 755 sblock_to_check);
756 if (sblock_bad->generation_error)
757 btrfs_dev_stat_inc_and_print(sdev->dev,
758 BTRFS_DEV_STAT_GENERATION_ERRS);
759 else
760 btrfs_dev_stat_inc_and_print(sdev->dev,
761 BTRFS_DEV_STAT_CORRUPTION_ERRS);
741 } 762 }
742 763
743 if (sdev->readonly) 764 if (sdev->readonly)
@@ -919,18 +940,20 @@ corrected_error:
919 spin_lock(&sdev->stat_lock); 940 spin_lock(&sdev->stat_lock);
920 sdev->stat.corrected_errors++; 941 sdev->stat.corrected_errors++;
921 spin_unlock(&sdev->stat_lock); 942 spin_unlock(&sdev->stat_lock);
922 printk_ratelimited(KERN_ERR 943 printk_ratelimited_in_rcu(KERN_ERR
923 "btrfs: fixed up error at logical %llu on dev %s\n", 944 "btrfs: fixed up error at logical %llu on dev %s\n",
924 (unsigned long long)logical, sdev->dev->name); 945 (unsigned long long)logical,
946 rcu_str_deref(sdev->dev->name));
925 } 947 }
926 } else { 948 } else {
927did_not_correct_error: 949did_not_correct_error:
928 spin_lock(&sdev->stat_lock); 950 spin_lock(&sdev->stat_lock);
929 sdev->stat.uncorrectable_errors++; 951 sdev->stat.uncorrectable_errors++;
930 spin_unlock(&sdev->stat_lock); 952 spin_unlock(&sdev->stat_lock);
931 printk_ratelimited(KERN_ERR 953 printk_ratelimited_in_rcu(KERN_ERR
932 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n", 954 "btrfs: unable to fixup (regular) error at logical %llu on dev %s\n",
933 (unsigned long long)logical, sdev->dev->name); 955 (unsigned long long)logical,
956 rcu_str_deref(sdev->dev->name));
934 } 957 }
935 958
936out: 959out:
@@ -998,8 +1021,8 @@ static int scrub_setup_recheck_block(struct scrub_dev *sdev,
998 page = sblock->pagev + page_index; 1021 page = sblock->pagev + page_index;
999 page->logical = logical; 1022 page->logical = logical;
1000 page->physical = bbio->stripes[mirror_index].physical; 1023 page->physical = bbio->stripes[mirror_index].physical;
1001 /* for missing devices, bdev is NULL */ 1024 /* for missing devices, dev->bdev is NULL */
1002 page->bdev = bbio->stripes[mirror_index].dev->bdev; 1025 page->dev = bbio->stripes[mirror_index].dev;
1003 page->mirror_num = mirror_index + 1; 1026 page->mirror_num = mirror_index + 1;
1004 page->page = alloc_page(GFP_NOFS); 1027 page->page = alloc_page(GFP_NOFS);
1005 if (!page->page) { 1028 if (!page->page) {
@@ -1043,7 +1066,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1043 struct scrub_page *page = sblock->pagev + page_num; 1066 struct scrub_page *page = sblock->pagev + page_num;
1044 DECLARE_COMPLETION_ONSTACK(complete); 1067 DECLARE_COMPLETION_ONSTACK(complete);
1045 1068
1046 if (page->bdev == NULL) { 1069 if (page->dev->bdev == NULL) {
1047 page->io_error = 1; 1070 page->io_error = 1;
1048 sblock->no_io_error_seen = 0; 1071 sblock->no_io_error_seen = 0;
1049 continue; 1072 continue;
@@ -1053,7 +1076,7 @@ static int scrub_recheck_block(struct btrfs_fs_info *fs_info,
1053 bio = bio_alloc(GFP_NOFS, 1); 1076 bio = bio_alloc(GFP_NOFS, 1);
1054 if (!bio) 1077 if (!bio)
1055 return -EIO; 1078 return -EIO;
1056 bio->bi_bdev = page->bdev; 1079 bio->bi_bdev = page->dev->bdev;
1057 bio->bi_sector = page->physical >> 9; 1080 bio->bi_sector = page->physical >> 9;
1058 bio->bi_end_io = scrub_complete_bio_end_io; 1081 bio->bi_end_io = scrub_complete_bio_end_io;
1059 bio->bi_private = &complete; 1082 bio->bi_private = &complete;
@@ -1102,11 +1125,14 @@ static void scrub_recheck_block_checksum(struct btrfs_fs_info *fs_info,
1102 h = (struct btrfs_header *)mapped_buffer; 1125 h = (struct btrfs_header *)mapped_buffer;
1103 1126
1104 if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) || 1127 if (sblock->pagev[0].logical != le64_to_cpu(h->bytenr) ||
1105 generation != le64_to_cpu(h->generation) ||
1106 memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) || 1128 memcmp(h->fsid, fs_info->fsid, BTRFS_UUID_SIZE) ||
1107 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid, 1129 memcmp(h->chunk_tree_uuid, fs_info->chunk_tree_uuid,
1108 BTRFS_UUID_SIZE)) 1130 BTRFS_UUID_SIZE)) {
1131 sblock->header_error = 1;
1132 } else if (generation != le64_to_cpu(h->generation)) {
1109 sblock->header_error = 1; 1133 sblock->header_error = 1;
1134 sblock->generation_error = 1;
1135 }
1110 csum = h->csum; 1136 csum = h->csum;
1111 } else { 1137 } else {
1112 if (!have_csum) 1138 if (!have_csum)
@@ -1182,7 +1208,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1182 bio = bio_alloc(GFP_NOFS, 1); 1208 bio = bio_alloc(GFP_NOFS, 1);
1183 if (!bio) 1209 if (!bio)
1184 return -EIO; 1210 return -EIO;
1185 bio->bi_bdev = page_bad->bdev; 1211 bio->bi_bdev = page_bad->dev->bdev;
1186 bio->bi_sector = page_bad->physical >> 9; 1212 bio->bi_sector = page_bad->physical >> 9;
1187 bio->bi_end_io = scrub_complete_bio_end_io; 1213 bio->bi_end_io = scrub_complete_bio_end_io;
1188 bio->bi_private = &complete; 1214 bio->bi_private = &complete;
@@ -1196,6 +1222,12 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1196 1222
1197 /* this will also unplug the queue */ 1223 /* this will also unplug the queue */
1198 wait_for_completion(&complete); 1224 wait_for_completion(&complete);
1225 if (!bio_flagged(bio, BIO_UPTODATE)) {
1226 btrfs_dev_stat_inc_and_print(page_bad->dev,
1227 BTRFS_DEV_STAT_WRITE_ERRS);
1228 bio_put(bio);
1229 return -EIO;
1230 }
1199 bio_put(bio); 1231 bio_put(bio);
1200 } 1232 }
1201 1233
@@ -1352,7 +1384,8 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1352 u64 mapped_size; 1384 u64 mapped_size;
1353 void *p; 1385 void *p;
1354 u32 crc = ~(u32)0; 1386 u32 crc = ~(u32)0;
1355 int fail = 0; 1387 int fail_gen = 0;
1388 int fail_cor = 0;
1356 u64 len; 1389 u64 len;
1357 int index; 1390 int index;
1358 1391
@@ -1363,13 +1396,13 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1363 memcpy(on_disk_csum, s->csum, sdev->csum_size); 1396 memcpy(on_disk_csum, s->csum, sdev->csum_size);
1364 1397
1365 if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr)) 1398 if (sblock->pagev[0].logical != le64_to_cpu(s->bytenr))
1366 ++fail; 1399 ++fail_cor;
1367 1400
1368 if (sblock->pagev[0].generation != le64_to_cpu(s->generation)) 1401 if (sblock->pagev[0].generation != le64_to_cpu(s->generation))
1369 ++fail; 1402 ++fail_gen;
1370 1403
1371 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE)) 1404 if (memcmp(s->fsid, fs_info->fsid, BTRFS_UUID_SIZE))
1372 ++fail; 1405 ++fail_cor;
1373 1406
1374 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE; 1407 len = BTRFS_SUPER_INFO_SIZE - BTRFS_CSUM_SIZE;
1375 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE; 1408 mapped_size = PAGE_SIZE - BTRFS_CSUM_SIZE;
@@ -1394,9 +1427,9 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1394 1427
1395 btrfs_csum_final(crc, calculated_csum); 1428 btrfs_csum_final(crc, calculated_csum);
1396 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size)) 1429 if (memcmp(calculated_csum, on_disk_csum, sdev->csum_size))
1397 ++fail; 1430 ++fail_cor;
1398 1431
1399 if (fail) { 1432 if (fail_cor + fail_gen) {
1400 /* 1433 /*
1401 * if we find an error in a super block, we just report it. 1434 * if we find an error in a super block, we just report it.
1402 * They will get written with the next transaction commit 1435 * They will get written with the next transaction commit
@@ -1405,9 +1438,15 @@ static int scrub_checksum_super(struct scrub_block *sblock)
1405 spin_lock(&sdev->stat_lock); 1438 spin_lock(&sdev->stat_lock);
1406 ++sdev->stat.super_errors; 1439 ++sdev->stat.super_errors;
1407 spin_unlock(&sdev->stat_lock); 1440 spin_unlock(&sdev->stat_lock);
1441 if (fail_cor)
1442 btrfs_dev_stat_inc_and_print(sdev->dev,
1443 BTRFS_DEV_STAT_CORRUPTION_ERRS);
1444 else
1445 btrfs_dev_stat_inc_and_print(sdev->dev,
1446 BTRFS_DEV_STAT_GENERATION_ERRS);
1408 } 1447 }
1409 1448
1410 return fail; 1449 return fail_cor + fail_gen;
1411} 1450}
1412 1451
1413static void scrub_block_get(struct scrub_block *sblock) 1452static void scrub_block_get(struct scrub_block *sblock)
@@ -1551,7 +1590,7 @@ static int scrub_pages(struct scrub_dev *sdev, u64 logical, u64 len,
1551 return -ENOMEM; 1590 return -ENOMEM;
1552 } 1591 }
1553 spage->sblock = sblock; 1592 spage->sblock = sblock;
1554 spage->bdev = sdev->dev->bdev; 1593 spage->dev = sdev->dev;
1555 spage->flags = flags; 1594 spage->flags = flags;
1556 spage->generation = gen; 1595 spage->generation = gen;
1557 spage->logical = logical; 1596 spage->logical = logical;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index c5f8fca4195f..0eb9a4da069e 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -54,6 +54,7 @@
54#include "version.h" 54#include "version.h"
55#include "export.h" 55#include "export.h"
56#include "compression.h" 56#include "compression.h"
57#include "rcu-string.h"
57 58
58#define CREATE_TRACE_POINTS 59#define CREATE_TRACE_POINTS
59#include <trace/events/btrfs.h> 60#include <trace/events/btrfs.h>
@@ -188,7 +189,8 @@ void btrfs_printk(struct btrfs_fs_info *fs_info, const char *fmt, ...)
188 va_start(args, fmt); 189 va_start(args, fmt);
189 190
190 if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') { 191 if (fmt[0] == '<' && isdigit(fmt[1]) && fmt[2] == '>') {
191 strncpy(lvl, fmt, 3); 192 memcpy(lvl, fmt, 3);
193 lvl[3] = '\0';
192 fmt += 3; 194 fmt += 3;
193 type = logtypes[fmt[1] - '0']; 195 type = logtypes[fmt[1] - '0'];
194 } else 196 } else
@@ -435,11 +437,8 @@ int btrfs_parse_options(struct btrfs_root *root, char *options)
435 case Opt_thread_pool: 437 case Opt_thread_pool:
436 intarg = 0; 438 intarg = 0;
437 match_int(&args[0], &intarg); 439 match_int(&args[0], &intarg);
438 if (intarg) { 440 if (intarg)
439 info->thread_pool_size = intarg; 441 info->thread_pool_size = intarg;
440 printk(KERN_INFO "btrfs: thread pool %d\n",
441 info->thread_pool_size);
442 }
443 break; 442 break;
444 case Opt_max_inline: 443 case Opt_max_inline:
445 num = match_strdup(&args[0]); 444 num = match_strdup(&args[0]);
@@ -769,7 +768,7 @@ static int btrfs_fill_super(struct super_block *sb,
769#ifdef CONFIG_BTRFS_FS_POSIX_ACL 768#ifdef CONFIG_BTRFS_FS_POSIX_ACL
770 sb->s_flags |= MS_POSIXACL; 769 sb->s_flags |= MS_POSIXACL;
771#endif 770#endif
772 771 sb->s_flags |= MS_I_VERSION;
773 err = open_ctree(sb, fs_devices, (char *)data); 772 err = open_ctree(sb, fs_devices, (char *)data);
774 if (err) { 773 if (err) {
775 printk("btrfs: open_ctree failed\n"); 774 printk("btrfs: open_ctree failed\n");
@@ -925,63 +924,48 @@ static inline int is_subvolume_inode(struct inode *inode)
925 */ 924 */
926static char *setup_root_args(char *args) 925static char *setup_root_args(char *args)
927{ 926{
928 unsigned copied = 0; 927 unsigned len = strlen(args) + 2 + 1;
929 unsigned len = strlen(args) + 2; 928 char *src, *dst, *buf;
930 char *pos;
931 char *ret;
932 929
933 /* 930 /*
934 * We need the same args as before, but minus 931 * We need the same args as before, but with this substitution:
935 * 932 * s!subvol=[^,]+!subvolid=0!
936 * subvol=a
937 * 933 *
938 * and add 934 * Since the replacement string is up to 2 bytes longer than the
939 * 935 * original, allocate strlen(args) + 2 + 1 bytes.
940 * subvolid=0
941 *
942 * which is a difference of 2 characters, so we allocate strlen(args) +
943 * 2 characters.
944 */ 936 */
945 ret = kzalloc(len * sizeof(char), GFP_NOFS);
946 if (!ret)
947 return NULL;
948 pos = strstr(args, "subvol=");
949 937
938 src = strstr(args, "subvol=");
950 /* This shouldn't happen, but just in case.. */ 939 /* This shouldn't happen, but just in case.. */
951 if (!pos) { 940 if (!src)
952 kfree(ret); 941 return NULL;
942
943 buf = dst = kmalloc(len, GFP_NOFS);
944 if (!buf)
953 return NULL; 945 return NULL;
954 }
955 946
956 /* 947 /*
957 * The subvol=<> arg is not at the front of the string, copy everybody 948 * If the subvol= arg is not at the start of the string,
958 * up to that into ret. 949 * copy whatever precedes it into buf.
959 */ 950 */
960 if (pos != args) { 951 if (src != args) {
961 *pos = '\0'; 952 *src++ = '\0';
962 strcpy(ret, args); 953 strcpy(buf, args);
963 copied += strlen(args); 954 dst += strlen(args);
964 pos++;
965 } 955 }
966 956
967 strncpy(ret + copied, "subvolid=0", len - copied); 957 strcpy(dst, "subvolid=0");
968 958 dst += strlen("subvolid=0");
969 /* Length of subvolid=0 */
970 copied += 10;
971 959
972 /* 960 /*
973 * If there is no , after the subvol= option then we know there's no 961 * If there is a "," after the original subvol=... string,
974 * other options and we can just return. 962 * copy that suffix into our buffer. Otherwise, we're done.
975 */ 963 */
976 pos = strchr(pos, ','); 964 src = strchr(src, ',');
977 if (!pos) 965 if (src)
978 return ret; 966 strcpy(dst, src);
979 967
980 /* Copy the rest of the arguments into our buffer */ 968 return buf;
981 strncpy(ret + copied, pos, len - copied);
982 copied += strlen(pos);
983
984 return ret;
985} 969}
986 970
987static struct dentry *mount_subvol(const char *subvol_name, int flags, 971static struct dentry *mount_subvol(const char *subvol_name, int flags,
@@ -1118,6 +1102,40 @@ error_fs_info:
1118 return ERR_PTR(error); 1102 return ERR_PTR(error);
1119} 1103}
1120 1104
1105static void btrfs_set_max_workers(struct btrfs_workers *workers, int new_limit)
1106{
1107 spin_lock_irq(&workers->lock);
1108 workers->max_workers = new_limit;
1109 spin_unlock_irq(&workers->lock);
1110}
1111
1112static void btrfs_resize_thread_pool(struct btrfs_fs_info *fs_info,
1113 int new_pool_size, int old_pool_size)
1114{
1115 if (new_pool_size == old_pool_size)
1116 return;
1117
1118 fs_info->thread_pool_size = new_pool_size;
1119
1120 printk(KERN_INFO "btrfs: resize thread pool %d -> %d\n",
1121 old_pool_size, new_pool_size);
1122
1123 btrfs_set_max_workers(&fs_info->generic_worker, new_pool_size);
1124 btrfs_set_max_workers(&fs_info->workers, new_pool_size);
1125 btrfs_set_max_workers(&fs_info->delalloc_workers, new_pool_size);
1126 btrfs_set_max_workers(&fs_info->submit_workers, new_pool_size);
1127 btrfs_set_max_workers(&fs_info->caching_workers, new_pool_size);
1128 btrfs_set_max_workers(&fs_info->fixup_workers, new_pool_size);
1129 btrfs_set_max_workers(&fs_info->endio_workers, new_pool_size);
1130 btrfs_set_max_workers(&fs_info->endio_meta_workers, new_pool_size);
1131 btrfs_set_max_workers(&fs_info->endio_meta_write_workers, new_pool_size);
1132 btrfs_set_max_workers(&fs_info->endio_write_workers, new_pool_size);
1133 btrfs_set_max_workers(&fs_info->endio_freespace_worker, new_pool_size);
1134 btrfs_set_max_workers(&fs_info->delayed_workers, new_pool_size);
1135 btrfs_set_max_workers(&fs_info->readahead_workers, new_pool_size);
1136 btrfs_set_max_workers(&fs_info->scrub_workers, new_pool_size);
1137}
1138
1121static int btrfs_remount(struct super_block *sb, int *flags, char *data) 1139static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1122{ 1140{
1123 struct btrfs_fs_info *fs_info = btrfs_sb(sb); 1141 struct btrfs_fs_info *fs_info = btrfs_sb(sb);
@@ -1137,6 +1155,9 @@ static int btrfs_remount(struct super_block *sb, int *flags, char *data)
1137 goto restore; 1155 goto restore;
1138 } 1156 }
1139 1157
1158 btrfs_resize_thread_pool(fs_info,
1159 fs_info->thread_pool_size, old_thread_pool_size);
1160
1140 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY)) 1161 if ((*flags & MS_RDONLY) == (sb->s_flags & MS_RDONLY))
1141 return 0; 1162 return 0;
1142 1163
@@ -1180,7 +1201,8 @@ restore:
1180 fs_info->compress_type = old_compress_type; 1201 fs_info->compress_type = old_compress_type;
1181 fs_info->max_inline = old_max_inline; 1202 fs_info->max_inline = old_max_inline;
1182 fs_info->alloc_start = old_alloc_start; 1203 fs_info->alloc_start = old_alloc_start;
1183 fs_info->thread_pool_size = old_thread_pool_size; 1204 btrfs_resize_thread_pool(fs_info,
1205 old_thread_pool_size, fs_info->thread_pool_size);
1184 fs_info->metadata_ratio = old_metadata_ratio; 1206 fs_info->metadata_ratio = old_metadata_ratio;
1185 return ret; 1207 return ret;
1186} 1208}
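With these two call sites in place, the pool size can be changed on a live filesystem: a remount along the lines of mount -o remount,thread_pool=8 /mnt feeds the parsed value through btrfs_remount(), and btrfs_resize_thread_pool() pushes the new limit into every worker group, logging a line of the form "btrfs: resize thread pool 4 -> 8". The restore path above runs the same helper with its arguments swapped, so a remount that fails later on also puts the worker limits back where they were.
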
@@ -1461,12 +1483,44 @@ static void btrfs_fs_dirty_inode(struct inode *inode, int flags)
1461 "error %d\n", btrfs_ino(inode), ret); 1483 "error %d\n", btrfs_ino(inode), ret);
1462} 1484}
1463 1485
1486static int btrfs_show_devname(struct seq_file *m, struct dentry *root)
1487{
1488 struct btrfs_fs_info *fs_info = btrfs_sb(root->d_sb);
1489 struct btrfs_fs_devices *cur_devices;
1490 struct btrfs_device *dev, *first_dev = NULL;
1491 struct list_head *head;
1492 struct rcu_string *name;
1493
1494 mutex_lock(&fs_info->fs_devices->device_list_mutex);
1495 cur_devices = fs_info->fs_devices;
1496 while (cur_devices) {
1497 head = &cur_devices->devices;
1498 list_for_each_entry(dev, head, dev_list) {
1499 if (!first_dev || dev->devid < first_dev->devid)
1500 first_dev = dev;
1501 }
1502 cur_devices = cur_devices->seed;
1503 }
1504
1505 if (first_dev) {
1506 rcu_read_lock();
1507 name = rcu_dereference(first_dev->name);
1508 seq_escape(m, name->str, " \t\n\\");
1509 rcu_read_unlock();
1510 } else {
1511 WARN_ON(1);
1512 }
1513 mutex_unlock(&fs_info->fs_devices->device_list_mutex);
1514 return 0;
1515}
1516
1464static const struct super_operations btrfs_super_ops = { 1517static const struct super_operations btrfs_super_ops = {
1465 .drop_inode = btrfs_drop_inode, 1518 .drop_inode = btrfs_drop_inode,
1466 .evict_inode = btrfs_evict_inode, 1519 .evict_inode = btrfs_evict_inode,
1467 .put_super = btrfs_put_super, 1520 .put_super = btrfs_put_super,
1468 .sync_fs = btrfs_sync_fs, 1521 .sync_fs = btrfs_sync_fs,
1469 .show_options = btrfs_show_options, 1522 .show_options = btrfs_show_options,
1523 .show_devname = btrfs_show_devname,
1470 .write_inode = btrfs_write_inode, 1524 .write_inode = btrfs_write_inode,
1471 .dirty_inode = btrfs_fs_dirty_inode, 1525 .dirty_inode = btrfs_fs_dirty_inode,
1472 .alloc_inode = btrfs_alloc_inode, 1526 .alloc_inode = btrfs_alloc_inode,
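Hooking .show_devname means /proc/self/mounts (and anything that reads it, such as mount(8)) now reports the path of the lowest-devid device of a multi-device filesystem instead of whichever path happened to be handed to mount(2); the cur_devices->seed walk above keeps that working for sprouted filesystems whose first devices live in the seed list. The name is copied out under rcu_read_lock() because device names become RCU-managed strings in the fs/btrfs/volumes.c changes further down.
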
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 36422254ef67..b72b068183ec 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -28,6 +28,7 @@
28#include "locking.h" 28#include "locking.h"
29#include "tree-log.h" 29#include "tree-log.h"
30#include "inode-map.h" 30#include "inode-map.h"
31#include "volumes.h"
31 32
32#define BTRFS_ROOT_TRANS_TAG 0 33#define BTRFS_ROOT_TRANS_TAG 0
33 34
@@ -55,49 +56,54 @@ static noinline void switch_commit_root(struct btrfs_root *root)
55static noinline int join_transaction(struct btrfs_root *root, int nofail) 56static noinline int join_transaction(struct btrfs_root *root, int nofail)
56{ 57{
57 struct btrfs_transaction *cur_trans; 58 struct btrfs_transaction *cur_trans;
59 struct btrfs_fs_info *fs_info = root->fs_info;
58 60
59 spin_lock(&root->fs_info->trans_lock); 61 spin_lock(&fs_info->trans_lock);
60loop: 62loop:
61 /* The file system has been taken offline. No new transactions. */ 63 /* The file system has been taken offline. No new transactions. */
62 if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) { 64 if (fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
63 spin_unlock(&root->fs_info->trans_lock); 65 spin_unlock(&fs_info->trans_lock);
64 return -EROFS; 66 return -EROFS;
65 } 67 }
66 68
67 if (root->fs_info->trans_no_join) { 69 if (fs_info->trans_no_join) {
68 if (!nofail) { 70 if (!nofail) {
69 spin_unlock(&root->fs_info->trans_lock); 71 spin_unlock(&fs_info->trans_lock);
70 return -EBUSY; 72 return -EBUSY;
71 } 73 }
72 } 74 }
73 75
74 cur_trans = root->fs_info->running_transaction; 76 cur_trans = fs_info->running_transaction;
75 if (cur_trans) { 77 if (cur_trans) {
76 if (cur_trans->aborted) { 78 if (cur_trans->aborted) {
77 spin_unlock(&root->fs_info->trans_lock); 79 spin_unlock(&fs_info->trans_lock);
78 return cur_trans->aborted; 80 return cur_trans->aborted;
79 } 81 }
80 atomic_inc(&cur_trans->use_count); 82 atomic_inc(&cur_trans->use_count);
81 atomic_inc(&cur_trans->num_writers); 83 atomic_inc(&cur_trans->num_writers);
82 cur_trans->num_joined++; 84 cur_trans->num_joined++;
83 spin_unlock(&root->fs_info->trans_lock); 85 spin_unlock(&fs_info->trans_lock);
84 return 0; 86 return 0;
85 } 87 }
86 spin_unlock(&root->fs_info->trans_lock); 88 spin_unlock(&fs_info->trans_lock);
87 89
88 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS); 90 cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
89 if (!cur_trans) 91 if (!cur_trans)
90 return -ENOMEM; 92 return -ENOMEM;
91 93
92 spin_lock(&root->fs_info->trans_lock); 94 spin_lock(&fs_info->trans_lock);
93 if (root->fs_info->running_transaction) { 95 if (fs_info->running_transaction) {
94 /* 96 /*
95 * someone started a transaction after we unlocked. Make sure 97 * someone started a transaction after we unlocked. Make sure
96 * to redo the trans_no_join checks above 98 * to redo the trans_no_join checks above
97 */ 99 */
98 kmem_cache_free(btrfs_transaction_cachep, cur_trans); 100 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
99 cur_trans = root->fs_info->running_transaction; 101 cur_trans = fs_info->running_transaction;
100 goto loop; 102 goto loop;
103 } else if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
104 spin_unlock(&root->fs_info->trans_lock);
105 kmem_cache_free(btrfs_transaction_cachep, cur_trans);
106 return -EROFS;
101 } 107 }
102 108
103 atomic_set(&cur_trans->num_writers, 1); 109 atomic_set(&cur_trans->num_writers, 1);
@@ -121,20 +127,38 @@ loop:
121 cur_trans->delayed_refs.flushing = 0; 127 cur_trans->delayed_refs.flushing = 0;
122 cur_trans->delayed_refs.run_delayed_start = 0; 128 cur_trans->delayed_refs.run_delayed_start = 0;
123 cur_trans->delayed_refs.seq = 1; 129 cur_trans->delayed_refs.seq = 1;
130
131 /*
132 * although the tree mod log is per file system and not per transaction,
133 * the log must never go across transaction boundaries.
134 */
135 smp_mb();
136 if (!list_empty(&fs_info->tree_mod_seq_list)) {
137 printk(KERN_ERR "btrfs: tree_mod_seq_list not empty when "
138 "creating a fresh transaction\n");
139 WARN_ON(1);
140 }
141 if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log)) {
142 printk(KERN_ERR "btrfs: tree_mod_log rb tree not empty when "
143 "creating a fresh transaction\n");
144 WARN_ON(1);
145 }
146 atomic_set(&fs_info->tree_mod_seq, 0);
147
124 init_waitqueue_head(&cur_trans->delayed_refs.seq_wait); 148 init_waitqueue_head(&cur_trans->delayed_refs.seq_wait);
125 spin_lock_init(&cur_trans->commit_lock); 149 spin_lock_init(&cur_trans->commit_lock);
126 spin_lock_init(&cur_trans->delayed_refs.lock); 150 spin_lock_init(&cur_trans->delayed_refs.lock);
127 INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head); 151 INIT_LIST_HEAD(&cur_trans->delayed_refs.seq_head);
128 152
129 INIT_LIST_HEAD(&cur_trans->pending_snapshots); 153 INIT_LIST_HEAD(&cur_trans->pending_snapshots);
130 list_add_tail(&cur_trans->list, &root->fs_info->trans_list); 154 list_add_tail(&cur_trans->list, &fs_info->trans_list);
131 extent_io_tree_init(&cur_trans->dirty_pages, 155 extent_io_tree_init(&cur_trans->dirty_pages,
132 root->fs_info->btree_inode->i_mapping); 156 fs_info->btree_inode->i_mapping);
133 root->fs_info->generation++; 157 fs_info->generation++;
134 cur_trans->transid = root->fs_info->generation; 158 cur_trans->transid = fs_info->generation;
135 root->fs_info->running_transaction = cur_trans; 159 fs_info->running_transaction = cur_trans;
136 cur_trans->aborted = 0; 160 cur_trans->aborted = 0;
137 spin_unlock(&root->fs_info->trans_lock); 161 spin_unlock(&fs_info->trans_lock);
138 162
139 return 0; 163 return 0;
140} 164}
@@ -758,6 +782,9 @@ static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
758 if (ret) 782 if (ret)
759 return ret; 783 return ret;
760 784
785 ret = btrfs_run_dev_stats(trans, root->fs_info);
786 BUG_ON(ret);
787
761 while (!list_empty(&fs_info->dirty_cowonly_roots)) { 788 while (!list_empty(&fs_info->dirty_cowonly_roots)) {
762 next = fs_info->dirty_cowonly_roots.next; 789 next = fs_info->dirty_cowonly_roots.next;
763 list_del_init(next); 790 list_del_init(next);
@@ -1190,14 +1217,20 @@ int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
1190 1217
1191 1218
1192static void cleanup_transaction(struct btrfs_trans_handle *trans, 1219static void cleanup_transaction(struct btrfs_trans_handle *trans,
1193 struct btrfs_root *root) 1220 struct btrfs_root *root, int err)
1194{ 1221{
1195 struct btrfs_transaction *cur_trans = trans->transaction; 1222 struct btrfs_transaction *cur_trans = trans->transaction;
1196 1223
1197 WARN_ON(trans->use_count > 1); 1224 WARN_ON(trans->use_count > 1);
1198 1225
1226 btrfs_abort_transaction(trans, root, err);
1227
1199 spin_lock(&root->fs_info->trans_lock); 1228 spin_lock(&root->fs_info->trans_lock);
1200 list_del_init(&cur_trans->list); 1229 list_del_init(&cur_trans->list);
1230 if (cur_trans == root->fs_info->running_transaction) {
1231 root->fs_info->running_transaction = NULL;
1232 root->fs_info->trans_no_join = 0;
1233 }
1201 spin_unlock(&root->fs_info->trans_lock); 1234 spin_unlock(&root->fs_info->trans_lock);
1202 1235
1203 btrfs_cleanup_one_transaction(trans->transaction, root); 1236 btrfs_cleanup_one_transaction(trans->transaction, root);
@@ -1503,7 +1536,7 @@ cleanup_transaction:
1503// WARN_ON(1); 1536// WARN_ON(1);
1504 if (current->journal_info == trans) 1537 if (current->journal_info == trans)
1505 current->journal_info = NULL; 1538 current->journal_info = NULL;
1506 cleanup_transaction(trans, root); 1539 cleanup_transaction(trans, root, ret);
1507 1540
1508 return ret; 1541 return ret;
1509} 1542}
diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c
index eb1ae908582c..2017d0ff511c 100644
--- a/fs/btrfs/tree-log.c
+++ b/fs/btrfs/tree-log.c
@@ -1628,7 +1628,9 @@ static int replay_one_buffer(struct btrfs_root *log, struct extent_buffer *eb,
1628 int i; 1628 int i;
1629 int ret; 1629 int ret;
1630 1630
1631 btrfs_read_buffer(eb, gen); 1631 ret = btrfs_read_buffer(eb, gen);
1632 if (ret)
1633 return ret;
1632 1634
1633 level = btrfs_header_level(eb); 1635 level = btrfs_header_level(eb);
1634 1636
@@ -1749,7 +1751,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1749 1751
1750 path->slots[*level]++; 1752 path->slots[*level]++;
1751 if (wc->free) { 1753 if (wc->free) {
1752 btrfs_read_buffer(next, ptr_gen); 1754 ret = btrfs_read_buffer(next, ptr_gen);
1755 if (ret) {
1756 free_extent_buffer(next);
1757 return ret;
1758 }
1753 1759
1754 btrfs_tree_lock(next); 1760 btrfs_tree_lock(next);
1755 btrfs_set_lock_blocking(next); 1761 btrfs_set_lock_blocking(next);
@@ -1766,7 +1772,11 @@ static noinline int walk_down_log_tree(struct btrfs_trans_handle *trans,
1766 free_extent_buffer(next); 1772 free_extent_buffer(next);
1767 continue; 1773 continue;
1768 } 1774 }
1769 btrfs_read_buffer(next, ptr_gen); 1775 ret = btrfs_read_buffer(next, ptr_gen);
1776 if (ret) {
1777 free_extent_buffer(next);
1778 return ret;
1779 }
1770 1780
1771 WARN_ON(*level <= 0); 1781 WARN_ON(*level <= 0);
1772 if (path->nodes[*level-1]) 1782 if (path->nodes[*level-1])
@@ -2657,6 +2667,8 @@ static int drop_objectid_items(struct btrfs_trans_handle *trans,
2657 btrfs_release_path(path); 2667 btrfs_release_path(path);
2658 } 2668 }
2659 btrfs_release_path(path); 2669 btrfs_release_path(path);
2670 if (ret > 0)
2671 ret = 0;
2660 return ret; 2672 return ret;
2661} 2673}
2662 2674
@@ -3028,21 +3040,6 @@ out:
3028 return ret; 3040 return ret;
3029} 3041}
3030 3042
3031static int inode_in_log(struct btrfs_trans_handle *trans,
3032 struct inode *inode)
3033{
3034 struct btrfs_root *root = BTRFS_I(inode)->root;
3035 int ret = 0;
3036
3037 mutex_lock(&root->log_mutex);
3038 if (BTRFS_I(inode)->logged_trans == trans->transid &&
3039 BTRFS_I(inode)->last_sub_trans <= root->last_log_commit)
3040 ret = 1;
3041 mutex_unlock(&root->log_mutex);
3042 return ret;
3043}
3044
3045
3046/* 3043/*
3047 * helper function around btrfs_log_inode to make sure newly created 3044 * helper function around btrfs_log_inode to make sure newly created
3048 * parent directories also end up in the log. A minimal inode and backref 3045 * parent directories also end up in the log. A minimal inode and backref
@@ -3083,7 +3080,7 @@ int btrfs_log_inode_parent(struct btrfs_trans_handle *trans,
3083 if (ret) 3080 if (ret)
3084 goto end_no_trans; 3081 goto end_no_trans;
3085 3082
3086 if (inode_in_log(trans, inode)) { 3083 if (btrfs_inode_in_log(inode, trans->transid)) {
3087 ret = BTRFS_NO_LOG_SYNC; 3084 ret = BTRFS_NO_LOG_SYNC;
3088 goto end_no_trans; 3085 goto end_no_trans;
3089 } 3086 }
diff --git a/fs/btrfs/ulist.c b/fs/btrfs/ulist.c
index 12f5147bd2b1..ab942f46b3dd 100644
--- a/fs/btrfs/ulist.c
+++ b/fs/btrfs/ulist.c
@@ -23,9 +23,9 @@
23 * 23 *
24 * ulist = ulist_alloc(); 24 * ulist = ulist_alloc();
25 * ulist_add(ulist, root); 25 * ulist_add(ulist, root);
26 * elem = NULL; 26 * ULIST_ITER_INIT(&uiter);
27 * 27 *
28 * while ((elem = ulist_next(ulist, elem)) { 28 * while ((elem = ulist_next(ulist, &uiter)) {
29 * for (all child nodes n in elem) 29 * for (all child nodes n in elem)
30 * ulist_add(ulist, n); 30 * ulist_add(ulist, n);
31 * do something useful with the node; 31 * do something useful with the node;
@@ -95,7 +95,7 @@ EXPORT_SYMBOL(ulist_reinit);
95 * 95 *
96 * The allocated ulist will be returned in an initialized state. 96 * The allocated ulist will be returned in an initialized state.
97 */ 97 */
98struct ulist *ulist_alloc(unsigned long gfp_mask) 98struct ulist *ulist_alloc(gfp_t gfp_mask)
99{ 99{
100 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask); 100 struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
101 101
@@ -144,13 +144,22 @@ EXPORT_SYMBOL(ulist_free);
144 * unaltered. 144 * unaltered.
145 */ 145 */
146int ulist_add(struct ulist *ulist, u64 val, unsigned long aux, 146int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
147 unsigned long gfp_mask) 147 gfp_t gfp_mask)
148{
149 return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
150}
151
152int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
153 unsigned long *old_aux, gfp_t gfp_mask)
148{ 154{
149 int i; 155 int i;
150 156
151 for (i = 0; i < ulist->nnodes; ++i) { 157 for (i = 0; i < ulist->nnodes; ++i) {
152 if (ulist->nodes[i].val == val) 158 if (ulist->nodes[i].val == val) {
159 if (old_aux)
160 *old_aux = ulist->nodes[i].aux;
153 return 0; 161 return 0;
162 }
154 } 163 }
155 164
156 if (ulist->nnodes >= ulist->nodes_alloced) { 165 if (ulist->nnodes >= ulist->nodes_alloced) {
@@ -188,33 +197,26 @@ EXPORT_SYMBOL(ulist_add);
188/** 197/**
189 * ulist_next - iterate ulist 198 * ulist_next - iterate ulist
190 * @ulist: ulist to iterate 199 * @ulist: ulist to iterate
191 * @prev: previously returned element or %NULL to start iteration 200 * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
192 * 201 *
193 * Note: locking must be provided by the caller. In case of rwlocks only read 202 * Note: locking must be provided by the caller. In case of rwlocks only read
194 * locking is needed 203 * locking is needed
195 * 204 *
196 * This function is used to iterate an ulist. The iteration is started with 205 * This function is used to iterate an ulist.
197 * @prev = %NULL. It returns the next element from the ulist or %NULL when the 206 * It returns the next element from the ulist or %NULL when the
198 * end is reached. No guarantee is made with respect to the order in which 207 * end is reached. No guarantee is made with respect to the order in which
199 * the elements are returned. They might neither be returned in order of 208 * the elements are returned. They might neither be returned in order of
200 * addition nor in ascending order. 209 * addition nor in ascending order.
201 * It is allowed to call ulist_add during an enumeration. Newly added items 210 * It is allowed to call ulist_add during an enumeration. Newly added items
202 * are guaranteed to show up in the running enumeration. 211 * are guaranteed to show up in the running enumeration.
203 */ 212 */
204struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev) 213struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
205{ 214{
206 int next;
207
208 if (ulist->nnodes == 0) 215 if (ulist->nnodes == 0)
209 return NULL; 216 return NULL;
210 217 if (uiter->i < 0 || uiter->i >= ulist->nnodes)
211 if (!prev)
212 return &ulist->nodes[0];
213
214 next = (prev - ulist->nodes) + 1;
215 if (next < 0 || next >= ulist->nnodes)
216 return NULL; 218 return NULL;
217 219
218 return &ulist->nodes[next]; 220 return &ulist->nodes[uiter->i++];
219} 221}
220EXPORT_SYMBOL(ulist_next); 222EXPORT_SYMBOL(ulist_next);
diff --git a/fs/btrfs/ulist.h b/fs/btrfs/ulist.h
index 2e25dec58ec0..21bdc8ec8130 100644
--- a/fs/btrfs/ulist.h
+++ b/fs/btrfs/ulist.h
@@ -24,6 +24,10 @@
24 */ 24 */
25#define ULIST_SIZE 16 25#define ULIST_SIZE 16
26 26
27struct ulist_iterator {
28 int i;
29};
30
27/* 31/*
28 * element of the list 32 * element of the list
29 */ 33 */
@@ -59,10 +63,15 @@ struct ulist {
59void ulist_init(struct ulist *ulist); 63void ulist_init(struct ulist *ulist);
60void ulist_fini(struct ulist *ulist); 64void ulist_fini(struct ulist *ulist);
61void ulist_reinit(struct ulist *ulist); 65void ulist_reinit(struct ulist *ulist);
62struct ulist *ulist_alloc(unsigned long gfp_mask); 66struct ulist *ulist_alloc(gfp_t gfp_mask);
63void ulist_free(struct ulist *ulist); 67void ulist_free(struct ulist *ulist);
64int ulist_add(struct ulist *ulist, u64 val, unsigned long aux, 68int ulist_add(struct ulist *ulist, u64 val, unsigned long aux,
65 unsigned long gfp_mask); 69 gfp_t gfp_mask);
66struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_node *prev); 70int ulist_add_merge(struct ulist *ulist, u64 val, unsigned long aux,
71 unsigned long *old_aux, gfp_t gfp_mask);
72struct ulist_node *ulist_next(struct ulist *ulist,
73 struct ulist_iterator *uiter);
74
75#define ULIST_ITER_INIT(uiter) ((uiter)->i = 0)
67 76
68#endif 77#endif
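Putting the new pieces together, iteration over a ulist now looks like the following kernel-style fragment (only the interfaces declared above are used; root_objectid, the aux value and process() are placeholders for whatever the caller tracks):

    struct ulist *ulist;
    struct ulist_iterator uiter;
    struct ulist_node *node;

    ulist = ulist_alloc(GFP_NOFS);
    if (!ulist)
        return -ENOMEM;

    ulist_add(ulist, root_objectid, 0, GFP_NOFS);

    ULIST_ITER_INIT(&uiter);
    while ((node = ulist_next(ulist, &uiter))) {
        /* entries added here via ulist_add() are still visited */
        process(node->val, node->aux);
    }

    ulist_free(ulist);

Keeping the position in a separate ulist_iterator, rather than doing pointer arithmetic on the previously returned node, also stays valid if ulist_add() has to move the node array to grow it while the loop is running.
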
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 1411b99555a4..8a3d2594b807 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -23,6 +23,7 @@
23#include <linux/random.h> 23#include <linux/random.h>
24#include <linux/iocontext.h> 24#include <linux/iocontext.h>
25#include <linux/capability.h> 25#include <linux/capability.h>
26#include <linux/ratelimit.h>
26#include <linux/kthread.h> 27#include <linux/kthread.h>
27#include <asm/div64.h> 28#include <asm/div64.h>
28#include "compat.h" 29#include "compat.h"
@@ -34,11 +35,14 @@
34#include "volumes.h" 35#include "volumes.h"
35#include "async-thread.h" 36#include "async-thread.h"
36#include "check-integrity.h" 37#include "check-integrity.h"
38#include "rcu-string.h"
37 39
38static int init_first_rw_device(struct btrfs_trans_handle *trans, 40static int init_first_rw_device(struct btrfs_trans_handle *trans,
39 struct btrfs_root *root, 41 struct btrfs_root *root,
40 struct btrfs_device *device); 42 struct btrfs_device *device);
41static int btrfs_relocate_sys_chunks(struct btrfs_root *root); 43static int btrfs_relocate_sys_chunks(struct btrfs_root *root);
44static void __btrfs_reset_dev_stats(struct btrfs_device *dev);
45static void btrfs_dev_stat_print_on_load(struct btrfs_device *device);
42 46
43static DEFINE_MUTEX(uuid_mutex); 47static DEFINE_MUTEX(uuid_mutex);
44static LIST_HEAD(fs_uuids); 48static LIST_HEAD(fs_uuids);
@@ -61,7 +65,7 @@ static void free_fs_devices(struct btrfs_fs_devices *fs_devices)
61 device = list_entry(fs_devices->devices.next, 65 device = list_entry(fs_devices->devices.next,
62 struct btrfs_device, dev_list); 66 struct btrfs_device, dev_list);
63 list_del(&device->dev_list); 67 list_del(&device->dev_list);
64 kfree(device->name); 68 rcu_string_free(device->name);
65 kfree(device); 69 kfree(device);
66 } 70 }
67 kfree(fs_devices); 71 kfree(fs_devices);
@@ -331,8 +335,8 @@ static noinline int device_list_add(const char *path,
331{ 335{
332 struct btrfs_device *device; 336 struct btrfs_device *device;
333 struct btrfs_fs_devices *fs_devices; 337 struct btrfs_fs_devices *fs_devices;
338 struct rcu_string *name;
334 u64 found_transid = btrfs_super_generation(disk_super); 339 u64 found_transid = btrfs_super_generation(disk_super);
335 char *name;
336 340
337 fs_devices = find_fsid(disk_super->fsid); 341 fs_devices = find_fsid(disk_super->fsid);
338 if (!fs_devices) { 342 if (!fs_devices) {
@@ -361,15 +365,18 @@ static noinline int device_list_add(const char *path,
361 return -ENOMEM; 365 return -ENOMEM;
362 } 366 }
363 device->devid = devid; 367 device->devid = devid;
368 device->dev_stats_valid = 0;
364 device->work.func = pending_bios_fn; 369 device->work.func = pending_bios_fn;
365 memcpy(device->uuid, disk_super->dev_item.uuid, 370 memcpy(device->uuid, disk_super->dev_item.uuid,
366 BTRFS_UUID_SIZE); 371 BTRFS_UUID_SIZE);
367 spin_lock_init(&device->io_lock); 372 spin_lock_init(&device->io_lock);
368 device->name = kstrdup(path, GFP_NOFS); 373
369 if (!device->name) { 374 name = rcu_string_strdup(path, GFP_NOFS);
375 if (!name) {
370 kfree(device); 376 kfree(device);
371 return -ENOMEM; 377 return -ENOMEM;
372 } 378 }
379 rcu_assign_pointer(device->name, name);
373 INIT_LIST_HEAD(&device->dev_alloc_list); 380 INIT_LIST_HEAD(&device->dev_alloc_list);
374 381
375 /* init readahead state */ 382 /* init readahead state */
@@ -386,12 +393,12 @@ static noinline int device_list_add(const char *path,
386 393
387 device->fs_devices = fs_devices; 394 device->fs_devices = fs_devices;
388 fs_devices->num_devices++; 395 fs_devices->num_devices++;
389 } else if (!device->name || strcmp(device->name, path)) { 396 } else if (!device->name || strcmp(device->name->str, path)) {
390 name = kstrdup(path, GFP_NOFS); 397 name = rcu_string_strdup(path, GFP_NOFS);
391 if (!name) 398 if (!name)
392 return -ENOMEM; 399 return -ENOMEM;
393 kfree(device->name); 400 rcu_string_free(device->name);
394 device->name = name; 401 rcu_assign_pointer(device->name, name);
395 if (device->missing) { 402 if (device->missing) {
396 fs_devices->missing_devices--; 403 fs_devices->missing_devices--;
397 device->missing = 0; 404 device->missing = 0;
@@ -426,15 +433,22 @@ static struct btrfs_fs_devices *clone_fs_devices(struct btrfs_fs_devices *orig)
426 433
427 /* We have held the volume lock, it is safe to get the devices. */ 434 /* We have held the volume lock, it is safe to get the devices. */
428 list_for_each_entry(orig_dev, &orig->devices, dev_list) { 435 list_for_each_entry(orig_dev, &orig->devices, dev_list) {
436 struct rcu_string *name;
437
429 device = kzalloc(sizeof(*device), GFP_NOFS); 438 device = kzalloc(sizeof(*device), GFP_NOFS);
430 if (!device) 439 if (!device)
431 goto error; 440 goto error;
432 441
433 device->name = kstrdup(orig_dev->name, GFP_NOFS); 442 /*
434 if (!device->name) { 443 * This is ok to do without rcu read locked because we hold the
444 * uuid mutex so nothing we touch in here is going to disappear.
445 */
446 name = rcu_string_strdup(orig_dev->name->str, GFP_NOFS);
447 if (!name) {
435 kfree(device); 448 kfree(device);
436 goto error; 449 goto error;
437 } 450 }
451 rcu_assign_pointer(device->name, name);
438 452
439 device->devid = orig_dev->devid; 453 device->devid = orig_dev->devid;
440 device->work.func = pending_bios_fn; 454 device->work.func = pending_bios_fn;
@@ -487,7 +501,7 @@ again:
487 } 501 }
488 list_del_init(&device->dev_list); 502 list_del_init(&device->dev_list);
489 fs_devices->num_devices--; 503 fs_devices->num_devices--;
490 kfree(device->name); 504 rcu_string_free(device->name);
491 kfree(device); 505 kfree(device);
492 } 506 }
493 507
@@ -512,7 +526,7 @@ static void __free_device(struct work_struct *work)
512 if (device->bdev) 526 if (device->bdev)
513 blkdev_put(device->bdev, device->mode); 527 blkdev_put(device->bdev, device->mode);
514 528
515 kfree(device->name); 529 rcu_string_free(device->name);
516 kfree(device); 530 kfree(device);
517} 531}
518 532
@@ -536,6 +550,7 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
536 mutex_lock(&fs_devices->device_list_mutex); 550 mutex_lock(&fs_devices->device_list_mutex);
537 list_for_each_entry(device, &fs_devices->devices, dev_list) { 551 list_for_each_entry(device, &fs_devices->devices, dev_list) {
538 struct btrfs_device *new_device; 552 struct btrfs_device *new_device;
553 struct rcu_string *name;
539 554
540 if (device->bdev) 555 if (device->bdev)
541 fs_devices->open_devices--; 556 fs_devices->open_devices--;
@@ -551,8 +566,11 @@ static int __btrfs_close_devices(struct btrfs_fs_devices *fs_devices)
551 new_device = kmalloc(sizeof(*new_device), GFP_NOFS); 566 new_device = kmalloc(sizeof(*new_device), GFP_NOFS);
552 BUG_ON(!new_device); /* -ENOMEM */ 567 BUG_ON(!new_device); /* -ENOMEM */
553 memcpy(new_device, device, sizeof(*new_device)); 568 memcpy(new_device, device, sizeof(*new_device));
554 new_device->name = kstrdup(device->name, GFP_NOFS); 569
555 BUG_ON(device->name && !new_device->name); /* -ENOMEM */ 570 /* Safe because we are under uuid_mutex */
571 name = rcu_string_strdup(device->name->str, GFP_NOFS);
572 BUG_ON(device->name && !name); /* -ENOMEM */
573 rcu_assign_pointer(new_device->name, name);
556 new_device->bdev = NULL; 574 new_device->bdev = NULL;
557 new_device->writeable = 0; 575 new_device->writeable = 0;
558 new_device->in_fs_metadata = 0; 576 new_device->in_fs_metadata = 0;
@@ -617,9 +635,9 @@ static int __btrfs_open_devices(struct btrfs_fs_devices *fs_devices,
617 if (!device->name) 635 if (!device->name)
618 continue; 636 continue;
619 637
620 bdev = blkdev_get_by_path(device->name, flags, holder); 638 bdev = blkdev_get_by_path(device->name->str, flags, holder);
621 if (IS_ERR(bdev)) { 639 if (IS_ERR(bdev)) {
622 printk(KERN_INFO "open %s failed\n", device->name); 640 printk(KERN_INFO "open %s failed\n", device->name->str);
623 goto error; 641 goto error;
624 } 642 }
625 filemap_write_and_wait(bdev->bd_inode->i_mapping); 643 filemap_write_and_wait(bdev->bd_inode->i_mapping);
@@ -1628,12 +1646,13 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1628 struct block_device *bdev; 1646 struct block_device *bdev;
1629 struct list_head *devices; 1647 struct list_head *devices;
1630 struct super_block *sb = root->fs_info->sb; 1648 struct super_block *sb = root->fs_info->sb;
1649 struct rcu_string *name;
1631 u64 total_bytes; 1650 u64 total_bytes;
1632 int seeding_dev = 0; 1651 int seeding_dev = 0;
1633 int ret = 0; 1652 int ret = 0;
1634 1653
1635 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding) 1654 if ((sb->s_flags & MS_RDONLY) && !root->fs_info->fs_devices->seeding)
1636 return -EINVAL; 1655 return -EROFS;
1637 1656
1638 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL, 1657 bdev = blkdev_get_by_path(device_path, FMODE_WRITE | FMODE_EXCL,
1639 root->fs_info->bdev_holder); 1658 root->fs_info->bdev_holder);
@@ -1667,23 +1686,24 @@ int btrfs_init_new_device(struct btrfs_root *root, char *device_path)
1667 goto error; 1686 goto error;
1668 } 1687 }
1669 1688
1670 device->name = kstrdup(device_path, GFP_NOFS); 1689 name = rcu_string_strdup(device_path, GFP_NOFS);
1671 if (!device->name) { 1690 if (!name) {
1672 kfree(device); 1691 kfree(device);
1673 ret = -ENOMEM; 1692 ret = -ENOMEM;
1674 goto error; 1693 goto error;
1675 } 1694 }
1695 rcu_assign_pointer(device->name, name);
1676 1696
1677 ret = find_next_devid(root, &device->devid); 1697 ret = find_next_devid(root, &device->devid);
1678 if (ret) { 1698 if (ret) {
1679 kfree(device->name); 1699 rcu_string_free(device->name);
1680 kfree(device); 1700 kfree(device);
1681 goto error; 1701 goto error;
1682 } 1702 }
1683 1703
1684 trans = btrfs_start_transaction(root, 0); 1704 trans = btrfs_start_transaction(root, 0);
1685 if (IS_ERR(trans)) { 1705 if (IS_ERR(trans)) {
1686 kfree(device->name); 1706 rcu_string_free(device->name);
1687 kfree(device); 1707 kfree(device);
1688 ret = PTR_ERR(trans); 1708 ret = PTR_ERR(trans);
1689 goto error; 1709 goto error;
@@ -1792,7 +1812,7 @@ error_trans:
1792 unlock_chunks(root); 1812 unlock_chunks(root);
1793 btrfs_abort_transaction(trans, root, ret); 1813 btrfs_abort_transaction(trans, root, ret);
1794 btrfs_end_transaction(trans, root); 1814 btrfs_end_transaction(trans, root);
1795 kfree(device->name); 1815 rcu_string_free(device->name);
1796 kfree(device); 1816 kfree(device);
1797error: 1817error:
1798 blkdev_put(bdev, FMODE_EXCL); 1818 blkdev_put(bdev, FMODE_EXCL);
@@ -4001,13 +4021,58 @@ int btrfs_rmap_block(struct btrfs_mapping_tree *map_tree,
4001 return 0; 4021 return 0;
4002} 4022}
4003 4023
4024static void *merge_stripe_index_into_bio_private(void *bi_private,
4025 unsigned int stripe_index)
4026{
4027 /*
4028 * with single, dup, RAID0, RAID1 and RAID10, stripe_index is
4029 * at most 1.
4030 * The alternative solution (instead of stealing bits from the
4031 * pointer) would be to allocate an intermediate structure
4032 * that contains the old private pointer plus the stripe_index.
4033 */
4034 BUG_ON((((uintptr_t)bi_private) & 3) != 0);
4035 BUG_ON(stripe_index > 3);
4036 return (void *)(((uintptr_t)bi_private) | stripe_index);
4037}
4038
4039static struct btrfs_bio *extract_bbio_from_bio_private(void *bi_private)
4040{
4041 return (struct btrfs_bio *)(((uintptr_t)bi_private) & ~((uintptr_t)3));
4042}
4043
4044static unsigned int extract_stripe_index_from_bio_private(void *bi_private)
4045{
4046 return (unsigned int)((uintptr_t)bi_private) & 3;
4047}
4048
4004static void btrfs_end_bio(struct bio *bio, int err) 4049static void btrfs_end_bio(struct bio *bio, int err)
4005{ 4050{
4006 struct btrfs_bio *bbio = bio->bi_private; 4051 struct btrfs_bio *bbio = extract_bbio_from_bio_private(bio->bi_private);
4007 int is_orig_bio = 0; 4052 int is_orig_bio = 0;
4008 4053
4009 if (err) 4054 if (err) {
4010 atomic_inc(&bbio->error); 4055 atomic_inc(&bbio->error);
4056 if (err == -EIO || err == -EREMOTEIO) {
4057 unsigned int stripe_index =
4058 extract_stripe_index_from_bio_private(
4059 bio->bi_private);
4060 struct btrfs_device *dev;
4061
4062 BUG_ON(stripe_index >= bbio->num_stripes);
4063 dev = bbio->stripes[stripe_index].dev;
4064 if (bio->bi_rw & WRITE)
4065 btrfs_dev_stat_inc(dev,
4066 BTRFS_DEV_STAT_WRITE_ERRS);
4067 else
4068 btrfs_dev_stat_inc(dev,
4069 BTRFS_DEV_STAT_READ_ERRS);
4070 if ((bio->bi_rw & WRITE_FLUSH) == WRITE_FLUSH)
4071 btrfs_dev_stat_inc(dev,
4072 BTRFS_DEV_STAT_FLUSH_ERRS);
4073 btrfs_dev_stat_print_on_error(dev);
4074 }
4075 }
4011 4076
4012 if (bio == bbio->orig_bio) 4077 if (bio == bbio->orig_bio)
4013 is_orig_bio = 1; 4078 is_orig_bio = 1;
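The bi_private packing above works because kmalloc'd pointers are at least 4-byte aligned, so the two low bits are free to carry a stripe index of 0-3. A standalone illustration of the same trick (userspace C, demo values only):

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void *merge_index(void *ptr, unsigned int index)
    {
        assert(((uintptr_t)ptr & 3) == 0); /* pointer must be 4-byte aligned */
        assert(index <= 3);                /* only two spare bits */
        return (void *)((uintptr_t)ptr | index);
    }

    static void *extract_ptr(void *packed)
    {
        return (void *)((uintptr_t)packed & ~(uintptr_t)3);
    }

    static unsigned int extract_index(void *packed)
    {
        return (unsigned int)((uintptr_t)packed & 3);
    }

    int main(void)
    {
        int *obj = malloc(sizeof(*obj)); /* malloc gives suitable alignment */
        void *packed = merge_index(obj, 2);

        printf("pointer survives: %d, index: %u\n",
               extract_ptr(packed) == (void *)obj, extract_index(packed));
        free(obj);
        return 0; /* prints "pointer survives: 1, index: 2" */
    }

The alternative mentioned in the comment, a small wrapper struct holding the original bi_private plus the stripe index, would avoid the alignment assumption at the cost of one extra allocation per cloned bio.
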
@@ -4149,14 +4214,23 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
4149 bio = first_bio; 4214 bio = first_bio;
4150 } 4215 }
4151 bio->bi_private = bbio; 4216 bio->bi_private = bbio;
4217 bio->bi_private = merge_stripe_index_into_bio_private(
4218 bio->bi_private, (unsigned int)dev_nr);
4152 bio->bi_end_io = btrfs_end_bio; 4219 bio->bi_end_io = btrfs_end_bio;
4153 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9; 4220 bio->bi_sector = bbio->stripes[dev_nr].physical >> 9;
4154 dev = bbio->stripes[dev_nr].dev; 4221 dev = bbio->stripes[dev_nr].dev;
4155 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) { 4222 if (dev && dev->bdev && (rw != WRITE || dev->writeable)) {
4223#ifdef DEBUG
4224 struct rcu_string *name;
4225
4226 rcu_read_lock();
4227 name = rcu_dereference(dev->name);
4156 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu " 4228 pr_debug("btrfs_map_bio: rw %d, secor=%llu, dev=%lu "
4157 "(%s id %llu), size=%u\n", rw, 4229 "(%s id %llu), size=%u\n", rw,
4158 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev, 4230 (u64)bio->bi_sector, (u_long)dev->bdev->bd_dev,
4159 dev->name, dev->devid, bio->bi_size); 4231 name->str, dev->devid, bio->bi_size);
4232 rcu_read_unlock();
4233#endif
4160 bio->bi_bdev = dev->bdev; 4234 bio->bi_bdev = dev->bdev;
4161 if (async_submit) 4235 if (async_submit)
4162 schedule_bio(root, dev, rw, bio); 4236 schedule_bio(root, dev, rw, bio);
@@ -4509,6 +4583,28 @@ int btrfs_read_sys_array(struct btrfs_root *root)
4509 return ret; 4583 return ret;
4510} 4584}
4511 4585
4586struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
4587 u64 logical, int mirror_num)
4588{
4589 struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
4590 int ret;
4591 u64 map_length = 0;
4592 struct btrfs_bio *bbio = NULL;
4593 struct btrfs_device *device;
4594
4595 BUG_ON(mirror_num == 0);
4596 ret = btrfs_map_block(map_tree, WRITE, logical, &map_length, &bbio,
4597 mirror_num);
4598 if (ret) {
4599 BUG_ON(bbio != NULL);
4600 return NULL;
4601 }
4602 BUG_ON(mirror_num != bbio->mirror_num);
4603 device = bbio->stripes[mirror_num - 1].dev;
4604 kfree(bbio);
4605 return device;
4606}
4607
4512int btrfs_read_chunk_tree(struct btrfs_root *root) 4608int btrfs_read_chunk_tree(struct btrfs_root *root)
4513{ 4609{
4514 struct btrfs_path *path; 4610 struct btrfs_path *path;
@@ -4583,3 +4679,231 @@ error:
4583 btrfs_free_path(path); 4679 btrfs_free_path(path);
4584 return ret; 4680 return ret;
4585} 4681}
4682
4683static void __btrfs_reset_dev_stats(struct btrfs_device *dev)
4684{
4685 int i;
4686
4687 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4688 btrfs_dev_stat_reset(dev, i);
4689}
4690
4691int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info)
4692{
4693 struct btrfs_key key;
4694 struct btrfs_key found_key;
4695 struct btrfs_root *dev_root = fs_info->dev_root;
4696 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4697 struct extent_buffer *eb;
4698 int slot;
4699 int ret = 0;
4700 struct btrfs_device *device;
4701 struct btrfs_path *path = NULL;
4702 int i;
4703
4704 path = btrfs_alloc_path();
4705 if (!path) {
4706 ret = -ENOMEM;
4707 goto out;
4708 }
4709
4710 mutex_lock(&fs_devices->device_list_mutex);
4711 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4712 int item_size;
4713 struct btrfs_dev_stats_item *ptr;
4714
4715 key.objectid = 0;
4716 key.type = BTRFS_DEV_STATS_KEY;
4717 key.offset = device->devid;
4718 ret = btrfs_search_slot(NULL, dev_root, &key, path, 0, 0);
4719 if (ret) {
4720 printk_in_rcu(KERN_WARNING "btrfs: no dev_stats entry found for device %s (devid %llu) (OK on first mount after mkfs)\n",
4721 rcu_str_deref(device->name),
4722 (unsigned long long)device->devid);
4723 __btrfs_reset_dev_stats(device);
4724 device->dev_stats_valid = 1;
4725 btrfs_release_path(path);
4726 continue;
4727 }
4728 slot = path->slots[0];
4729 eb = path->nodes[0];
4730 btrfs_item_key_to_cpu(eb, &found_key, slot);
4731 item_size = btrfs_item_size_nr(eb, slot);
4732
4733 ptr = btrfs_item_ptr(eb, slot,
4734 struct btrfs_dev_stats_item);
4735
4736 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4737 if (item_size >= (1 + i) * sizeof(__le64))
4738 btrfs_dev_stat_set(device, i,
4739 btrfs_dev_stats_value(eb, ptr, i));
4740 else
4741 btrfs_dev_stat_reset(device, i);
4742 }
4743
4744 device->dev_stats_valid = 1;
4745 btrfs_dev_stat_print_on_load(device);
4746 btrfs_release_path(path);
4747 }
4748 mutex_unlock(&fs_devices->device_list_mutex);
4749
4750out:
4751 btrfs_free_path(path);
4752 return ret < 0 ? ret : 0;
4753}
4754
4755static int update_dev_stat_item(struct btrfs_trans_handle *trans,
4756 struct btrfs_root *dev_root,
4757 struct btrfs_device *device)
4758{
4759 struct btrfs_path *path;
4760 struct btrfs_key key;
4761 struct extent_buffer *eb;
4762 struct btrfs_dev_stats_item *ptr;
4763 int ret;
4764 int i;
4765
4766 key.objectid = 0;
4767 key.type = BTRFS_DEV_STATS_KEY;
4768 key.offset = device->devid;
4769
4770 path = btrfs_alloc_path();
4771 BUG_ON(!path);
4772 ret = btrfs_search_slot(trans, dev_root, &key, path, -1, 1);
4773 if (ret < 0) {
4774 printk_in_rcu(KERN_WARNING "btrfs: error %d while searching for dev_stats item for device %s!\n",
4775 ret, rcu_str_deref(device->name));
4776 goto out;
4777 }
4778
4779 if (ret == 0 &&
4780 btrfs_item_size_nr(path->nodes[0], path->slots[0]) < sizeof(*ptr)) {
4781 /* need to delete old one and insert a new one */
4782 ret = btrfs_del_item(trans, dev_root, path);
4783 if (ret != 0) {
4784 printk_in_rcu(KERN_WARNING "btrfs: delete too small dev_stats item for device %s failed %d!\n",
4785 rcu_str_deref(device->name), ret);
4786 goto out;
4787 }
4788 ret = 1;
4789 }
4790
4791 if (ret == 1) {
4792 /* need to insert a new item */
4793 btrfs_release_path(path);
4794 ret = btrfs_insert_empty_item(trans, dev_root, path,
4795 &key, sizeof(*ptr));
4796 if (ret < 0) {
4797 printk_in_rcu(KERN_WARNING "btrfs: insert dev_stats item for device %s failed %d!\n",
4798 rcu_str_deref(device->name), ret);
4799 goto out;
4800 }
4801 }
4802
4803 eb = path->nodes[0];
4804 ptr = btrfs_item_ptr(eb, path->slots[0], struct btrfs_dev_stats_item);
4805 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4806 btrfs_set_dev_stats_value(eb, ptr, i,
4807 btrfs_dev_stat_read(device, i));
4808 btrfs_mark_buffer_dirty(eb);
4809
4810out:
4811 btrfs_free_path(path);
4812 return ret;
4813}
4814
4815/*
4816 * called from commit_transaction. Writes all changed device stats to disk.
4817 */
4818int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
4819 struct btrfs_fs_info *fs_info)
4820{
4821 struct btrfs_root *dev_root = fs_info->dev_root;
4822 struct btrfs_fs_devices *fs_devices = fs_info->fs_devices;
4823 struct btrfs_device *device;
4824 int ret = 0;
4825
4826 mutex_lock(&fs_devices->device_list_mutex);
4827 list_for_each_entry(device, &fs_devices->devices, dev_list) {
4828 if (!device->dev_stats_valid || !device->dev_stats_dirty)
4829 continue;
4830
4831 ret = update_dev_stat_item(trans, dev_root, device);
4832 if (!ret)
4833 device->dev_stats_dirty = 0;
4834 }
4835 mutex_unlock(&fs_devices->device_list_mutex);
4836
4837 return ret;
4838}
4839
4840void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index)
4841{
4842 btrfs_dev_stat_inc(dev, index);
4843 btrfs_dev_stat_print_on_error(dev);
4844}
4845
4846void btrfs_dev_stat_print_on_error(struct btrfs_device *dev)
4847{
4848 if (!dev->dev_stats_valid)
4849 return;
4850 printk_ratelimited_in_rcu(KERN_ERR
4851 "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4852 rcu_str_deref(dev->name),
4853 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4854 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4855 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4856 btrfs_dev_stat_read(dev,
4857 BTRFS_DEV_STAT_CORRUPTION_ERRS),
4858 btrfs_dev_stat_read(dev,
4859 BTRFS_DEV_STAT_GENERATION_ERRS));
4860}
4861
4862static void btrfs_dev_stat_print_on_load(struct btrfs_device *dev)
4863{
4864 printk_in_rcu(KERN_INFO "btrfs: bdev %s errs: wr %u, rd %u, flush %u, corrupt %u, gen %u\n",
4865 rcu_str_deref(dev->name),
4866 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_WRITE_ERRS),
4867 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_READ_ERRS),
4868 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_FLUSH_ERRS),
4869 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_CORRUPTION_ERRS),
4870 btrfs_dev_stat_read(dev, BTRFS_DEV_STAT_GENERATION_ERRS));
4871}
4872
4873int btrfs_get_dev_stats(struct btrfs_root *root,
4874 struct btrfs_ioctl_get_dev_stats *stats,
4875 int reset_after_read)
4876{
4877 struct btrfs_device *dev;
4878 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
4879 int i;
4880
4881 mutex_lock(&fs_devices->device_list_mutex);
4882 dev = btrfs_find_device(root, stats->devid, NULL, NULL);
4883 mutex_unlock(&fs_devices->device_list_mutex);
4884
4885 if (!dev) {
4886 printk(KERN_WARNING
4887 "btrfs: get dev_stats failed, device not found\n");
4888 return -ENODEV;
4889 } else if (!dev->dev_stats_valid) {
4890 printk(KERN_WARNING
4891 "btrfs: get dev_stats failed, not yet valid\n");
4892 return -ENODEV;
4893 } else if (reset_after_read) {
4894 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++) {
4895 if (stats->nr_items > i)
4896 stats->values[i] =
4897 btrfs_dev_stat_read_and_reset(dev, i);
4898 else
4899 btrfs_dev_stat_reset(dev, i);
4900 }
4901 } else {
4902 for (i = 0; i < BTRFS_DEV_STAT_VALUES_MAX; i++)
4903 if (stats->nr_items > i)
4904 stats->values[i] = btrfs_dev_stat_read(dev, i);
4905 }
4906 if (stats->nr_items > BTRFS_DEV_STAT_VALUES_MAX)
4907 stats->nr_items = BTRFS_DEV_STAT_VALUES_MAX;
4908 return 0;
4909}
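To make the item_size check in btrfs_init_dev_stats() concrete: counter i is loaded only if the on-disk item carries at least (1 + i) * sizeof(__le64) bytes. With the five counters printed by the helpers above (wr, rd, flush, corrupt, gen), a current item is 5 * 8 = 40 bytes; if a later kernel grew the array, an old 40-byte item would still fill indexes 0-4 while the newer indexes are simply reset, and update_dev_stat_item() notices the undersized item at the next commit, deletes it and re-inserts one at the full size.
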
diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h
index bb6b03f97aaa..74366f27a76b 100644
--- a/fs/btrfs/volumes.h
+++ b/fs/btrfs/volumes.h
@@ -22,6 +22,7 @@
22#include <linux/bio.h> 22#include <linux/bio.h>
23#include <linux/sort.h> 23#include <linux/sort.h>
24#include "async-thread.h" 24#include "async-thread.h"
25#include "ioctl.h"
25 26
26#define BTRFS_STRIPE_LEN (64 * 1024) 27#define BTRFS_STRIPE_LEN (64 * 1024)
27 28
@@ -57,7 +58,7 @@ struct btrfs_device {
57 /* the mode sent to blkdev_get */ 58 /* the mode sent to blkdev_get */
58 fmode_t mode; 59 fmode_t mode;
59 60
60 char *name; 61 struct rcu_string *name;
61 62
62 /* the internal btrfs device id */ 63 /* the internal btrfs device id */
63 u64 devid; 64 u64 devid;
@@ -106,6 +107,11 @@ struct btrfs_device {
106 struct completion flush_wait; 107 struct completion flush_wait;
107 int nobarriers; 108 int nobarriers;
108 109
110 /* disk I/O failure stats. For detailed description refer to
111 * enum btrfs_dev_stat_values in ioctl.h */
112 int dev_stats_valid;
113 int dev_stats_dirty; /* counters need to be written to disk */
114 atomic_t dev_stat_values[BTRFS_DEV_STAT_VALUES_MAX];
109}; 115};
110 116
111struct btrfs_fs_devices { 117struct btrfs_fs_devices {
@@ -281,4 +287,50 @@ int btrfs_cancel_balance(struct btrfs_fs_info *fs_info);
281int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset); 287int btrfs_chunk_readonly(struct btrfs_root *root, u64 chunk_offset);
282int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes, 288int find_free_dev_extent(struct btrfs_device *device, u64 num_bytes,
283 u64 *start, u64 *max_avail); 289 u64 *start, u64 *max_avail);
290struct btrfs_device *btrfs_find_device_for_logical(struct btrfs_root *root,
291 u64 logical, int mirror_num);
292void btrfs_dev_stat_print_on_error(struct btrfs_device *device);
293void btrfs_dev_stat_inc_and_print(struct btrfs_device *dev, int index);
294int btrfs_get_dev_stats(struct btrfs_root *root,
295 struct btrfs_ioctl_get_dev_stats *stats,
296 int reset_after_read);
297int btrfs_init_dev_stats(struct btrfs_fs_info *fs_info);
298int btrfs_run_dev_stats(struct btrfs_trans_handle *trans,
299 struct btrfs_fs_info *fs_info);
300
301static inline void btrfs_dev_stat_inc(struct btrfs_device *dev,
302 int index)
303{
304 atomic_inc(dev->dev_stat_values + index);
305 dev->dev_stats_dirty = 1;
306}
307
308static inline int btrfs_dev_stat_read(struct btrfs_device *dev,
309 int index)
310{
311 return atomic_read(dev->dev_stat_values + index);
312}
313
314static inline int btrfs_dev_stat_read_and_reset(struct btrfs_device *dev,
315 int index)
316{
317 int ret;
318
319 ret = atomic_xchg(dev->dev_stat_values + index, 0);
320 dev->dev_stats_dirty = 1;
321 return ret;
322}
323
324static inline void btrfs_dev_stat_set(struct btrfs_device *dev,
325 int index, unsigned long val)
326{
327 atomic_set(dev->dev_stat_values + index, val);
328 dev->dev_stats_dirty = 1;
329}
330
331static inline void btrfs_dev_stat_reset(struct btrfs_device *dev,
332 int index)
333{
334 btrfs_dev_stat_set(dev, index, 0);
335}
284#endif 336#endif
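Note how the inline accessors keep the persistence machinery honest: every mutator (btrfs_dev_stat_inc, _set, _read_and_reset) also raises dev_stats_dirty, which is exactly what btrfs_run_dev_stats() tests at commit time before rewriting the item, and _read_and_reset uses atomic_xchg() so an error that races with the ioctl's reset_after_read path is either reported in the returned value or left in the counter, never dropped.
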
diff --git a/fs/btrfs/xattr.c b/fs/btrfs/xattr.c
index e7a5659087e6..3f4e2d69e83a 100644
--- a/fs/btrfs/xattr.c
+++ b/fs/btrfs/xattr.c
@@ -196,6 +196,7 @@ int __btrfs_setxattr(struct btrfs_trans_handle *trans,
196 if (ret) 196 if (ret)
197 goto out; 197 goto out;
198 198
199 inode_inc_iversion(inode);
199 inode->i_ctime = CURRENT_TIME; 200 inode->i_ctime = CURRENT_TIME;
200 ret = btrfs_update_inode(trans, root, inode); 201 ret = btrfs_update_inode(trans, root, inode);
201 BUG_ON(ret); 202 BUG_ON(ret);
diff --git a/fs/buffer.c b/fs/buffer.c
index ad5938ca357c..838a9cf246bd 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -3152,7 +3152,7 @@ SYSCALL_DEFINE2(bdflush, int, func, long, data)
3152/* 3152/*
3153 * Buffer-head allocation 3153 * Buffer-head allocation
3154 */ 3154 */
3155static struct kmem_cache *bh_cachep; 3155static struct kmem_cache *bh_cachep __read_mostly;
3156 3156
3157/* 3157/*
3158 * Once the number of bh's in the machine exceeds this level, we start 3158 * Once the number of bh's in the machine exceeds this level, we start
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 173b1d22e59b..8b67304e4b80 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -54,7 +54,12 @@
54 (CONGESTION_ON_THRESH(congestion_kb) - \ 54 (CONGESTION_ON_THRESH(congestion_kb) - \
55 (CONGESTION_ON_THRESH(congestion_kb) >> 2)) 55 (CONGESTION_ON_THRESH(congestion_kb) >> 2))
56 56
57 57static inline struct ceph_snap_context *page_snap_context(struct page *page)
58{
59 if (PagePrivate(page))
60 return (void *)page->private;
61 return NULL;
62}
58 63
59/* 64/*
60 * Dirty a page. Optimistically adjust accounting, on the assumption 65 * Dirty a page. Optimistically adjust accounting, on the assumption
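page_snap_context() gives the open-coded (void *)page->private casts below a single NULL-safe home: it only trusts page->private when PagePrivate is set, which is why the separate BUG_ON(!page->private) and WARN_ON(page->private) checks can be dropped. For orientation, the store side it pairs with looks roughly like the sketch below (conceptual only; the real assignment lives in ceph's set_page_dirty path, not in this hunk):

    /* when a page is dirtied against a snap context (sketch) */
    page->private = (unsigned long)snapc;
    SetPagePrivate(page);
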
@@ -142,10 +147,9 @@ static void ceph_invalidatepage(struct page *page, unsigned long offset)
142{ 147{
143 struct inode *inode; 148 struct inode *inode;
144 struct ceph_inode_info *ci; 149 struct ceph_inode_info *ci;
145 struct ceph_snap_context *snapc = (void *)page->private; 150 struct ceph_snap_context *snapc = page_snap_context(page);
146 151
147 BUG_ON(!PageLocked(page)); 152 BUG_ON(!PageLocked(page));
148 BUG_ON(!page->private);
149 BUG_ON(!PagePrivate(page)); 153 BUG_ON(!PagePrivate(page));
150 BUG_ON(!page->mapping); 154 BUG_ON(!page->mapping);
151 155
@@ -182,7 +186,6 @@ static int ceph_releasepage(struct page *page, gfp_t g)
182 struct inode *inode = page->mapping ? page->mapping->host : NULL; 186 struct inode *inode = page->mapping ? page->mapping->host : NULL;
183 dout("%p releasepage %p idx %lu\n", inode, page, page->index); 187 dout("%p releasepage %p idx %lu\n", inode, page, page->index);
184 WARN_ON(PageDirty(page)); 188 WARN_ON(PageDirty(page));
185 WARN_ON(page->private);
186 WARN_ON(PagePrivate(page)); 189 WARN_ON(PagePrivate(page));
187 return 0; 190 return 0;
188} 191}
@@ -443,7 +446,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
443 osdc = &fsc->client->osdc; 446 osdc = &fsc->client->osdc;
444 447
445 /* verify this is a writeable snap context */ 448 /* verify this is a writeable snap context */
446 snapc = (void *)page->private; 449 snapc = page_snap_context(page);
447 if (snapc == NULL) { 450 if (snapc == NULL) {
448 dout("writepage %p page %p not dirty?\n", inode, page); 451 dout("writepage %p page %p not dirty?\n", inode, page);
449 goto out; 452 goto out;
@@ -451,7 +454,7 @@ static int writepage_nounlock(struct page *page, struct writeback_control *wbc)
451 oldest = get_oldest_context(inode, &snap_size); 454 oldest = get_oldest_context(inode, &snap_size);
452 if (snapc->seq > oldest->seq) { 455 if (snapc->seq > oldest->seq) {
453 dout("writepage %p page %p snapc %p not writeable - noop\n", 456 dout("writepage %p page %p snapc %p not writeable - noop\n",
454 inode, page, (void *)page->private); 457 inode, page, snapc);
455 /* we should only noop if called by kswapd */ 458 /* we should only noop if called by kswapd */
456 WARN_ON((current->flags & PF_MEMALLOC) == 0); 459 WARN_ON((current->flags & PF_MEMALLOC) == 0);
457 ceph_put_snap_context(oldest); 460 ceph_put_snap_context(oldest);
@@ -591,7 +594,7 @@ static void writepages_finish(struct ceph_osd_request *req,
591 clear_bdi_congested(&fsc->backing_dev_info, 594 clear_bdi_congested(&fsc->backing_dev_info,
592 BLK_RW_ASYNC); 595 BLK_RW_ASYNC);
593 596
594 ceph_put_snap_context((void *)page->private); 597 ceph_put_snap_context(page_snap_context(page));
595 page->private = 0; 598 page->private = 0;
596 ClearPagePrivate(page); 599 ClearPagePrivate(page);
597 dout("unlocking %d %p\n", i, page); 600 dout("unlocking %d %p\n", i, page);
@@ -795,7 +798,7 @@ get_more_pages:
795 } 798 }
796 799
797 /* only if matching snap context */ 800 /* only if matching snap context */
798 pgsnapc = (void *)page->private; 801 pgsnapc = page_snap_context(page);
799 if (pgsnapc->seq > snapc->seq) { 802 if (pgsnapc->seq > snapc->seq) {
800 dout("page snapc %p %lld > oldest %p %lld\n", 803 dout("page snapc %p %lld > oldest %p %lld\n",
801 pgsnapc, pgsnapc->seq, snapc, snapc->seq); 804 pgsnapc, pgsnapc->seq, snapc, snapc->seq);
@@ -984,7 +987,7 @@ retry_locked:
984 BUG_ON(!ci->i_snap_realm); 987 BUG_ON(!ci->i_snap_realm);
985 down_read(&mdsc->snap_rwsem); 988 down_read(&mdsc->snap_rwsem);
986 BUG_ON(!ci->i_snap_realm->cached_context); 989 BUG_ON(!ci->i_snap_realm->cached_context);
987 snapc = (void *)page->private; 990 snapc = page_snap_context(page);
988 if (snapc && snapc != ci->i_head_snapc) { 991 if (snapc && snapc != ci->i_head_snapc) {
989 /* 992 /*
990 * this page is already dirty in another (older) snap 993 * this page is already dirty in another (older) snap
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index fbb2a643ef10..8e1b60e557b6 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -40,38 +40,49 @@ struct ceph_nfs_confh {
40 u32 parent_name_hash; 40 u32 parent_name_hash;
41} __attribute__ ((packed)); 41} __attribute__ ((packed));
42 42
43static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, 43/*
44 int connectable) 44 * The presence of @parent_inode here tells us whether NFS wants a
45 * connectable file handle. However, we want to make a connectable
46 * file handle unconditionally so that the MDS gets as much of a hint
47 * as possible. That means we only use @parent_dentry to indicate
48 * whether nfsd wants a connectable fh, and whether we should indicate
49 * failure from a too-small @max_len.
50 */
51static int ceph_encode_fh(struct inode *inode, u32 *rawfh, int *max_len,
52 struct inode *parent_inode)
45{ 53{
46 int type; 54 int type;
47 struct ceph_nfs_fh *fh = (void *)rawfh; 55 struct ceph_nfs_fh *fh = (void *)rawfh;
48 struct ceph_nfs_confh *cfh = (void *)rawfh; 56 struct ceph_nfs_confh *cfh = (void *)rawfh;
49 struct dentry *parent;
50 struct inode *inode = dentry->d_inode;
51 int connected_handle_length = sizeof(*cfh)/4; 57 int connected_handle_length = sizeof(*cfh)/4;
52 int handle_length = sizeof(*fh)/4; 58 int handle_length = sizeof(*fh)/4;
59 struct dentry *dentry = d_find_alias(inode);
60 struct dentry *parent;
53 61
54 /* don't re-export snaps */ 62 /* don't re-export snaps */
55 if (ceph_snap(inode) != CEPH_NOSNAP) 63 if (ceph_snap(inode) != CEPH_NOSNAP)
56 return -EINVAL; 64 return -EINVAL;
57 65
58 spin_lock(&dentry->d_lock); 66 /* if we found an alias, generate a connectable fh */
59 parent = dentry->d_parent; 67 if (*max_len >= connected_handle_length && dentry) {
60 if (*max_len >= connected_handle_length) {
61 dout("encode_fh %p connectable\n", dentry); 68 dout("encode_fh %p connectable\n", dentry);
62 cfh->ino = ceph_ino(dentry->d_inode); 69 spin_lock(&dentry->d_lock);
70 parent = dentry->d_parent;
71 cfh->ino = ceph_ino(inode);
63 cfh->parent_ino = ceph_ino(parent->d_inode); 72 cfh->parent_ino = ceph_ino(parent->d_inode);
64 cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode, 73 cfh->parent_name_hash = ceph_dentry_hash(parent->d_inode,
65 dentry); 74 dentry);
66 *max_len = connected_handle_length; 75 *max_len = connected_handle_length;
67 type = 2; 76 type = 2;
77 spin_unlock(&dentry->d_lock);
68 } else if (*max_len >= handle_length) { 78 } else if (*max_len >= handle_length) {
69 if (connectable) { 79 if (parent_inode) {
80 /* nfsd wants connectable */
70 *max_len = connected_handle_length; 81 *max_len = connected_handle_length;
71 type = 255; 82 type = 255;
72 } else { 83 } else {
73 dout("encode_fh %p\n", dentry); 84 dout("encode_fh %p\n", dentry);
74 fh->ino = ceph_ino(dentry->d_inode); 85 fh->ino = ceph_ino(inode);
75 *max_len = handle_length; 86 *max_len = handle_length;
76 type = 1; 87 type = 1;
77 } 88 }
@@ -79,7 +90,6 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len,
79 *max_len = handle_length; 90 *max_len = handle_length;
80 type = 255; 91 type = 255;
81 } 92 }
82 spin_unlock(&dentry->d_lock);
83 return type; 93 return type;
84} 94}
85 95
diff --git a/fs/ceph/snap.c b/fs/ceph/snap.c
index f04c0961f993..e5206fc76562 100644
--- a/fs/ceph/snap.c
+++ b/fs/ceph/snap.c
@@ -331,7 +331,7 @@ static int build_snap_context(struct ceph_snap_realm *realm)
331 331
332 /* alloc new snap context */ 332 /* alloc new snap context */
333 err = -ENOMEM; 333 err = -ENOMEM;
334 if (num > (ULONG_MAX - sizeof(*snapc)) / sizeof(u64)) 334 if (num > (SIZE_MAX - sizeof(*snapc)) / sizeof(u64))
335 goto fail; 335 goto fail;
336 snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS); 336 snapc = kzalloc(sizeof(*snapc) + num*sizeof(u64), GFP_NOFS);
337 if (!snapc) 337 if (!snapc)
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 20350a93ed99..6df0cbe1cbc9 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -174,6 +174,7 @@ struct smb_version_operations {
174 void (*add_credits)(struct TCP_Server_Info *, const unsigned int); 174 void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
175 void (*set_credits)(struct TCP_Server_Info *, const int); 175 void (*set_credits)(struct TCP_Server_Info *, const int);
176 int * (*get_credits_field)(struct TCP_Server_Info *); 176 int * (*get_credits_field)(struct TCP_Server_Info *);
177 __u64 (*get_next_mid)(struct TCP_Server_Info *);
177 /* data offset from read response message */ 178 /* data offset from read response message */
178 unsigned int (*read_data_offset)(char *); 179 unsigned int (*read_data_offset)(char *);
179 /* data length from read response message */ 180 /* data length from read response message */
@@ -399,6 +400,12 @@ set_credits(struct TCP_Server_Info *server, const int val)
399 server->ops->set_credits(server, val); 400 server->ops->set_credits(server, val);
400} 401}
401 402
403static inline __u64
404get_next_mid(struct TCP_Server_Info *server)
405{
406 return server->ops->get_next_mid(server);
407}
408
402/* 409/*
403 * Macros to allow the TCP_Server_Info->net field and related code to drop out 410 * Macros to allow the TCP_Server_Info->net field and related code to drop out
404 * when CONFIG_NET_NS isn't set. 411 * when CONFIG_NET_NS isn't set.
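get_next_mid() continues the move of dialect-specific behaviour into struct smb_version_operations: callers ask the wrapper, and the per-protocol ops table decides how multiplex IDs are generated. The shape of that indirection, reduced to a self-contained demo (all names here are invented for illustration, and the trivial counter stands in for the real SMB1 mid-allocation logic):

    #include <stdio.h>

    struct demo_server;

    /* stand-in for smb_version_operations */
    struct demo_ops {
        unsigned long long (*get_next_mid)(struct demo_server *);
    };

    /* stand-in for TCP_Server_Info */
    struct demo_server {
        const struct demo_ops *ops;
        unsigned long long current_mid;
    };

    static unsigned long long smb1_demo_get_next_mid(struct demo_server *server)
    {
        return server->current_mid++;
    }

    static const struct demo_ops smb1_demo_ops = {
        .get_next_mid = smb1_demo_get_next_mid,
    };

    /* the thin wrapper every call site uses, mirroring get_next_mid() above */
    static unsigned long long get_next_mid(struct demo_server *server)
    {
        return server->ops->get_next_mid(server);
    }

    int main(void)
    {
        struct demo_server srv = { .ops = &smb1_demo_ops, .current_mid = 1 };
        unsigned long long a = get_next_mid(&srv);
        unsigned long long b = get_next_mid(&srv);

        printf("%llu %llu\n", a, b); /* prints "1 2" */
        return 0;
    }

The cifsproto.h, cifssmb.c and connect.c hunks that follow are then mechanical: the exported GetNextMid() disappears, its call sites become get_next_mid(server), and code that may run against a server whose ops table lacks a given hook (like is_oplock_break below) checks the pointer before calling through it.
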
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 5ec21ecf7980..0a6cbfe2761e 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -114,7 +114,6 @@ extern int small_smb_init_no_tc(const int smb_cmd, const int wct,
114 void **request_buf); 114 void **request_buf);
115extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses, 115extern int CIFS_SessSetup(unsigned int xid, struct cifs_ses *ses,
116 const struct nls_table *nls_cp); 116 const struct nls_table *nls_cp);
117extern __u64 GetNextMid(struct TCP_Server_Info *server);
118extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601); 117extern struct timespec cifs_NTtimeToUnix(__le64 utc_nanoseconds_since_1601);
119extern u64 cifs_UnixTimeToNT(struct timespec); 118extern u64 cifs_UnixTimeToNT(struct timespec);
120extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time, 119extern struct timespec cnvrtDosUnixTm(__le16 le_date, __le16 le_time,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index b5ad716b2642..5b400730c213 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -268,7 +268,7 @@ small_smb_init_no_tc(const int smb_command, const int wct,
268 return rc; 268 return rc;
269 269
270 buffer = (struct smb_hdr *)*request_buf; 270 buffer = (struct smb_hdr *)*request_buf;
271 buffer->Mid = GetNextMid(ses->server); 271 buffer->Mid = get_next_mid(ses->server);
272 if (ses->capabilities & CAP_UNICODE) 272 if (ses->capabilities & CAP_UNICODE)
273 buffer->Flags2 |= SMBFLG2_UNICODE; 273 buffer->Flags2 |= SMBFLG2_UNICODE;
274 if (ses->capabilities & CAP_STATUS32) 274 if (ses->capabilities & CAP_STATUS32)
@@ -402,7 +402,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
402 402
403 cFYI(1, "secFlags 0x%x", secFlags); 403 cFYI(1, "secFlags 0x%x", secFlags);
404 404
405 pSMB->hdr.Mid = GetNextMid(server); 405 pSMB->hdr.Mid = get_next_mid(server);
406 pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS); 406 pSMB->hdr.Flags2 |= (SMBFLG2_UNICODE | SMBFLG2_ERR_STATUS);
407 407
408 if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5) 408 if ((secFlags & CIFSSEC_MUST_KRB5) == CIFSSEC_MUST_KRB5)
@@ -782,7 +782,7 @@ CIFSSMBLogoff(const int xid, struct cifs_ses *ses)
782 return rc; 782 return rc;
783 } 783 }
784 784
785 pSMB->hdr.Mid = GetNextMid(ses->server); 785 pSMB->hdr.Mid = get_next_mid(ses->server);
786 786
787 if (ses->server->sec_mode & 787 if (ses->server->sec_mode &
788 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) 788 (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED))
@@ -4762,7 +4762,7 @@ getDFSRetry:
4762 4762
4763 /* server pointer checked in called function, 4763 /* server pointer checked in called function,
4764 but should never be null here anyway */ 4764 but should never be null here anyway */
4765 pSMB->hdr.Mid = GetNextMid(ses->server); 4765 pSMB->hdr.Mid = get_next_mid(ses->server);
4766 pSMB->hdr.Tid = ses->ipc_tid; 4766 pSMB->hdr.Tid = ses->ipc_tid;
4767 pSMB->hdr.Uid = ses->Suid; 4767 pSMB->hdr.Uid = ses->Suid;
4768 if (ses->capabilities & CAP_STATUS32) 4768 if (ses->capabilities & CAP_STATUS32)
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index ccafdedd0dbc..78db68a5cf44 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1058,13 +1058,15 @@ cifs_demultiplex_thread(void *p)
1058 if (mid_entry != NULL) { 1058 if (mid_entry != NULL) {
1059 if (!mid_entry->multiRsp || mid_entry->multiEnd) 1059 if (!mid_entry->multiRsp || mid_entry->multiEnd)
1060 mid_entry->callback(mid_entry); 1060 mid_entry->callback(mid_entry);
1061 } else if (!server->ops->is_oplock_break(buf, server)) { 1061 } else if (!server->ops->is_oplock_break ||
1062 !server->ops->is_oplock_break(buf, server)) {
1062 cERROR(1, "No task to wake, unknown frame received! " 1063 cERROR(1, "No task to wake, unknown frame received! "
1063 "NumMids %d", atomic_read(&midCount)); 1064 "NumMids %d", atomic_read(&midCount));
1064 cifs_dump_mem("Received Data is: ", buf, 1065 cifs_dump_mem("Received Data is: ", buf,
1065 HEADER_SIZE(server)); 1066 HEADER_SIZE(server));
1066#ifdef CONFIG_CIFS_DEBUG2 1067#ifdef CONFIG_CIFS_DEBUG2
1067 server->ops->dump_detail(buf); 1068 if (server->ops->dump_detail)
1069 server->ops->dump_detail(buf);
1068 cifs_dump_mids(server); 1070 cifs_dump_mids(server);
1069#endif /* CIFS_DEBUG2 */ 1071#endif /* CIFS_DEBUG2 */
1070 1072
@@ -3938,7 +3940,7 @@ CIFSTCon(unsigned int xid, struct cifs_ses *ses,
3938 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX, 3940 header_assemble(smb_buffer, SMB_COM_TREE_CONNECT_ANDX,
3939 NULL /*no tid */ , 4 /*wct */ ); 3941 NULL /*no tid */ , 4 /*wct */ );
3940 3942
3941 smb_buffer->Mid = GetNextMid(ses->server); 3943 smb_buffer->Mid = get_next_mid(ses->server);
3942 smb_buffer->Uid = ses->Suid; 3944 smb_buffer->Uid = ses->Suid;
3943 pSMB = (TCONX_REQ *) smb_buffer; 3945 pSMB = (TCONX_REQ *) smb_buffer;
3944 pSMBr = (TCONX_RSP *) smb_buffer_response; 3946 pSMBr = (TCONX_RSP *) smb_buffer_response;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 253170dfa716..513adbc211d7 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -876,7 +876,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
876 struct cifsLockInfo *li, *tmp; 876 struct cifsLockInfo *li, *tmp;
877 struct cifs_tcon *tcon; 877 struct cifs_tcon *tcon;
878 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); 878 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
879 unsigned int num, max_num; 879 unsigned int num, max_num, max_buf;
880 LOCKING_ANDX_RANGE *buf, *cur; 880 LOCKING_ANDX_RANGE *buf, *cur;
881 int types[] = {LOCKING_ANDX_LARGE_FILES, 881 int types[] = {LOCKING_ANDX_LARGE_FILES,
882 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; 882 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
@@ -892,8 +892,19 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
892 return rc; 892 return rc;
893 } 893 }
894 894
895 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / 895 /*
896 sizeof(LOCKING_ANDX_RANGE); 896 * Accessing maxBuf is racy with cifs_reconnect - need to store value
897 * and check it for zero before using.
898 */
899 max_buf = tcon->ses->server->maxBuf;
900 if (!max_buf) {
901 mutex_unlock(&cinode->lock_mutex);
902 FreeXid(xid);
903 return -EINVAL;
904 }
905
906 max_num = (max_buf - sizeof(struct smb_hdr)) /
907 sizeof(LOCKING_ANDX_RANGE);
897 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 908 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
898 if (!buf) { 909 if (!buf) {
899 mutex_unlock(&cinode->lock_mutex); 910 mutex_unlock(&cinode->lock_mutex);
@@ -1218,7 +1229,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1218 int types[] = {LOCKING_ANDX_LARGE_FILES, 1229 int types[] = {LOCKING_ANDX_LARGE_FILES,
1219 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES}; 1230 LOCKING_ANDX_SHARED_LOCK | LOCKING_ANDX_LARGE_FILES};
1220 unsigned int i; 1231 unsigned int i;
1221 unsigned int max_num, num; 1232 unsigned int max_num, num, max_buf;
1222 LOCKING_ANDX_RANGE *buf, *cur; 1233 LOCKING_ANDX_RANGE *buf, *cur;
1223 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1234 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1224 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); 1235 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
@@ -1228,8 +1239,16 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1228 1239
1229 INIT_LIST_HEAD(&tmp_llist); 1240 INIT_LIST_HEAD(&tmp_llist);
1230 1241
1231 max_num = (tcon->ses->server->maxBuf - sizeof(struct smb_hdr)) / 1242 /*
1232 sizeof(LOCKING_ANDX_RANGE); 1243 * Accessing maxBuf is racy with cifs_reconnect - need to store value
1244 * and check it for zero before using.
1245 */
1246 max_buf = tcon->ses->server->maxBuf;
1247 if (!max_buf)
1248 return -EINVAL;
1249
1250 max_num = (max_buf - sizeof(struct smb_hdr)) /
1251 sizeof(LOCKING_ANDX_RANGE);
1233 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL); 1252 buf = kzalloc(max_num * sizeof(LOCKING_ANDX_RANGE), GFP_KERNEL);
1234 if (!buf) 1253 if (!buf)
1235 return -ENOMEM; 1254 return -ENOMEM;
@@ -1247,46 +1266,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1247 continue; 1266 continue;
1248 if (types[i] != li->type) 1267 if (types[i] != li->type)
1249 continue; 1268 continue;
1250 if (!cinode->can_cache_brlcks) { 1269 if (cinode->can_cache_brlcks) {
1251 cur->Pid = cpu_to_le16(li->pid);
1252 cur->LengthLow = cpu_to_le32((u32)li->length);
1253 cur->LengthHigh =
1254 cpu_to_le32((u32)(li->length>>32));
1255 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1256 cur->OffsetHigh =
1257 cpu_to_le32((u32)(li->offset>>32));
1258 /*
1259 * We need to save a lock here to let us add
1260 * it again to the file's list if the unlock
1261 * range request fails on the server.
1262 */
1263 list_move(&li->llist, &tmp_llist);
1264 if (++num == max_num) {
1265 stored_rc = cifs_lockv(xid, tcon,
1266 cfile->netfid,
1267 li->type, num,
1268 0, buf);
1269 if (stored_rc) {
1270 /*
1271 * We failed on the unlock range
1272 * request - add all locks from
1273 * the tmp list to the head of
1274 * the file's list.
1275 */
1276 cifs_move_llist(&tmp_llist,
1277 &cfile->llist);
1278 rc = stored_rc;
1279 } else
1280 /*
1281 * The unlock range request
1282 * succeed - free the tmp list.
1283 */
1284 cifs_free_llist(&tmp_llist);
1285 cur = buf;
1286 num = 0;
1287 } else
1288 cur++;
1289 } else {
1290 /* 1270 /*
1291 * We can cache brlock requests - simply remove 1271 * We can cache brlock requests - simply remove
1292 * a lock from the file's list. 1272 * a lock from the file's list.
@@ -1294,7 +1274,41 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1294 list_del(&li->llist); 1274 list_del(&li->llist);
1295 cifs_del_lock_waiters(li); 1275 cifs_del_lock_waiters(li);
1296 kfree(li); 1276 kfree(li);
1277 continue;
1297 } 1278 }
1279 cur->Pid = cpu_to_le16(li->pid);
1280 cur->LengthLow = cpu_to_le32((u32)li->length);
1281 cur->LengthHigh = cpu_to_le32((u32)(li->length>>32));
1282 cur->OffsetLow = cpu_to_le32((u32)li->offset);
1283 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
1284 /*
1285 * We need to save a lock here to let us add it again to
1286 * the file's list if the unlock range request fails on
1287 * the server.
1288 */
1289 list_move(&li->llist, &tmp_llist);
1290 if (++num == max_num) {
1291 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1292 li->type, num, 0, buf);
1293 if (stored_rc) {
1294 /*
1295 * We failed on the unlock range
1296 * request - add all locks from the tmp
1297 * list to the head of the file's list.
1298 */
1299 cifs_move_llist(&tmp_llist,
1300 &cfile->llist);
1301 rc = stored_rc;
1302 } else
1303 /*
1304 * The unlock range request succeed -
1305 * free the tmp list.
1306 */
1307 cifs_free_llist(&tmp_llist);
1308 cur = buf;
1309 num = 0;
1310 } else
1311 cur++;
1298 } 1312 }
1299 if (num) { 1313 if (num) {
1300 stored_rc = cifs_lockv(xid, tcon, cfile->netfid, 1314 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
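Both locking paths in file.c now read server->maxBuf exactly once into a local and bail out if it is zero, because cifs_reconnect can clear the field concurrently. The pattern, reduced to a user-space sketch with invented names:

    #include <stdint.h>
    #include <stddef.h>
    #include <errno.h>

    struct lock_range { uint64_t offset, length; };
    struct smb_hdr    { char raw[32]; };

    /* max_buf may be reset to 0 by a concurrent reconnect, so snapshot it
     * once and validate before dividing by anything derived from it. */
    static int ranges_per_request(const volatile size_t *max_buf_p, size_t *out)
    {
        size_t max_buf = *max_buf_p;     /* single read of the racy field */
        if (!max_buf)
            return -EINVAL;
        *out = (max_buf - sizeof(struct smb_hdr)) / sizeof(struct lock_range);
        return 0;
    }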
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index e2552d2b2e42..557506ae1e2a 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -212,93 +212,6 @@ cifs_small_buf_release(void *buf_to_free)
212 return; 212 return;
213} 213}
214 214
215/*
216 * Find a free multiplex id (SMB mid). Otherwise there could be
217 * mid collisions which might cause problems, demultiplexing the
218 * wrong response to this request. Multiplex ids could collide if
219 * one of a series requests takes much longer than the others, or
220 * if a very large number of long lived requests (byte range
221 * locks or FindNotify requests) are pending. No more than
222 * 64K-1 requests can be outstanding at one time. If no
223 * mids are available, return zero. A future optimization
224 * could make the combination of mids and uid the key we use
225 * to demultiplex on (rather than mid alone).
226 * In addition to the above check, the cifs demultiplex
227 * code already used the command code as a secondary
228 * check of the frame and if signing is negotiated the
229 * response would be discarded if the mid were the same
230 * but the signature was wrong. Since the mid is not put in the
231 * pending queue until later (when it is about to be dispatched)
232 * we do have to limit the number of outstanding requests
233 * to somewhat less than 64K-1 although it is hard to imagine
234 * so many threads being in the vfs at one time.
235 */
236__u64 GetNextMid(struct TCP_Server_Info *server)
237{
238 __u64 mid = 0;
239 __u16 last_mid, cur_mid;
240 bool collision;
241
242 spin_lock(&GlobalMid_Lock);
243
244 /* mid is 16 bit only for CIFS/SMB */
245 cur_mid = (__u16)((server->CurrentMid) & 0xffff);
246 /* we do not want to loop forever */
247 last_mid = cur_mid;
248 cur_mid++;
249
250 /*
251 * This nested loop looks more expensive than it is.
252 * In practice the list of pending requests is short,
253 * fewer than 50, and the mids are likely to be unique
254 * on the first pass through the loop unless some request
255 * takes longer than the 64 thousand requests before it
256 * (and it would also have to have been a request that
257 * did not time out).
258 */
259 while (cur_mid != last_mid) {
260 struct mid_q_entry *mid_entry;
261 unsigned int num_mids;
262
263 collision = false;
264 if (cur_mid == 0)
265 cur_mid++;
266
267 num_mids = 0;
268 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
269 ++num_mids;
270 if (mid_entry->mid == cur_mid &&
271 mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
272 /* This mid is in use, try a different one */
273 collision = true;
274 break;
275 }
276 }
277
278 /*
279 * if we have more than 32k mids in the list, then something
280 * is very wrong. Possibly a local user is trying to DoS the
281 * box by issuing long-running calls and SIGKILL'ing them. If
282 * we get to 2^16 mids then we're in big trouble as this
283 * function could loop forever.
284 *
285 * Go ahead and assign out the mid in this situation, but force
286 * an eventual reconnect to clean out the pending_mid_q.
287 */
288 if (num_mids > 32768)
289 server->tcpStatus = CifsNeedReconnect;
290
291 if (!collision) {
292 mid = (__u64)cur_mid;
293 server->CurrentMid = mid;
294 break;
295 }
296 cur_mid++;
297 }
298 spin_unlock(&GlobalMid_Lock);
299 return mid;
300}
301
302/* NB: MID can not be set if treeCon not passed in, in that 215/* NB: MID can not be set if treeCon not passed in, in that
303 case it is responsbility of caller to set the mid */ 216 case it is responsbility of caller to set the mid */
304void 217void
@@ -334,7 +247,7 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
334 247
335 /* Uid is not converted */ 248 /* Uid is not converted */
336 buffer->Uid = treeCon->ses->Suid; 249 buffer->Uid = treeCon->ses->Suid;
337 buffer->Mid = GetNextMid(treeCon->ses->server); 250 buffer->Mid = get_next_mid(treeCon->ses->server);
338 } 251 }
339 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) 252 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
340 buffer->Flags2 |= SMBFLG2_DFS; 253 buffer->Flags2 |= SMBFLG2_DFS;
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
index d9d615fbed3f..6dec38f5522d 100644
--- a/fs/cifs/smb1ops.c
+++ b/fs/cifs/smb1ops.c
@@ -125,6 +125,94 @@ cifs_get_credits_field(struct TCP_Server_Info *server)
125 return &server->credits; 125 return &server->credits;
126} 126}
127 127
128/*
129 * Find a free multiplex id (SMB mid). Otherwise there could be
130 * mid collisions which might cause problems, demultiplexing the
131 * wrong response to this request. Multiplex ids could collide if
132 * one of a series requests takes much longer than the others, or
133 * if a very large number of long lived requests (byte range
134 * locks or FindNotify requests) are pending. No more than
135 * 64K-1 requests can be outstanding at one time. If no
136 * mids are available, return zero. A future optimization
137 * could make the combination of mids and uid the key we use
138 * to demultiplex on (rather than mid alone).
139 * In addition to the above check, the cifs demultiplex
140 * code already used the command code as a secondary
141 * check of the frame and if signing is negotiated the
142 * response would be discarded if the mid were the same
143 * but the signature was wrong. Since the mid is not put in the
144 * pending queue until later (when it is about to be dispatched)
145 * we do have to limit the number of outstanding requests
146 * to somewhat less than 64K-1 although it is hard to imagine
147 * so many threads being in the vfs at one time.
148 */
149static __u64
150cifs_get_next_mid(struct TCP_Server_Info *server)
151{
152 __u64 mid = 0;
153 __u16 last_mid, cur_mid;
154 bool collision;
155
156 spin_lock(&GlobalMid_Lock);
157
158 /* mid is 16 bit only for CIFS/SMB */
159 cur_mid = (__u16)((server->CurrentMid) & 0xffff);
160 /* we do not want to loop forever */
161 last_mid = cur_mid;
162 cur_mid++;
163
164 /*
165 * This nested loop looks more expensive than it is.
166 * In practice the list of pending requests is short,
167 * fewer than 50, and the mids are likely to be unique
168 * on the first pass through the loop unless some request
169 * takes longer than the 64 thousand requests before it
170 * (and it would also have to have been a request that
171 * did not time out).
172 */
173 while (cur_mid != last_mid) {
174 struct mid_q_entry *mid_entry;
175 unsigned int num_mids;
176
177 collision = false;
178 if (cur_mid == 0)
179 cur_mid++;
180
181 num_mids = 0;
182 list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) {
183 ++num_mids;
184 if (mid_entry->mid == cur_mid &&
185 mid_entry->mid_state == MID_REQUEST_SUBMITTED) {
186 /* This mid is in use, try a different one */
187 collision = true;
188 break;
189 }
190 }
191
192 /*
193 * if we have more than 32k mids in the list, then something
194 * is very wrong. Possibly a local user is trying to DoS the
195 * box by issuing long-running calls and SIGKILL'ing them. If
196 * we get to 2^16 mids then we're in big trouble as this
197 * function could loop forever.
198 *
199 * Go ahead and assign out the mid in this situation, but force
200 * an eventual reconnect to clean out the pending_mid_q.
201 */
202 if (num_mids > 32768)
203 server->tcpStatus = CifsNeedReconnect;
204
205 if (!collision) {
206 mid = (__u64)cur_mid;
207 server->CurrentMid = mid;
208 break;
209 }
210 cur_mid++;
211 }
212 spin_unlock(&GlobalMid_Lock);
213 return mid;
214}
215
128struct smb_version_operations smb1_operations = { 216struct smb_version_operations smb1_operations = {
129 .send_cancel = send_nt_cancel, 217 .send_cancel = send_nt_cancel,
130 .compare_fids = cifs_compare_fids, 218 .compare_fids = cifs_compare_fids,
@@ -133,6 +221,7 @@ struct smb_version_operations smb1_operations = {
133 .add_credits = cifs_add_credits, 221 .add_credits = cifs_add_credits,
134 .set_credits = cifs_set_credits, 222 .set_credits = cifs_set_credits,
135 .get_credits_field = cifs_get_credits_field, 223 .get_credits_field = cifs_get_credits_field,
224 .get_next_mid = cifs_get_next_mid,
136 .read_data_offset = cifs_read_data_offset, 225 .read_data_offset = cifs_read_data_offset,
137 .read_data_length = cifs_read_data_length, 226 .read_data_length = cifs_read_data_length,
138 .map_error = map_smb_to_linux_error, 227 .map_error = map_smb_to_linux_error,
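The comment block above spells out the allocator's rules: mids are 16-bit, zero means "no mid available", the search wraps at most once around the id space, and an id is only rejected if a pending request in MID_REQUEST_SUBMITTED state already holds it. A much-simplified user-space sketch of that wraparound search, with a plain bitmap standing in for the pending_mid_q walk (names invented):

    #include <stdint.h>
    #include <stdbool.h>

    /* in_use[] stands in for scanning the queue of pending requests. */
    static uint16_t next_mid(uint16_t last, const bool in_use[65536])
    {
        uint16_t cur = (uint16_t)(last + 1);

        while (cur != last) {            /* at most one full wrap */
            if (cur == 0) {              /* 0 is reserved for "no mid" */
                cur++;
                continue;
            }
            if (!in_use[cur])
                return cur;              /* free id found */
            cur++;
        }
        return 0;                        /* every id busy */
    }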
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 1b36ffe6a47b..3097ee58fd7d 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -779,7 +779,7 @@ send_lock_cancel(const unsigned int xid, struct cifs_tcon *tcon,
779 779
780 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES; 780 pSMB->LockType = LOCKING_ANDX_CANCEL_LOCK|LOCKING_ANDX_LARGE_FILES;
781 pSMB->Timeout = 0; 781 pSMB->Timeout = 0;
782 pSMB->hdr.Mid = GetNextMid(ses->server); 782 pSMB->hdr.Mid = get_next_mid(ses->server);
783 783
784 return SendReceive(xid, ses, in_buf, out_buf, 784 return SendReceive(xid, ses, in_buf, out_buf,
785 &bytes_returned, 0); 785 &bytes_returned, 0);
diff --git a/fs/compat.c b/fs/compat.c
index 0781e619a62a..6161255fac45 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -532,7 +532,7 @@ out:
532ssize_t compat_rw_copy_check_uvector(int type, 532ssize_t compat_rw_copy_check_uvector(int type,
533 const struct compat_iovec __user *uvector, unsigned long nr_segs, 533 const struct compat_iovec __user *uvector, unsigned long nr_segs,
534 unsigned long fast_segs, struct iovec *fast_pointer, 534 unsigned long fast_segs, struct iovec *fast_pointer,
535 struct iovec **ret_pointer, int check_access) 535 struct iovec **ret_pointer)
536{ 536{
537 compat_ssize_t tot_len; 537 compat_ssize_t tot_len;
538 struct iovec *iov = *ret_pointer = fast_pointer; 538 struct iovec *iov = *ret_pointer = fast_pointer;
@@ -579,7 +579,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
579 } 579 }
580 if (len < 0) /* size_t not fitting in compat_ssize_t .. */ 580 if (len < 0) /* size_t not fitting in compat_ssize_t .. */
581 goto out; 581 goto out;
582 if (check_access && 582 if (type >= 0 &&
583 !access_ok(vrfy_dir(type), compat_ptr(buf), len)) { 583 !access_ok(vrfy_dir(type), compat_ptr(buf), len)) {
584 ret = -EFAULT; 584 ret = -EFAULT;
585 goto out; 585 goto out;
@@ -871,12 +871,12 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
871{ 871{
872 int error; 872 int error;
873 struct file *file; 873 struct file *file;
874 int fput_needed;
874 struct compat_readdir_callback buf; 875 struct compat_readdir_callback buf;
875 876
876 error = -EBADF; 877 file = fget_light(fd, &fput_needed);
877 file = fget(fd);
878 if (!file) 878 if (!file)
879 goto out; 879 return -EBADF;
880 880
881 buf.result = 0; 881 buf.result = 0;
882 buf.dirent = dirent; 882 buf.dirent = dirent;
@@ -885,8 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
885 if (buf.result) 885 if (buf.result)
886 error = buf.result; 886 error = buf.result;
887 887
888 fput(file); 888 fput_light(file, fput_needed);
889out:
890 return error; 889 return error;
891} 890}
892 891
@@ -953,16 +952,15 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
953 struct file * file; 952 struct file * file;
954 struct compat_linux_dirent __user * lastdirent; 953 struct compat_linux_dirent __user * lastdirent;
955 struct compat_getdents_callback buf; 954 struct compat_getdents_callback buf;
955 int fput_needed;
956 int error; 956 int error;
957 957
958 error = -EFAULT;
959 if (!access_ok(VERIFY_WRITE, dirent, count)) 958 if (!access_ok(VERIFY_WRITE, dirent, count))
960 goto out; 959 return -EFAULT;
961 960
962 error = -EBADF; 961 file = fget_light(fd, &fput_needed);
963 file = fget(fd);
964 if (!file) 962 if (!file)
965 goto out; 963 return -EBADF;
966 964
967 buf.current_dir = dirent; 965 buf.current_dir = dirent;
968 buf.previous = NULL; 966 buf.previous = NULL;
@@ -979,8 +977,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
979 else 977 else
980 error = count - buf.count; 978 error = count - buf.count;
981 } 979 }
982 fput(file); 980 fput_light(file, fput_needed);
983out:
984 return error; 981 return error;
985} 982}
986 983
@@ -1041,16 +1038,15 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
1041 struct file * file; 1038 struct file * file;
1042 struct linux_dirent64 __user * lastdirent; 1039 struct linux_dirent64 __user * lastdirent;
1043 struct compat_getdents_callback64 buf; 1040 struct compat_getdents_callback64 buf;
1041 int fput_needed;
1044 int error; 1042 int error;
1045 1043
1046 error = -EFAULT;
1047 if (!access_ok(VERIFY_WRITE, dirent, count)) 1044 if (!access_ok(VERIFY_WRITE, dirent, count))
1048 goto out; 1045 return -EFAULT;
1049 1046
1050 error = -EBADF; 1047 file = fget_light(fd, &fput_needed);
1051 file = fget(fd);
1052 if (!file) 1048 if (!file)
1053 goto out; 1049 return -EBADF;
1054 1050
1055 buf.current_dir = dirent; 1051 buf.current_dir = dirent;
1056 buf.previous = NULL; 1052 buf.previous = NULL;
@@ -1068,8 +1064,7 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
1068 else 1064 else
1069 error = count - buf.count; 1065 error = count - buf.count;
1070 } 1066 }
1071 fput(file); 1067 fput_light(file, fput_needed);
1072out:
1073 return error; 1068 return error;
1074} 1069}
1075#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */ 1070#endif /* ! __ARCH_OMIT_COMPAT_SYS_GETDENTS64 */
@@ -1094,7 +1089,7 @@ static ssize_t compat_do_readv_writev(int type, struct file *file,
1094 goto out; 1089 goto out;
1095 1090
1096 tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs, 1091 tot_len = compat_rw_copy_check_uvector(type, uvector, nr_segs,
1097 UIO_FASTIOV, iovstack, &iov, 1); 1092 UIO_FASTIOV, iovstack, &iov);
1098 if (tot_len == 0) { 1093 if (tot_len == 0) {
1099 ret = 0; 1094 ret = 0;
1100 goto out; 1095 goto out;
@@ -1547,7 +1542,6 @@ asmlinkage long compat_sys_old_select(struct compat_sel_arg_struct __user *arg)
1547 compat_ptr(a.exp), compat_ptr(a.tvp)); 1542 compat_ptr(a.exp), compat_ptr(a.tvp));
1548} 1543}
1549 1544
1550#ifdef HAVE_SET_RESTORE_SIGMASK
1551static long do_compat_pselect(int n, compat_ulong_t __user *inp, 1545static long do_compat_pselect(int n, compat_ulong_t __user *inp,
1552 compat_ulong_t __user *outp, compat_ulong_t __user *exp, 1546 compat_ulong_t __user *outp, compat_ulong_t __user *exp,
1553 struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask, 1547 struct compat_timespec __user *tsp, compat_sigset_t __user *sigmask,
@@ -1670,11 +1664,9 @@ asmlinkage long compat_sys_ppoll(struct pollfd __user *ufds,
1670 1664
1671 return ret; 1665 return ret;
1672} 1666}
1673#endif /* HAVE_SET_RESTORE_SIGMASK */
1674 1667
1675#ifdef CONFIG_EPOLL 1668#ifdef CONFIG_EPOLL
1676 1669
1677#ifdef HAVE_SET_RESTORE_SIGMASK
1678asmlinkage long compat_sys_epoll_pwait(int epfd, 1670asmlinkage long compat_sys_epoll_pwait(int epfd,
1679 struct compat_epoll_event __user *events, 1671 struct compat_epoll_event __user *events,
1680 int maxevents, int timeout, 1672 int maxevents, int timeout,
@@ -1718,7 +1710,6 @@ asmlinkage long compat_sys_epoll_pwait(int epfd,
1718 1710
1719 return err; 1711 return err;
1720} 1712}
1721#endif /* HAVE_SET_RESTORE_SIGMASK */
1722 1713
1723#endif /* CONFIG_EPOLL */ 1714#endif /* CONFIG_EPOLL */
1724 1715
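The compat readdir/getdents paths switch from fget()/fput() to fget_light()/fput_light() and trade the goto-out labels for early returns. The lightweight pair skips a full reference bump when the fd table has a single owner; fput_needed records whether a real reference was taken. A toy illustration of that "maybe-borrowed reference" shape (purely illustrative, not the VFS implementation):

    #include <stdbool.h>

    struct object { int refcount; bool table_shared; };

    /* Returns the object; *put_needed tells the caller whether the later
     * release must actually drop a reference. */
    static struct object *get_light(struct object *o, int *put_needed)
    {
        if (o->table_shared) {
            o->refcount++;       /* others can see the table: take a real ref */
            *put_needed = 1;
        } else {
            *put_needed = 0;     /* single owner: borrow without refcounting */
        }
        return o;
    }

    static void put_light(struct object *o, int put_needed)
    {
        if (put_needed)
            o->refcount--;
    }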
diff --git a/fs/dcache.c b/fs/dcache.c
index 4435d8b32904..40469044088d 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -2575,7 +2575,7 @@ static int prepend_path(const struct path *path,
2575 bool slash = false; 2575 bool slash = false;
2576 int error = 0; 2576 int error = 0;
2577 2577
2578 br_read_lock(vfsmount_lock); 2578 br_read_lock(&vfsmount_lock);
2579 while (dentry != root->dentry || vfsmnt != root->mnt) { 2579 while (dentry != root->dentry || vfsmnt != root->mnt) {
2580 struct dentry * parent; 2580 struct dentry * parent;
2581 2581
@@ -2606,7 +2606,7 @@ static int prepend_path(const struct path *path,
2606 error = prepend(buffer, buflen, "/", 1); 2606 error = prepend(buffer, buflen, "/", 1);
2607 2607
2608out: 2608out:
2609 br_read_unlock(vfsmount_lock); 2609 br_read_unlock(&vfsmount_lock);
2610 return error; 2610 return error;
2611 2611
2612global_root: 2612global_root:
diff --git a/fs/direct-io.c b/fs/direct-io.c
index f4aadd15b613..0c85fae37666 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -145,50 +145,6 @@ struct dio {
145 145
146static struct kmem_cache *dio_cache __read_mostly; 146static struct kmem_cache *dio_cache __read_mostly;
147 147
148static void __inode_dio_wait(struct inode *inode)
149{
150 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
151 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
152
153 do {
154 prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
155 if (atomic_read(&inode->i_dio_count))
156 schedule();
157 } while (atomic_read(&inode->i_dio_count));
158 finish_wait(wq, &q.wait);
159}
160
161/**
162 * inode_dio_wait - wait for outstanding DIO requests to finish
163 * @inode: inode to wait for
164 *
165 * Waits for all pending direct I/O requests to finish so that we can
166 * proceed with a truncate or equivalent operation.
167 *
168 * Must be called under a lock that serializes taking new references
169 * to i_dio_count, usually by inode->i_mutex.
170 */
171void inode_dio_wait(struct inode *inode)
172{
173 if (atomic_read(&inode->i_dio_count))
174 __inode_dio_wait(inode);
175}
176EXPORT_SYMBOL(inode_dio_wait);
177
178/*
179 * inode_dio_done - signal finish of a direct I/O requests
180 * @inode: inode the direct I/O happens on
181 *
182 * This is called once we've finished processing a direct I/O request,
183 * and is used to wake up callers waiting for direct I/O to be quiesced.
184 */
185void inode_dio_done(struct inode *inode)
186{
187 if (atomic_dec_and_test(&inode->i_dio_count))
188 wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
189}
190EXPORT_SYMBOL(inode_dio_done);
191
192/* 148/*
193 * How many pages are in the queue? 149 * How many pages are in the queue?
194 */ 150 */
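The block removed from direct-io.c documented inode_dio_wait()/inode_dio_done(): a counter of in-flight direct I/O requests plus a wake-up when it drops to zero. The kernel version uses the wait-on-bit machinery; the sketch below expresses the same contract with an ordinary mutex and condition variable (not the kernel code):

    #include <pthread.h>

    struct dio_counter {
        pthread_mutex_t lock;
        pthread_cond_t  zero;
        unsigned long   count;      /* in-flight direct I/O requests */
    };

    static void dio_done(struct dio_counter *c)
    {
        pthread_mutex_lock(&c->lock);
        if (--c->count == 0)
            pthread_cond_broadcast(&c->zero);   /* wake truncate-style waiters */
        pthread_mutex_unlock(&c->lock);
    }

    static void dio_wait(struct dio_counter *c)
    {
        pthread_mutex_lock(&c->lock);
        while (c->count)
            pthread_cond_wait(&c->zero, &c->lock);
        pthread_mutex_unlock(&c->lock);
    }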
diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
index ab35b113003b..a07441a0a878 100644
--- a/fs/ecryptfs/inode.c
+++ b/fs/ecryptfs/inode.c
@@ -660,11 +660,10 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
660{ 660{
661 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry); 661 struct dentry *lower_dentry = ecryptfs_dentry_to_lower(dentry);
662 char *lower_buf; 662 char *lower_buf;
663 size_t lower_bufsiz = PATH_MAX;
664 mm_segment_t old_fs; 663 mm_segment_t old_fs;
665 int rc; 664 int rc;
666 665
667 lower_buf = kmalloc(lower_bufsiz, GFP_KERNEL); 666 lower_buf = kmalloc(PATH_MAX, GFP_KERNEL);
668 if (!lower_buf) { 667 if (!lower_buf) {
669 rc = -ENOMEM; 668 rc = -ENOMEM;
670 goto out; 669 goto out;
@@ -673,58 +672,29 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
673 set_fs(get_ds()); 672 set_fs(get_ds());
674 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, 673 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
675 (char __user *)lower_buf, 674 (char __user *)lower_buf,
676 lower_bufsiz); 675 PATH_MAX);
677 set_fs(old_fs); 676 set_fs(old_fs);
678 if (rc < 0) 677 if (rc < 0)
679 goto out; 678 goto out;
680 lower_bufsiz = rc;
681 rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry, 679 rc = ecryptfs_decode_and_decrypt_filename(buf, bufsiz, dentry,
682 lower_buf, lower_bufsiz); 680 lower_buf, rc);
683out: 681out:
684 kfree(lower_buf); 682 kfree(lower_buf);
685 return rc; 683 return rc;
686} 684}
687 685
688static int 686static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
689ecryptfs_readlink(struct dentry *dentry, char __user *buf, int bufsiz)
690{ 687{
691 char *kbuf; 688 char *buf;
692 size_t kbufsiz, copied; 689 size_t len = PATH_MAX;
693 int rc; 690 int rc;
694 691
695 rc = ecryptfs_readlink_lower(dentry, &kbuf, &kbufsiz); 692 rc = ecryptfs_readlink_lower(dentry, &buf, &len);
696 if (rc) 693 if (rc)
697 goto out; 694 goto out;
698 copied = min_t(size_t, bufsiz, kbufsiz);
699 rc = copy_to_user(buf, kbuf, copied) ? -EFAULT : copied;
700 kfree(kbuf);
701 fsstack_copy_attr_atime(dentry->d_inode, 695 fsstack_copy_attr_atime(dentry->d_inode,
702 ecryptfs_dentry_to_lower(dentry)->d_inode); 696 ecryptfs_dentry_to_lower(dentry)->d_inode);
703out: 697 buf[len] = '\0';
704 return rc;
705}
706
707static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
708{
709 char *buf;
710 int len = PAGE_SIZE, rc;
711 mm_segment_t old_fs;
712
713 /* Released in ecryptfs_put_link(); only release here on error */
714 buf = kmalloc(len, GFP_KERNEL);
715 if (!buf) {
716 buf = ERR_PTR(-ENOMEM);
717 goto out;
718 }
719 old_fs = get_fs();
720 set_fs(get_ds());
721 rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len);
722 set_fs(old_fs);
723 if (rc < 0) {
724 kfree(buf);
725 buf = ERR_PTR(rc);
726 } else
727 buf[rc] = '\0';
728out: 698out:
729 nd_set_link(nd, buf); 699 nd_set_link(nd, buf);
730 return NULL; 700 return NULL;
@@ -1153,7 +1123,7 @@ out:
1153} 1123}
1154 1124
1155const struct inode_operations ecryptfs_symlink_iops = { 1125const struct inode_operations ecryptfs_symlink_iops = {
1156 .readlink = ecryptfs_readlink, 1126 .readlink = generic_readlink,
1157 .follow_link = ecryptfs_follow_link, 1127 .follow_link = ecryptfs_follow_link,
1158 .put_link = ecryptfs_put_link, 1128 .put_link = ecryptfs_put_link,
1159 .permission = ecryptfs_permission, 1129 .permission = ecryptfs_permission,
diff --git a/fs/eventfd.c b/fs/eventfd.c
index dba15fecf23e..d81b9f654086 100644
--- a/fs/eventfd.c
+++ b/fs/eventfd.c
@@ -46,20 +46,16 @@ struct eventfd_ctx {
46 * value, and we signal this as overflow condition by returining a POLLERR 46 * value, and we signal this as overflow condition by returining a POLLERR
47 * to poll(2). 47 * to poll(2).
48 * 48 *
49 * Returns @n in case of success, a non-negative number lower than @n in case 49 * Returns the amount by which the counter was incrememnted. This will be less
50 * of overflow, or the following error codes: 50 * than @n if the counter has overflowed.
51 *
52 * -EINVAL : The value of @n is negative.
53 */ 51 */
54int eventfd_signal(struct eventfd_ctx *ctx, int n) 52__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n)
55{ 53{
56 unsigned long flags; 54 unsigned long flags;
57 55
58 if (n < 0)
59 return -EINVAL;
60 spin_lock_irqsave(&ctx->wqh.lock, flags); 56 spin_lock_irqsave(&ctx->wqh.lock, flags);
61 if (ULLONG_MAX - ctx->count < n) 57 if (ULLONG_MAX - ctx->count < n)
62 n = (int) (ULLONG_MAX - ctx->count); 58 n = ULLONG_MAX - ctx->count;
63 ctx->count += n; 59 ctx->count += n;
64 if (waitqueue_active(&ctx->wqh)) 60 if (waitqueue_active(&ctx->wqh))
65 wake_up_locked_poll(&ctx->wqh, POLLIN); 61 wake_up_locked_poll(&ctx->wqh, POLLIN);
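eventfd_signal() now takes and returns a 64-bit count and trims the increment so the addition can never wrap, returning how much was actually added. The clamp in isolation, as a small standalone sketch:

    #include <stdint.h>

    /* Add n to *count without wrapping; return the amount actually added
     * (mirrors the clamping in the eventfd_signal() hunk above). */
    static uint64_t saturating_add(uint64_t *count, uint64_t n)
    {
        if (UINT64_MAX - *count < n)
            n = UINT64_MAX - *count;   /* trim so the sum tops out at UINT64_MAX */
        *count += n;
        return n;
    }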
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index 079d1be65ba9..74598f67efeb 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -1853,8 +1853,6 @@ error_return:
1853 return error; 1853 return error;
1854} 1854}
1855 1855
1856#ifdef HAVE_SET_RESTORE_SIGMASK
1857
1858/* 1856/*
1859 * Implement the event wait interface for the eventpoll file. It is the kernel 1857 * Implement the event wait interface for the eventpoll file. It is the kernel
1860 * part of the user space epoll_pwait(2). 1858 * part of the user space epoll_pwait(2).
@@ -1899,8 +1897,6 @@ SYSCALL_DEFINE6(epoll_pwait, int, epfd, struct epoll_event __user *, events,
1899 return error; 1897 return error;
1900} 1898}
1901 1899
1902#endif /* HAVE_SET_RESTORE_SIGMASK */
1903
1904static int __init eventpoll_init(void) 1900static int __init eventpoll_init(void)
1905{ 1901{
1906 struct sysinfo si; 1902 struct sysinfo si;
diff --git a/fs/exec.c b/fs/exec.c
index 52c9e2ff6e6b..da27b91ff1e8 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -280,10 +280,6 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
280 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); 280 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
281 INIT_LIST_HEAD(&vma->anon_vma_chain); 281 INIT_LIST_HEAD(&vma->anon_vma_chain);
282 282
283 err = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
284 if (err)
285 goto err;
286
287 err = insert_vm_struct(mm, vma); 283 err = insert_vm_struct(mm, vma);
288 if (err) 284 if (err)
289 goto err; 285 goto err;
@@ -823,10 +819,10 @@ static int exec_mmap(struct mm_struct *mm)
823 /* Notify parent that we're no longer interested in the old VM */ 819 /* Notify parent that we're no longer interested in the old VM */
824 tsk = current; 820 tsk = current;
825 old_mm = current->mm; 821 old_mm = current->mm;
826 sync_mm_rss(old_mm);
827 mm_release(tsk, old_mm); 822 mm_release(tsk, old_mm);
828 823
829 if (old_mm) { 824 if (old_mm) {
825 sync_mm_rss(old_mm);
830 /* 826 /*
831 * Make sure that if there is a core dump in progress 827 * Make sure that if there is a core dump in progress
832 * for the old mm, we get out and die instead of going 828 * for the old mm, we get out and die instead of going
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
index e32bc919e4e3..5a7b691e748b 100644
--- a/fs/exofs/sys.c
+++ b/fs/exofs/sys.c
@@ -109,7 +109,7 @@ static struct kobj_type odev_ktype = {
109static struct kobj_type uuid_ktype = { 109static struct kobj_type uuid_ktype = {
110}; 110};
111 111
112void exofs_sysfs_dbg_print() 112void exofs_sysfs_dbg_print(void)
113{ 113{
114#ifdef CONFIG_EXOFS_DEBUG 114#ifdef CONFIG_EXOFS_DEBUG
115 struct kobject *k_name, *k_tmp; 115 struct kobject *k_name, *k_tmp;
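The exofs fix is purely a C declaration matter: empty parentheses declare a function with an unspecified argument list, while (void) declares one that takes no arguments, which is what strict prototype checking expects. In two lines:

    void dbg_print();      /* old style: unspecified parameters, no type checking */
    void dbg_print(void);  /* prototype: explicitly takes no arguments */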
diff --git a/fs/exportfs/expfs.c b/fs/exportfs/expfs.c
index b05acb796135..b0201ca6e9c6 100644
--- a/fs/exportfs/expfs.c
+++ b/fs/exportfs/expfs.c
@@ -304,24 +304,23 @@ out:
304 304
305/** 305/**
306 * export_encode_fh - default export_operations->encode_fh function 306 * export_encode_fh - default export_operations->encode_fh function
307 * @dentry: the dentry to encode 307 * @inode: the object to encode
308 * @fh: where to store the file handle fragment 308 * @fh: where to store the file handle fragment
309 * @max_len: maximum length to store there 309 * @max_len: maximum length to store there
310 * @connectable: whether to store parent information 310 * @parent: parent directory inode, if wanted
311 * 311 *
312 * This default encode_fh function assumes that the 32 inode number 312 * This default encode_fh function assumes that the 32 inode number
313 * is suitable for locating an inode, and that the generation number 313 * is suitable for locating an inode, and that the generation number
314 * can be used to check that it is still valid. It places them in the 314 * can be used to check that it is still valid. It places them in the
315 * filehandle fragment where export_decode_fh expects to find them. 315 * filehandle fragment where export_decode_fh expects to find them.
316 */ 316 */
317static int export_encode_fh(struct dentry *dentry, struct fid *fid, 317static int export_encode_fh(struct inode *inode, struct fid *fid,
318 int *max_len, int connectable) 318 int *max_len, struct inode *parent)
319{ 319{
320 struct inode * inode = dentry->d_inode;
321 int len = *max_len; 320 int len = *max_len;
322 int type = FILEID_INO32_GEN; 321 int type = FILEID_INO32_GEN;
323 322
324 if (connectable && (len < 4)) { 323 if (parent && (len < 4)) {
325 *max_len = 4; 324 *max_len = 4;
326 return 255; 325 return 255;
327 } else if (len < 2) { 326 } else if (len < 2) {
@@ -332,14 +331,9 @@ static int export_encode_fh(struct dentry *dentry, struct fid *fid,
332 len = 2; 331 len = 2;
333 fid->i32.ino = inode->i_ino; 332 fid->i32.ino = inode->i_ino;
334 fid->i32.gen = inode->i_generation; 333 fid->i32.gen = inode->i_generation;
335 if (connectable && !S_ISDIR(inode->i_mode)) { 334 if (parent) {
336 struct inode *parent;
337
338 spin_lock(&dentry->d_lock);
339 parent = dentry->d_parent->d_inode;
340 fid->i32.parent_ino = parent->i_ino; 335 fid->i32.parent_ino = parent->i_ino;
341 fid->i32.parent_gen = parent->i_generation; 336 fid->i32.parent_gen = parent->i_generation;
342 spin_unlock(&dentry->d_lock);
343 len = 4; 337 len = 4;
344 type = FILEID_INO32_GEN_PARENT; 338 type = FILEID_INO32_GEN_PARENT;
345 } 339 }
@@ -352,11 +346,22 @@ int exportfs_encode_fh(struct dentry *dentry, struct fid *fid, int *max_len,
352{ 346{
353 const struct export_operations *nop = dentry->d_sb->s_export_op; 347 const struct export_operations *nop = dentry->d_sb->s_export_op;
354 int error; 348 int error;
349 struct dentry *p = NULL;
350 struct inode *inode = dentry->d_inode, *parent = NULL;
355 351
352 if (connectable && !S_ISDIR(inode->i_mode)) {
353 p = dget_parent(dentry);
354 /*
355 * note that while p might've ceased to be our parent already,
356 * it's still pinned by and still positive.
357 */
358 parent = p->d_inode;
359 }
356 if (nop->encode_fh) 360 if (nop->encode_fh)
357 error = nop->encode_fh(dentry, fid->raw, max_len, connectable); 361 error = nop->encode_fh(inode, fid->raw, max_len, parent);
358 else 362 else
359 error = export_encode_fh(dentry, fid, max_len, connectable); 363 error = export_encode_fh(inode, fid, max_len, parent);
364 dput(p);
360 365
361 return error; 366 return error;
362} 367}
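export_encode_fh() now receives the inode, and optionally the parent inode, instead of a dentry; exportfs_encode_fh() pins the parent with dget_parent() before calling in. The handle itself is still the 32-bit ino/generation pair, doubled when a connectable handle is wanted. A standalone sketch of that encoding decision (the types and constants below are simplified stand-ins):

    #include <stdint.h>

    #define FILEID_INO32_GEN         1
    #define FILEID_INO32_GEN_PARENT  2

    struct fid32 { uint32_t ino, gen, parent_ino, parent_gen; };
    struct node  { uint32_t ino, gen; };

    /* Returns the fileid type, or 255 if the caller's buffer (counted in
     * 4-byte words) is too small; *max_len is set to the space required/used. */
    static int encode_fh(const struct node *inode, const struct node *parent,
                         struct fid32 *fid, int *max_len)
    {
        int len = *max_len;

        if (parent && len < 4) { *max_len = 4; return 255; }
        if (len < 2)           { *max_len = 2; return 255; }

        fid->ino = inode->ino;
        fid->gen = inode->gen;
        len = 2;
        if (parent) {                       /* connectable: add parent info */
            fid->parent_ino = parent->ino;
            fid->parent_gen = parent->gen;
            len = 4;
        }
        *max_len = len;
        return parent ? FILEID_INO32_GEN_PARENT : FILEID_INO32_GEN;
    }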
diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig
index 9ed1bb1f319f..c22f17021b6e 100644
--- a/fs/ext4/Kconfig
+++ b/fs/ext4/Kconfig
@@ -2,6 +2,8 @@ config EXT4_FS
2 tristate "The Extended 4 (ext4) filesystem" 2 tristate "The Extended 4 (ext4) filesystem"
3 select JBD2 3 select JBD2
4 select CRC16 4 select CRC16
5 select CRYPTO
6 select CRYPTO_CRC32C
5 help 7 help
6 This is the next generation of the ext3 filesystem. 8 This is the next generation of the ext3 filesystem.
7 9
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index c45c41129a35..cee7812cc3cf 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -90,8 +90,8 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
90 * unusual file system layouts. 90 * unusual file system layouts.
91 */ 91 */
92 if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) { 92 if (ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp), block_group)) {
93 block_cluster = EXT4_B2C(sbi, (start - 93 block_cluster = EXT4_B2C(sbi,
94 ext4_block_bitmap(sb, gdp))); 94 ext4_block_bitmap(sb, gdp) - start);
95 if (block_cluster < num_clusters) 95 if (block_cluster < num_clusters)
96 block_cluster = -1; 96 block_cluster = -1;
97 else if (block_cluster == num_clusters) { 97 else if (block_cluster == num_clusters) {
@@ -102,7 +102,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
102 102
103 if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) { 103 if (ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp), block_group)) {
104 inode_cluster = EXT4_B2C(sbi, 104 inode_cluster = EXT4_B2C(sbi,
105 start - ext4_inode_bitmap(sb, gdp)); 105 ext4_inode_bitmap(sb, gdp) - start);
106 if (inode_cluster < num_clusters) 106 if (inode_cluster < num_clusters)
107 inode_cluster = -1; 107 inode_cluster = -1;
108 else if (inode_cluster == num_clusters) { 108 else if (inode_cluster == num_clusters) {
@@ -114,7 +114,7 @@ unsigned ext4_num_overhead_clusters(struct super_block *sb,
114 itbl_blk = ext4_inode_table(sb, gdp); 114 itbl_blk = ext4_inode_table(sb, gdp);
115 for (i = 0; i < sbi->s_itb_per_group; i++) { 115 for (i = 0; i < sbi->s_itb_per_group; i++) {
116 if (ext4_block_in_group(sb, itbl_blk + i, block_group)) { 116 if (ext4_block_in_group(sb, itbl_blk + i, block_group)) {
117 c = EXT4_B2C(sbi, start - itbl_blk + i); 117 c = EXT4_B2C(sbi, itbl_blk + i - start);
118 if ((c < num_clusters) || (c == inode_cluster) || 118 if ((c < num_clusters) || (c == inode_cluster) ||
119 (c == block_cluster) || (c == itbl_cluster)) 119 (c == block_cluster) || (c == itbl_cluster))
120 continue; 120 continue;
@@ -168,12 +168,14 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
168 168
169 /* If checksum is bad mark all blocks used to prevent allocation 169 /* If checksum is bad mark all blocks used to prevent allocation
170 * essentially implementing a per-group read-only flag. */ 170 * essentially implementing a per-group read-only flag. */
171 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 171 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
172 ext4_error(sb, "Checksum bad for group %u", block_group); 172 ext4_error(sb, "Checksum bad for group %u", block_group);
173 ext4_free_group_clusters_set(sb, gdp, 0); 173 ext4_free_group_clusters_set(sb, gdp, 0);
174 ext4_free_inodes_set(sb, gdp, 0); 174 ext4_free_inodes_set(sb, gdp, 0);
175 ext4_itable_unused_set(sb, gdp, 0); 175 ext4_itable_unused_set(sb, gdp, 0);
176 memset(bh->b_data, 0xff, sb->s_blocksize); 176 memset(bh->b_data, 0xff, sb->s_blocksize);
177 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
178 EXT4_BLOCKS_PER_GROUP(sb) / 8);
177 return; 179 return;
178 } 180 }
179 memset(bh->b_data, 0, sb->s_blocksize); 181 memset(bh->b_data, 0, sb->s_blocksize);
@@ -210,6 +212,9 @@ void ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
210 */ 212 */
211 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group), 213 ext4_mark_bitmap_end(num_clusters_in_group(sb, block_group),
212 sb->s_blocksize * 8, bh->b_data); 214 sb->s_blocksize * 8, bh->b_data);
215 ext4_block_bitmap_csum_set(sb, block_group, gdp, bh,
216 EXT4_BLOCKS_PER_GROUP(sb) / 8);
217 ext4_group_desc_csum_set(sb, block_group, gdp);
213} 218}
214 219
215/* Return the number of free blocks in a block group. It is used when 220/* Return the number of free blocks in a block group. It is used when
@@ -276,9 +281,9 @@ struct ext4_group_desc * ext4_get_group_desc(struct super_block *sb,
276} 281}
277 282
278static int ext4_valid_block_bitmap(struct super_block *sb, 283static int ext4_valid_block_bitmap(struct super_block *sb,
279 struct ext4_group_desc *desc, 284 struct ext4_group_desc *desc,
280 unsigned int block_group, 285 unsigned int block_group,
281 struct buffer_head *bh) 286 struct buffer_head *bh)
282{ 287{
283 ext4_grpblk_t offset; 288 ext4_grpblk_t offset;
284 ext4_grpblk_t next_zero_bit; 289 ext4_grpblk_t next_zero_bit;
@@ -325,6 +330,23 @@ err_out:
325 block_group, bitmap_blk); 330 block_group, bitmap_blk);
326 return 0; 331 return 0;
327} 332}
333
334void ext4_validate_block_bitmap(struct super_block *sb,
335 struct ext4_group_desc *desc,
336 unsigned int block_group,
337 struct buffer_head *bh)
338{
339 if (buffer_verified(bh))
340 return;
341
342 ext4_lock_group(sb, block_group);
343 if (ext4_valid_block_bitmap(sb, desc, block_group, bh) &&
344 ext4_block_bitmap_csum_verify(sb, block_group, desc, bh,
345 EXT4_BLOCKS_PER_GROUP(sb) / 8))
346 set_buffer_verified(bh);
347 ext4_unlock_group(sb, block_group);
348}
349
328/** 350/**
329 * ext4_read_block_bitmap() 351 * ext4_read_block_bitmap()
330 * @sb: super block 352 * @sb: super block
@@ -355,12 +377,12 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
355 } 377 }
356 378
357 if (bitmap_uptodate(bh)) 379 if (bitmap_uptodate(bh))
358 return bh; 380 goto verify;
359 381
360 lock_buffer(bh); 382 lock_buffer(bh);
361 if (bitmap_uptodate(bh)) { 383 if (bitmap_uptodate(bh)) {
362 unlock_buffer(bh); 384 unlock_buffer(bh);
363 return bh; 385 goto verify;
364 } 386 }
365 ext4_lock_group(sb, block_group); 387 ext4_lock_group(sb, block_group);
366 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 388 if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
@@ -379,7 +401,7 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
379 */ 401 */
380 set_bitmap_uptodate(bh); 402 set_bitmap_uptodate(bh);
381 unlock_buffer(bh); 403 unlock_buffer(bh);
382 return bh; 404 goto verify;
383 } 405 }
384 /* 406 /*
385 * submit the buffer_head for reading 407 * submit the buffer_head for reading
@@ -390,6 +412,9 @@ ext4_read_block_bitmap_nowait(struct super_block *sb, ext4_group_t block_group)
390 get_bh(bh); 412 get_bh(bh);
391 submit_bh(READ, bh); 413 submit_bh(READ, bh);
392 return bh; 414 return bh;
415verify:
416 ext4_validate_block_bitmap(sb, desc, block_group, bh);
417 return bh;
393} 418}
394 419
395/* Returns 0 on success, 1 on error */ 420/* Returns 0 on success, 1 on error */
@@ -412,7 +437,7 @@ int ext4_wait_block_bitmap(struct super_block *sb, ext4_group_t block_group,
412 } 437 }
413 clear_buffer_new(bh); 438 clear_buffer_new(bh);
414 /* Panic or remount fs read-only if block bitmap is invalid */ 439 /* Panic or remount fs read-only if block bitmap is invalid */
415 ext4_valid_block_bitmap(sb, desc, block_group, bh); 440 ext4_validate_block_bitmap(sb, desc, block_group, bh);
416 return 0; 441 return 0;
417} 442}
418 443
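The balloc.c fixes flip the EXT4_B2C() arguments so the overhead calculation measures how far the bitmap or table block sits past the group's first block, not the other way round. Assuming, as the hunk implies, that EXT4_B2C() simply divides a block offset by the blocks-per-cluster ratio, the intended arithmetic is just:

    #include <stdint.h>

    /* Cluster index of 'block' inside a group that starts at 'group_start',
     * with 2^cluster_bits blocks per cluster.  The fix above is exactly this
     * orientation: (block - group_start), never (group_start - block). */
    static uint32_t block_to_group_cluster(uint64_t block, uint64_t group_start,
                                           unsigned cluster_bits)
    {
        return (uint32_t)((block - group_start) >> cluster_bits);
    }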
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index fa3af81ac565..b319721da26a 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -29,3 +29,86 @@ unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
29 29
30#endif /* EXT4FS_DEBUG */ 30#endif /* EXT4FS_DEBUG */
31 31
32int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
33 struct ext4_group_desc *gdp,
34 struct buffer_head *bh, int sz)
35{
36 __u32 hi;
37 __u32 provided, calculated;
38 struct ext4_sb_info *sbi = EXT4_SB(sb);
39
40 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
41 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
42 return 1;
43
44 provided = le16_to_cpu(gdp->bg_inode_bitmap_csum_lo);
45 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
46 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END) {
47 hi = le16_to_cpu(gdp->bg_inode_bitmap_csum_hi);
48 provided |= (hi << 16);
49 } else
50 calculated &= 0xFFFF;
51
52 return provided == calculated;
53}
54
55void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
56 struct ext4_group_desc *gdp,
57 struct buffer_head *bh, int sz)
58{
59 __u32 csum;
60 struct ext4_sb_info *sbi = EXT4_SB(sb);
61
62 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
63 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
64 return;
65
66 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
67 gdp->bg_inode_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
68 if (sbi->s_desc_size >= EXT4_BG_INODE_BITMAP_CSUM_HI_END)
69 gdp->bg_inode_bitmap_csum_hi = cpu_to_le16(csum >> 16);
70}
71
72int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
73 struct ext4_group_desc *gdp,
74 struct buffer_head *bh, int sz)
75{
76 __u32 hi;
77 __u32 provided, calculated;
78 struct ext4_sb_info *sbi = EXT4_SB(sb);
79
80 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
81 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
82 return 1;
83
84 provided = le16_to_cpu(gdp->bg_block_bitmap_csum_lo);
85 calculated = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
86 if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END) {
87 hi = le16_to_cpu(gdp->bg_block_bitmap_csum_hi);
88 provided |= (hi << 16);
89 } else
90 calculated &= 0xFFFF;
91
92 if (provided == calculated)
93 return 1;
94
95 ext4_error(sb, "Bad block bitmap checksum: block_group = %u", group);
96 return 0;
97}
98
99void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
100 struct ext4_group_desc *gdp,
101 struct buffer_head *bh, int sz)
102{
103 __u32 csum;
104 struct ext4_sb_info *sbi = EXT4_SB(sb);
105
106 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
107 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
108 return;
109
110 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)bh->b_data, sz);
111 gdp->bg_block_bitmap_csum_lo = cpu_to_le16(csum & 0xFFFF);
112 if (sbi->s_desc_size >= EXT4_BG_BLOCK_BITMAP_CSUM_HI_END)
113 gdp->bg_block_bitmap_csum_hi = cpu_to_le16(csum >> 16);
114}
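The new bitmap.c helpers store a crc32c of each bitmap in the group descriptor: the low 16 bits always, the high 16 bits only when the descriptor is large enough to carry the _hi field, and verification mirrors that split. A small sketch of just the split/recombine logic (the checksum routine is a placeholder declaration, not the kernel's ext4_chksum):

    #include <stdint.h>
    #include <stddef.h>
    #include <stdbool.h>

    struct desc { uint16_t csum_lo, csum_hi; };

    extern uint32_t crc32c(uint32_t seed, const void *buf, size_t len);  /* placeholder */

    static void bitmap_csum_set(struct desc *d, bool has_hi,
                                uint32_t seed, const void *bitmap, size_t sz)
    {
        uint32_t csum = crc32c(seed, bitmap, sz);
        d->csum_lo = (uint16_t)(csum & 0xFFFF);
        if (has_hi)
            d->csum_hi = (uint16_t)(csum >> 16);
    }

    static bool bitmap_csum_verify(const struct desc *d, bool has_hi,
                                   uint32_t seed, const void *bitmap, size_t sz)
    {
        uint32_t calculated = crc32c(seed, bitmap, sz);
        uint32_t provided = d->csum_lo;

        if (has_hi)
            provided |= (uint32_t)d->csum_hi << 16;  /* full 32-bit compare */
        else
            calculated &= 0xFFFF;                    /* only the low half is stored */
        return provided == calculated;
    }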
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index b86786202643..aa39e600d159 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -179,6 +179,18 @@ static int ext4_readdir(struct file *filp,
179 continue; 179 continue;
180 } 180 }
181 181
182 /* Check the checksum */
183 if (!buffer_verified(bh) &&
184 !ext4_dirent_csum_verify(inode,
185 (struct ext4_dir_entry *)bh->b_data)) {
186 EXT4_ERROR_FILE(filp, 0, "directory fails checksum "
187 "at offset %llu",
188 (unsigned long long)filp->f_pos);
189 filp->f_pos += sb->s_blocksize - offset;
190 continue;
191 }
192 set_buffer_verified(bh);
193
182revalidate: 194revalidate:
183 /* If the dir block has changed since the last call to 195 /* If the dir block has changed since the last call to
184 * readdir(2), then we might be pointing to an invalid 196 * readdir(2), then we might be pointing to an invalid
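ext4_readdir() now checks the directory block's checksum before parsing it, but only when the buffer is not already marked verified, and sets the flag on success so later passes skip the work. The "validate once, cache the result in a flag" pattern in miniature (names invented):

    #include <stdbool.h>

    struct buffer { bool verified; /* ...block data... */ };

    static bool check_once(struct buffer *bh,
                           bool (*verify)(const struct buffer *))
    {
        if (bh->verified)
            return true;          /* already validated on an earlier pass */
        if (!verify(bh))
            return false;         /* caller skips or reports the corrupt block */
        bh->verified = true;      /* cache the result for future readers */
        return true;
    }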
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c21b1de51afb..cfc4e01b3c83 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -29,6 +29,7 @@
29#include <linux/wait.h> 29#include <linux/wait.h>
30#include <linux/blockgroup_lock.h> 30#include <linux/blockgroup_lock.h>
31#include <linux/percpu_counter.h> 31#include <linux/percpu_counter.h>
32#include <crypto/hash.h>
32#ifdef __KERNEL__ 33#ifdef __KERNEL__
33#include <linux/compat.h> 34#include <linux/compat.h>
34#endif 35#endif
@@ -298,7 +299,9 @@ struct ext4_group_desc
298 __le16 bg_free_inodes_count_lo;/* Free inodes count */ 299 __le16 bg_free_inodes_count_lo;/* Free inodes count */
299 __le16 bg_used_dirs_count_lo; /* Directories count */ 300 __le16 bg_used_dirs_count_lo; /* Directories count */
300 __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */ 301 __le16 bg_flags; /* EXT4_BG_flags (INODE_UNINIT, etc) */
301 __u32 bg_reserved[2]; /* Likely block/inode bitmap checksum */ 302 __le32 bg_exclude_bitmap_lo; /* Exclude bitmap for snapshots */
303 __le16 bg_block_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+bbitmap) LE */
304 __le16 bg_inode_bitmap_csum_lo;/* crc32c(s_uuid+grp_num+ibitmap) LE */
302 __le16 bg_itable_unused_lo; /* Unused inodes count */ 305 __le16 bg_itable_unused_lo; /* Unused inodes count */
303 __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */ 306 __le16 bg_checksum; /* crc16(sb_uuid+group+desc) */
304 __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */ 307 __le32 bg_block_bitmap_hi; /* Blocks bitmap block MSB */
@@ -308,9 +311,19 @@ struct ext4_group_desc
308 __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */ 311 __le16 bg_free_inodes_count_hi;/* Free inodes count MSB */
309 __le16 bg_used_dirs_count_hi; /* Directories count MSB */ 312 __le16 bg_used_dirs_count_hi; /* Directories count MSB */
310 __le16 bg_itable_unused_hi; /* Unused inodes count MSB */ 313 __le16 bg_itable_unused_hi; /* Unused inodes count MSB */
311 __u32 bg_reserved2[3]; 314 __le32 bg_exclude_bitmap_hi; /* Exclude bitmap block MSB */
315 __le16 bg_block_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+bbitmap) BE */
316 __le16 bg_inode_bitmap_csum_hi;/* crc32c(s_uuid+grp_num+ibitmap) BE */
317 __u32 bg_reserved;
312}; 318};
313 319
320#define EXT4_BG_INODE_BITMAP_CSUM_HI_END \
321 (offsetof(struct ext4_group_desc, bg_inode_bitmap_csum_hi) + \
322 sizeof(__le16))
323#define EXT4_BG_BLOCK_BITMAP_CSUM_HI_END \
324 (offsetof(struct ext4_group_desc, bg_block_bitmap_csum_hi) + \
325 sizeof(__le16))
326
314/* 327/*
315 * Structure of a flex block group info 328 * Structure of a flex block group info
316 */ 329 */
@@ -650,7 +663,8 @@ struct ext4_inode {
650 __le16 l_i_file_acl_high; 663 __le16 l_i_file_acl_high;
651 __le16 l_i_uid_high; /* these 2 fields */ 664 __le16 l_i_uid_high; /* these 2 fields */
652 __le16 l_i_gid_high; /* were reserved2[0] */ 665 __le16 l_i_gid_high; /* were reserved2[0] */
653 __u32 l_i_reserved2; 666 __le16 l_i_checksum_lo;/* crc32c(uuid+inum+inode) LE */
667 __le16 l_i_reserved;
654 } linux2; 668 } linux2;
655 struct { 669 struct {
656 __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */ 670 __le16 h_i_reserved1; /* Obsoleted fragment number/size which are removed in ext4 */
@@ -666,7 +680,7 @@ struct ext4_inode {
666 } masix2; 680 } masix2;
667 } osd2; /* OS dependent 2 */ 681 } osd2; /* OS dependent 2 */
668 __le16 i_extra_isize; 682 __le16 i_extra_isize;
669 __le16 i_pad1; 683 __le16 i_checksum_hi; /* crc32c(uuid+inum+inode) BE */
670 __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */ 684 __le32 i_ctime_extra; /* extra Change time (nsec << 2 | epoch) */
671 __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */ 685 __le32 i_mtime_extra; /* extra Modification time(nsec << 2 | epoch) */
672 __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */ 686 __le32 i_atime_extra; /* extra Access time (nsec << 2 | epoch) */
@@ -768,7 +782,7 @@ do { \
768#define i_gid_low i_gid 782#define i_gid_low i_gid
769#define i_uid_high osd2.linux2.l_i_uid_high 783#define i_uid_high osd2.linux2.l_i_uid_high
770#define i_gid_high osd2.linux2.l_i_gid_high 784#define i_gid_high osd2.linux2.l_i_gid_high
771#define i_reserved2 osd2.linux2.l_i_reserved2 785#define i_checksum_lo osd2.linux2.l_i_checksum_lo
772 786
773#elif defined(__GNU__) 787#elif defined(__GNU__)
774 788
@@ -908,6 +922,9 @@ struct ext4_inode_info {
908 */ 922 */
909 tid_t i_sync_tid; 923 tid_t i_sync_tid;
910 tid_t i_datasync_tid; 924 tid_t i_datasync_tid;
925
926 /* Precomputed uuid+inum+igen checksum for seeding inode checksums */
927 __u32 i_csum_seed;
911}; 928};
912 929
913/* 930/*
@@ -1001,6 +1018,9 @@ extern void ext4_set_bits(void *bm, int cur, int len);
1001#define EXT4_ERRORS_PANIC 3 /* Panic */ 1018#define EXT4_ERRORS_PANIC 3 /* Panic */
1002#define EXT4_ERRORS_DEFAULT EXT4_ERRORS_CONTINUE 1019#define EXT4_ERRORS_DEFAULT EXT4_ERRORS_CONTINUE
1003 1020
1021/* Metadata checksum algorithm codes */
1022#define EXT4_CRC32C_CHKSUM 1
1023
1004/* 1024/*
1005 * Structure of the super block 1025 * Structure of the super block
1006 */ 1026 */
@@ -1087,7 +1107,7 @@ struct ext4_super_block {
1087 __le64 s_mmp_block; /* Block for multi-mount protection */ 1107 __le64 s_mmp_block; /* Block for multi-mount protection */
1088 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/ 1108 __le32 s_raid_stripe_width; /* blocks on all data disks (N*stride)*/
1089 __u8 s_log_groups_per_flex; /* FLEX_BG group size */ 1109 __u8 s_log_groups_per_flex; /* FLEX_BG group size */
1090 __u8 s_reserved_char_pad; 1110 __u8 s_checksum_type; /* metadata checksum algorithm used */
1091 __le16 s_reserved_pad; 1111 __le16 s_reserved_pad;
1092 __le64 s_kbytes_written; /* nr of lifetime kilobytes written */ 1112 __le64 s_kbytes_written; /* nr of lifetime kilobytes written */
1093 __le32 s_snapshot_inum; /* Inode number of active snapshot */ 1113 __le32 s_snapshot_inum; /* Inode number of active snapshot */
@@ -1113,7 +1133,8 @@ struct ext4_super_block {
1113 __le32 s_usr_quota_inum; /* inode for tracking user quota */ 1133 __le32 s_usr_quota_inum; /* inode for tracking user quota */
1114 __le32 s_grp_quota_inum; /* inode for tracking group quota */ 1134 __le32 s_grp_quota_inum; /* inode for tracking group quota */
1115 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */ 1135 __le32 s_overhead_clusters; /* overhead blocks/clusters in fs */
1116 __le32 s_reserved[109]; /* Padding to the end of the block */ 1136 __le32 s_reserved[108]; /* Padding to the end of the block */
1137 __le32 s_checksum; /* crc32c(superblock) */
1117}; 1138};
1118 1139
1119#define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START) 1140#define EXT4_S_ERR_LEN (EXT4_S_ERR_END - EXT4_S_ERR_START)
@@ -1176,6 +1197,7 @@ struct ext4_sb_info {
1176 struct proc_dir_entry *s_proc; 1197 struct proc_dir_entry *s_proc;
1177 struct kobject s_kobj; 1198 struct kobject s_kobj;
1178 struct completion s_kobj_unregister; 1199 struct completion s_kobj_unregister;
1200 struct super_block *s_sb;
1179 1201
1180 /* Journaling */ 1202 /* Journaling */
1181 struct journal_s *s_journal; 1203 struct journal_s *s_journal;
@@ -1266,6 +1288,12 @@ struct ext4_sb_info {
1266 1288
1267 /* record the last minlen when FITRIM is called. */ 1289 /* record the last minlen when FITRIM is called. */
1268 atomic_t s_last_trim_minblks; 1290 atomic_t s_last_trim_minblks;
1291
1292 /* Reference to checksum algorithm driver via cryptoapi */
1293 struct crypto_shash *s_chksum_driver;
1294
1295 /* Precomputed FS UUID checksum for seeding other checksums */
1296 __u32 s_csum_seed;
1269}; 1297};
1270 1298
1271static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb) 1299static inline struct ext4_sb_info *EXT4_SB(struct super_block *sb)
@@ -1414,6 +1442,12 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1414#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040 1442#define EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE 0x0040
1415#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100 1443#define EXT4_FEATURE_RO_COMPAT_QUOTA 0x0100
1416#define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200 1444#define EXT4_FEATURE_RO_COMPAT_BIGALLOC 0x0200
1445/*
1446 * METADATA_CSUM also enables group descriptor checksums (GDT_CSUM). When
1447 * METADATA_CSUM is set, group descriptor checksums use the same algorithm as
1448 * all other data structures' checksums. However, the METADATA_CSUM and
1449 * GDT_CSUM bits are mutually exclusive.
1450 */
1417#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400 1451#define EXT4_FEATURE_RO_COMPAT_METADATA_CSUM 0x0400
1418 1452
1419#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001 1453#define EXT4_FEATURE_INCOMPAT_COMPRESSION 0x0001
@@ -1461,7 +1495,8 @@ static inline void ext4_clear_state_flags(struct ext4_inode_info *ei)
1461 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \ 1495 EXT4_FEATURE_RO_COMPAT_EXTRA_ISIZE | \
1462 EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\ 1496 EXT4_FEATURE_RO_COMPAT_BTREE_DIR |\
1463 EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\ 1497 EXT4_FEATURE_RO_COMPAT_HUGE_FILE |\
1464 EXT4_FEATURE_RO_COMPAT_BIGALLOC) 1498 EXT4_FEATURE_RO_COMPAT_BIGALLOC |\
1499 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)
1465 1500
1466/* 1501/*
1467 * Default values for user and/or group using reserved blocks 1502 * Default values for user and/or group using reserved blocks
@@ -1527,6 +1562,18 @@ struct ext4_dir_entry_2 {
1527}; 1562};
1528 1563
1529/* 1564/*
1565 * This is a bogus directory entry at the end of each leaf block that
1566 * records checksums.
1567 */
1568struct ext4_dir_entry_tail {
1569 __le32 det_reserved_zero1; /* Pretend to be unused */
1570 __le16 det_rec_len; /* 12 */
1571 __u8 det_reserved_zero2; /* Zero name length */
1572 __u8 det_reserved_ft; /* 0xDE, fake file type */
1573 __le32 det_checksum; /* crc32c(uuid+inum+dirblock) */
1574};
1575
1576/*
1530 * Ext4 directory file types. Only the low 3 bits are used. The 1577 * Ext4 directory file types. Only the low 3 bits are used. The
1531 * other bits are reserved for now. 1578 * other bits are reserved for now.
1532 */ 1579 */
@@ -1541,6 +1588,8 @@ struct ext4_dir_entry_2 {
1541 1588
1542#define EXT4_FT_MAX 8 1589#define EXT4_FT_MAX 8
1543 1590
1591#define EXT4_FT_DIR_CSUM 0xDE
1592
1544/* 1593/*
1545 * EXT4_DIR_PAD defines the directory entries boundaries 1594 * EXT4_DIR_PAD defines the directory entries boundaries
1546 * 1595 *
@@ -1609,6 +1658,25 @@ static inline __le16 ext4_rec_len_to_disk(unsigned len, unsigned blocksize)
1609#define DX_HASH_HALF_MD4_UNSIGNED 4 1658#define DX_HASH_HALF_MD4_UNSIGNED 4
1610#define DX_HASH_TEA_UNSIGNED 5 1659#define DX_HASH_TEA_UNSIGNED 5
1611 1660
1661static inline u32 ext4_chksum(struct ext4_sb_info *sbi, u32 crc,
1662 const void *address, unsigned int length)
1663{
1664 struct {
1665 struct shash_desc shash;
1666 char ctx[crypto_shash_descsize(sbi->s_chksum_driver)];
1667 } desc;
1668 int err;
1669
1670 desc.shash.tfm = sbi->s_chksum_driver;
1671 desc.shash.flags = 0;
1672 *(u32 *)desc.ctx = crc;
1673
1674 err = crypto_shash_update(&desc.shash, address, length);
1675 BUG_ON(err);
1676
1677 return *(u32 *)desc.ctx;
1678}
1679
1612#ifdef __KERNEL__ 1680#ifdef __KERNEL__
1613 1681
1614/* hash info structure used by the directory hash */ 1682/* hash info structure used by the directory hash */
@@ -1741,7 +1809,8 @@ struct mmp_struct {
1741 __le16 mmp_check_interval; 1809 __le16 mmp_check_interval;
1742 1810
1743 __le16 mmp_pad1; 1811 __le16 mmp_pad1;
1744 __le32 mmp_pad2[227]; 1812 __le32 mmp_pad2[226];
1813 __le32 mmp_checksum; /* crc32c(uuid+mmp_block) */
1745}; 1814};
1746 1815
1747/* arguments passed to the mmp thread */ 1816/* arguments passed to the mmp thread */
@@ -1784,8 +1853,24 @@ struct mmpd_data {
1784 1853
1785/* bitmap.c */ 1854/* bitmap.c */
1786extern unsigned int ext4_count_free(struct buffer_head *, unsigned); 1855extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
1856void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
1857 struct ext4_group_desc *gdp,
1858 struct buffer_head *bh, int sz);
1859int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
1860 struct ext4_group_desc *gdp,
1861 struct buffer_head *bh, int sz);
1862void ext4_block_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
1863 struct ext4_group_desc *gdp,
1864 struct buffer_head *bh, int sz);
1865int ext4_block_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
1866 struct ext4_group_desc *gdp,
1867 struct buffer_head *bh, int sz);
1787 1868
1788/* balloc.c */ 1869/* balloc.c */
1870extern void ext4_validate_block_bitmap(struct super_block *sb,
1871 struct ext4_group_desc *desc,
1872 unsigned int block_group,
1873 struct buffer_head *bh);
1789extern unsigned int ext4_block_group(struct super_block *sb, 1874extern unsigned int ext4_block_group(struct super_block *sb,
1790 ext4_fsblk_t blocknr); 1875 ext4_fsblk_t blocknr);
1791extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb, 1876extern ext4_grpblk_t ext4_block_group_offset(struct super_block *sb,
@@ -1864,7 +1949,7 @@ extern void ext4_end_bitmap_read(struct buffer_head *bh, int uptodate);
1864/* mballoc.c */ 1949/* mballoc.c */
1865extern long ext4_mb_stats; 1950extern long ext4_mb_stats;
1866extern long ext4_mb_max_to_scan; 1951extern long ext4_mb_max_to_scan;
1867extern int ext4_mb_init(struct super_block *, int); 1952extern int ext4_mb_init(struct super_block *);
1868extern int ext4_mb_release(struct super_block *); 1953extern int ext4_mb_release(struct super_block *);
1869extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *, 1954extern ext4_fsblk_t ext4_mb_new_blocks(handle_t *,
1870 struct ext4_allocation_request *, int *); 1955 struct ext4_allocation_request *, int *);
@@ -1936,6 +2021,8 @@ extern long ext4_compat_ioctl(struct file *, unsigned int, unsigned long);
1936extern int ext4_ext_migrate(struct inode *); 2021extern int ext4_ext_migrate(struct inode *);
1937 2022
1938/* namei.c */ 2023/* namei.c */
2024extern int ext4_dirent_csum_verify(struct inode *inode,
2025 struct ext4_dir_entry *dirent);
1939extern int ext4_orphan_add(handle_t *, struct inode *); 2026extern int ext4_orphan_add(handle_t *, struct inode *);
1940extern int ext4_orphan_del(handle_t *, struct inode *); 2027extern int ext4_orphan_del(handle_t *, struct inode *);
1941extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash, 2028extern int ext4_htree_fill_tree(struct file *dir_file, __u32 start_hash,
@@ -1950,6 +2037,10 @@ extern int ext4_group_extend(struct super_block *sb,
1950extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count); 2037extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
1951 2038
1952/* super.c */ 2039/* super.c */
2040extern int ext4_superblock_csum_verify(struct super_block *sb,
2041 struct ext4_super_block *es);
2042extern void ext4_superblock_csum_set(struct super_block *sb,
2043 struct ext4_super_block *es);
1953extern void *ext4_kvmalloc(size_t size, gfp_t flags); 2044extern void *ext4_kvmalloc(size_t size, gfp_t flags);
1954extern void *ext4_kvzalloc(size_t size, gfp_t flags); 2045extern void *ext4_kvzalloc(size_t size, gfp_t flags);
1955extern void ext4_kvfree(void *ptr); 2046extern void ext4_kvfree(void *ptr);
@@ -2025,10 +2116,17 @@ extern void ext4_used_dirs_set(struct super_block *sb,
2025 struct ext4_group_desc *bg, __u32 count); 2116 struct ext4_group_desc *bg, __u32 count);
2026extern void ext4_itable_unused_set(struct super_block *sb, 2117extern void ext4_itable_unused_set(struct super_block *sb,
2027 struct ext4_group_desc *bg, __u32 count); 2118 struct ext4_group_desc *bg, __u32 count);
2028extern __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 group, 2119extern int ext4_group_desc_csum_verify(struct super_block *sb, __u32 group,
2029 struct ext4_group_desc *gdp);
2030extern int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 group,
2031 struct ext4_group_desc *gdp); 2120 struct ext4_group_desc *gdp);
2121extern void ext4_group_desc_csum_set(struct super_block *sb, __u32 group,
2122 struct ext4_group_desc *gdp);
2123
2124static inline int ext4_has_group_desc_csum(struct super_block *sb)
2125{
2126 return EXT4_HAS_RO_COMPAT_FEATURE(sb,
2127 EXT4_FEATURE_RO_COMPAT_GDT_CSUM |
2128 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM);
2129}
2032 2130
2033static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es) 2131static inline ext4_fsblk_t ext4_blocks_count(struct ext4_super_block *es)
2034{ 2132{
@@ -2225,6 +2323,9 @@ static inline void ext4_unlock_group(struct super_block *sb,
2225 2323
2226static inline void ext4_mark_super_dirty(struct super_block *sb) 2324static inline void ext4_mark_super_dirty(struct super_block *sb)
2227{ 2325{
2326 struct ext4_super_block *es = EXT4_SB(sb)->s_es;
2327
2328 ext4_superblock_csum_set(sb, es);
2228 if (EXT4_SB(sb)->s_journal == NULL) 2329 if (EXT4_SB(sb)->s_journal == NULL)
2229 sb->s_dirt =1; 2330 sb->s_dirt =1;
2230} 2331}
@@ -2314,6 +2415,9 @@ extern int ext4_bio_write_page(struct ext4_io_submit *io,
2314 2415
2315/* mmp.c */ 2416/* mmp.c */
2316extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t); 2417extern int ext4_multi_mount_protect(struct super_block *, ext4_fsblk_t);
2418extern void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp);
2419extern int ext4_mmp_csum_verify(struct super_block *sb,
2420 struct mmp_struct *mmp);
2317 2421
2318/* BH_Uninit flag: blocks are allocated but uninitialized on disk */ 2422/* BH_Uninit flag: blocks are allocated but uninitialized on disk */
2319enum ext4_state_bits { 2423enum ext4_state_bits {
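The ext4.h hunks above wire in a crc32c engine (s_chksum_driver), a filesystem-wide seed derived from the UUID (s_csum_seed), a per-inode seed (i_csum_seed), and the ext4_chksum() helper that continues a running crc32c over a buffer. Below is a minimal userspace sketch of that seed-and-continue pattern; the bitwise crc32c and the ~0 starting value are assumptions made for illustration, so the printed values are not the on-disk ones.

/*
 * Sketch of the crc32c chaining that ext4_chksum() is used for: one seed is
 * computed over the filesystem UUID, and every object checksum then continues
 * from that seed over its own bytes.  Bitwise crc32c (reflected polynomial
 * 0x82F63B78); the ~0 starting value is an assumption for illustration.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint8_t uuid[16] = { 0xde, 0xad, 0xbe, 0xef };	/* hypothetical s_uuid */
	uint8_t object[64] = { 0 };			/* stand-in metadata buffer */
	uint32_t fs_seed, csum;

	fs_seed = crc32c(~0U, uuid, sizeof(uuid));	/* plays the role of s_csum_seed */
	csum = crc32c(fs_seed, object, sizeof(object));	/* per-object checksum */
	printf("fs_seed=%08x csum=%08x\n", (unsigned)fs_seed, (unsigned)csum);
	return 0;
}

In the hunk above, ext4_chksum() gets the same chaining effect by loading the caller's crc into the shash context, calling crypto_shash_update(), and reading the running state back out instead of finalizing the digest.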
diff --git a/fs/ext4/ext4_extents.h b/fs/ext4/ext4_extents.h
index 0f58b86e3a02..cb1b2c919963 100644
--- a/fs/ext4/ext4_extents.h
+++ b/fs/ext4/ext4_extents.h
@@ -63,9 +63,22 @@
63 * ext4_inode has i_block array (60 bytes total). 63 * ext4_inode has i_block array (60 bytes total).
64 * The first 12 bytes store ext4_extent_header; 64 * The first 12 bytes store ext4_extent_header;
65 * the remainder stores an array of ext4_extent. 65 * the remainder stores an array of ext4_extent.
66 * For non-inode extent blocks, ext4_extent_tail
67 * follows the array.
66 */ 68 */
67 69
68/* 70/*
71 * This is the extent tail on-disk structure.
72 * All other extent structures are 12 bytes long. It turns out that
73 * block_size % 12 >= 4 for at least all powers of 2 greater than 512, which
74 * covers all valid ext4 block sizes. Therefore, this tail structure can be
75 * crammed into the end of the block without having to rebalance the tree.
76 */
77struct ext4_extent_tail {
78 __le32 et_checksum; /* crc32c(uuid+inum+extent_block) */
79};
80
81/*
69 * This is the extent on-disk structure. 82 * This is the extent on-disk structure.
70 * It's used at the bottom of the tree. 83 * It's used at the bottom of the tree.
71 */ 84 */
@@ -101,6 +114,17 @@ struct ext4_extent_header {
101 114
102#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a) 115#define EXT4_EXT_MAGIC cpu_to_le16(0xf30a)
103 116
117#define EXT4_EXTENT_TAIL_OFFSET(hdr) \
118 (sizeof(struct ext4_extent_header) + \
119 (sizeof(struct ext4_extent) * le16_to_cpu((hdr)->eh_max)))
120
121static inline struct ext4_extent_tail *
122find_ext4_extent_tail(struct ext4_extent_header *eh)
123{
124 return (struct ext4_extent_tail *)(((void *)eh) +
125 EXT4_EXTENT_TAIL_OFFSET(eh));
126}
127
104/* 128/*
105 * Array of ext4_ext_path contains path to some extent. 129 * Array of ext4_ext_path contains path to some extent.
106 * Creation/lookup routines use it for traversal/splitting/etc. 130 * Creation/lookup routines use it for traversal/splitting/etc.
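The layout argument in the new comment above hinges on block_size % 12 >= 4 holding for every valid block size. A quick userspace check of that arithmetic:

/*
 * Check of the claim above: for every power-of-two block size from 1 KiB to
 * 64 KiB, packing 12-byte extent entries leaves at least the 4 bytes that
 * struct ext4_extent_tail needs.
 */
#include <stdio.h>

int main(void)
{
	for (unsigned int bs = 1024; bs <= 65536; bs <<= 1)
		printf("block size %5u: %u spare bytes (%s)\n",
		       bs, bs % 12, bs % 12 >= 4 ? "tail fits" : "too small");
	return 0;
}

The remainder alternates between 4 and 8 bytes across these sizes, so the tail never displaces an extent entry.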
diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c
index aca179017582..90f7c2e84db1 100644
--- a/fs/ext4/ext4_jbd2.c
+++ b/fs/ext4/ext4_jbd2.c
@@ -138,16 +138,23 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
138} 138}
139 139
140int __ext4_handle_dirty_super(const char *where, unsigned int line, 140int __ext4_handle_dirty_super(const char *where, unsigned int line,
141 handle_t *handle, struct super_block *sb) 141 handle_t *handle, struct super_block *sb,
142 int now)
142{ 143{
143 struct buffer_head *bh = EXT4_SB(sb)->s_sbh; 144 struct buffer_head *bh = EXT4_SB(sb)->s_sbh;
144 int err = 0; 145 int err = 0;
145 146
146 if (ext4_handle_valid(handle)) { 147 if (ext4_handle_valid(handle)) {
148 ext4_superblock_csum_set(sb,
149 (struct ext4_super_block *)bh->b_data);
147 err = jbd2_journal_dirty_metadata(handle, bh); 150 err = jbd2_journal_dirty_metadata(handle, bh);
148 if (err) 151 if (err)
149 ext4_journal_abort_handle(where, line, __func__, 152 ext4_journal_abort_handle(where, line, __func__,
150 bh, handle, err); 153 bh, handle, err);
154 } else if (now) {
155 ext4_superblock_csum_set(sb,
156 (struct ext4_super_block *)bh->b_data);
157 mark_buffer_dirty(bh);
151 } else 158 } else
152 sb->s_dirt = 1; 159 sb->s_dirt = 1;
153 return err; 160 return err;
diff --git a/fs/ext4/ext4_jbd2.h b/fs/ext4/ext4_jbd2.h
index 83b20fcf9400..f440e8f1841f 100644
--- a/fs/ext4/ext4_jbd2.h
+++ b/fs/ext4/ext4_jbd2.h
@@ -213,7 +213,8 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line,
213 struct buffer_head *bh); 213 struct buffer_head *bh);
214 214
215int __ext4_handle_dirty_super(const char *where, unsigned int line, 215int __ext4_handle_dirty_super(const char *where, unsigned int line,
216 handle_t *handle, struct super_block *sb); 216 handle_t *handle, struct super_block *sb,
217 int now);
217 218
218#define ext4_journal_get_write_access(handle, bh) \ 219#define ext4_journal_get_write_access(handle, bh) \
219 __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh)) 220 __ext4_journal_get_write_access(__func__, __LINE__, (handle), (bh))
@@ -225,8 +226,10 @@ int __ext4_handle_dirty_super(const char *where, unsigned int line,
225#define ext4_handle_dirty_metadata(handle, inode, bh) \ 226#define ext4_handle_dirty_metadata(handle, inode, bh) \
226 __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \ 227 __ext4_handle_dirty_metadata(__func__, __LINE__, (handle), (inode), \
227 (bh)) 228 (bh))
229#define ext4_handle_dirty_super_now(handle, sb) \
230 __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 1)
228#define ext4_handle_dirty_super(handle, sb) \ 231#define ext4_handle_dirty_super(handle, sb) \
229 __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb)) 232 __ext4_handle_dirty_super(__func__, __LINE__, (handle), (sb), 0)
230 233
231handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks); 234handle_t *ext4_journal_start_sb(struct super_block *sb, int nblocks);
232int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle); 235int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index abcdeab67f52..91341ec6e06a 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -52,6 +52,46 @@
52#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */ 52#define EXT4_EXT_MARK_UNINIT1 0x2 /* mark first half uninitialized */
53#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */ 53#define EXT4_EXT_MARK_UNINIT2 0x4 /* mark second half uninitialized */
54 54
55static __le32 ext4_extent_block_csum(struct inode *inode,
56 struct ext4_extent_header *eh)
57{
58 struct ext4_inode_info *ei = EXT4_I(inode);
59 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
60 __u32 csum;
61
62 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)eh,
63 EXT4_EXTENT_TAIL_OFFSET(eh));
64 return cpu_to_le32(csum);
65}
66
67static int ext4_extent_block_csum_verify(struct inode *inode,
68 struct ext4_extent_header *eh)
69{
70 struct ext4_extent_tail *et;
71
72 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
73 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
74 return 1;
75
76 et = find_ext4_extent_tail(eh);
77 if (et->et_checksum != ext4_extent_block_csum(inode, eh))
78 return 0;
79 return 1;
80}
81
82static void ext4_extent_block_csum_set(struct inode *inode,
83 struct ext4_extent_header *eh)
84{
85 struct ext4_extent_tail *et;
86
87 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
88 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
89 return;
90
91 et = find_ext4_extent_tail(eh);
92 et->et_checksum = ext4_extent_block_csum(inode, eh);
93}
94
55static int ext4_split_extent(handle_t *handle, 95static int ext4_split_extent(handle_t *handle,
56 struct inode *inode, 96 struct inode *inode,
57 struct ext4_ext_path *path, 97 struct ext4_ext_path *path,
@@ -117,6 +157,7 @@ static int __ext4_ext_dirty(const char *where, unsigned int line,
117{ 157{
118 int err; 158 int err;
119 if (path->p_bh) { 159 if (path->p_bh) {
160 ext4_extent_block_csum_set(inode, ext_block_hdr(path->p_bh));
120 /* path points to block */ 161 /* path points to block */
121 err = __ext4_handle_dirty_metadata(where, line, handle, 162 err = __ext4_handle_dirty_metadata(where, line, handle,
122 inode, path->p_bh); 163 inode, path->p_bh);
@@ -391,6 +432,12 @@ static int __ext4_ext_check(const char *function, unsigned int line,
391 error_msg = "invalid extent entries"; 432 error_msg = "invalid extent entries";
392 goto corrupted; 433 goto corrupted;
393 } 434 }
435 /* Verify checksum on non-root extent tree nodes */
436 if (ext_depth(inode) != depth &&
437 !ext4_extent_block_csum_verify(inode, eh)) {
438 error_msg = "extent tree corrupted";
439 goto corrupted;
440 }
394 return 0; 441 return 0;
395 442
396corrupted: 443corrupted:
@@ -412,6 +459,26 @@ int ext4_ext_check_inode(struct inode *inode)
412 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode)); 459 return ext4_ext_check(inode, ext_inode_hdr(inode), ext_depth(inode));
413} 460}
414 461
462static int __ext4_ext_check_block(const char *function, unsigned int line,
463 struct inode *inode,
464 struct ext4_extent_header *eh,
465 int depth,
466 struct buffer_head *bh)
467{
468 int ret;
469
470 if (buffer_verified(bh))
471 return 0;
472 ret = ext4_ext_check(inode, eh, depth);
473 if (ret)
474 return ret;
475 set_buffer_verified(bh);
476 return ret;
477}
478
479#define ext4_ext_check_block(inode, eh, depth, bh) \
480 __ext4_ext_check_block(__func__, __LINE__, inode, eh, depth, bh)
481
415#ifdef EXT_DEBUG 482#ifdef EXT_DEBUG
416static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path) 483static void ext4_ext_show_path(struct inode *inode, struct ext4_ext_path *path)
417{ 484{
@@ -536,7 +603,7 @@ ext4_ext_binsearch_idx(struct inode *inode,
536 } 603 }
537 604
538 path->p_idx = l - 1; 605 path->p_idx = l - 1;
539 ext_debug(" -> %d->%lld ", le32_to_cpu(path->p_idx->ei_block), 606 ext_debug(" -> %u->%lld ", le32_to_cpu(path->p_idx->ei_block),
540 ext4_idx_pblock(path->p_idx)); 607 ext4_idx_pblock(path->p_idx));
541 608
542#ifdef CHECK_BINSEARCH 609#ifdef CHECK_BINSEARCH
@@ -668,8 +735,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
668 i = depth; 735 i = depth;
669 /* walk through the tree */ 736 /* walk through the tree */
670 while (i) { 737 while (i) {
671 int need_to_validate = 0;
672
673 ext_debug("depth %d: num %d, max %d\n", 738 ext_debug("depth %d: num %d, max %d\n",
674 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max)); 739 ppos, le16_to_cpu(eh->eh_entries), le16_to_cpu(eh->eh_max));
675 740
@@ -688,8 +753,6 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
688 put_bh(bh); 753 put_bh(bh);
689 goto err; 754 goto err;
690 } 755 }
691 /* validate the extent entries */
692 need_to_validate = 1;
693 } 756 }
694 eh = ext_block_hdr(bh); 757 eh = ext_block_hdr(bh);
695 ppos++; 758 ppos++;
@@ -703,7 +766,7 @@ ext4_ext_find_extent(struct inode *inode, ext4_lblk_t block,
703 path[ppos].p_hdr = eh; 766 path[ppos].p_hdr = eh;
704 i--; 767 i--;
705 768
706 if (need_to_validate && ext4_ext_check(inode, eh, i)) 769 if (ext4_ext_check_block(inode, eh, i, bh))
707 goto err; 770 goto err;
708 } 771 }
709 772
@@ -914,6 +977,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
914 le16_add_cpu(&neh->eh_entries, m); 977 le16_add_cpu(&neh->eh_entries, m);
915 } 978 }
916 979
980 ext4_extent_block_csum_set(inode, neh);
917 set_buffer_uptodate(bh); 981 set_buffer_uptodate(bh);
918 unlock_buffer(bh); 982 unlock_buffer(bh);
919 983
@@ -992,6 +1056,7 @@ static int ext4_ext_split(handle_t *handle, struct inode *inode,
992 sizeof(struct ext4_extent_idx) * m); 1056 sizeof(struct ext4_extent_idx) * m);
993 le16_add_cpu(&neh->eh_entries, m); 1057 le16_add_cpu(&neh->eh_entries, m);
994 } 1058 }
1059 ext4_extent_block_csum_set(inode, neh);
995 set_buffer_uptodate(bh); 1060 set_buffer_uptodate(bh);
996 unlock_buffer(bh); 1061 unlock_buffer(bh);
997 1062
@@ -1089,6 +1154,7 @@ static int ext4_ext_grow_indepth(handle_t *handle, struct inode *inode,
1089 else 1154 else
1090 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0)); 1155 neh->eh_max = cpu_to_le16(ext4_ext_space_block(inode, 0));
1091 neh->eh_magic = EXT4_EXT_MAGIC; 1156 neh->eh_magic = EXT4_EXT_MAGIC;
1157 ext4_extent_block_csum_set(inode, neh);
1092 set_buffer_uptodate(bh); 1158 set_buffer_uptodate(bh);
1093 unlock_buffer(bh); 1159 unlock_buffer(bh);
1094 1160
@@ -1344,7 +1410,8 @@ got_index:
1344 return -EIO; 1410 return -EIO;
1345 eh = ext_block_hdr(bh); 1411 eh = ext_block_hdr(bh);
1346 /* subtract from p_depth to get proper eh_depth */ 1412 /* subtract from p_depth to get proper eh_depth */
1347 if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 1413 if (ext4_ext_check_block(inode, eh,
1414 path->p_depth - depth, bh)) {
1348 put_bh(bh); 1415 put_bh(bh);
1349 return -EIO; 1416 return -EIO;
1350 } 1417 }
@@ -1357,7 +1424,7 @@ got_index:
1357 if (bh == NULL) 1424 if (bh == NULL)
1358 return -EIO; 1425 return -EIO;
1359 eh = ext_block_hdr(bh); 1426 eh = ext_block_hdr(bh);
1360 if (ext4_ext_check(inode, eh, path->p_depth - depth)) { 1427 if (ext4_ext_check_block(inode, eh, path->p_depth - depth, bh)) {
1361 put_bh(bh); 1428 put_bh(bh);
1362 return -EIO; 1429 return -EIO;
1363 } 1430 }
@@ -2644,8 +2711,8 @@ cont:
2644 err = -EIO; 2711 err = -EIO;
2645 break; 2712 break;
2646 } 2713 }
2647 if (ext4_ext_check(inode, ext_block_hdr(bh), 2714 if (ext4_ext_check_block(inode, ext_block_hdr(bh),
2648 depth - i - 1)) { 2715 depth - i - 1, bh)) {
2649 err = -EIO; 2716 err = -EIO;
2650 break; 2717 break;
2651 } 2718 }
@@ -4722,8 +4789,8 @@ int ext4_ext_punch_hole(struct file *file, loff_t offset, loff_t length)
4722 4789
4723 /* Now release the pages */ 4790 /* Now release the pages */
4724 if (last_page_offset > first_page_offset) { 4791 if (last_page_offset > first_page_offset) {
4725 truncate_inode_pages_range(mapping, first_page_offset, 4792 truncate_pagecache_range(inode, first_page_offset,
4726 last_page_offset-1); 4793 last_page_offset - 1);
4727 } 4794 }
4728 4795
4729 /* finish any pending end_io work */ 4796 /* finish any pending end_io work */
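The extents.c hunks above compute the extent block checksum over the header plus eh_max entries (EXT4_EXTENT_TAIL_OFFSET), set it whenever a non-root node is dirtied, and verify it in __ext4_ext_check for non-root nodes only. To make the tail placement concrete, here is that offset worked for a 4 KiB block; the eh_max formula used below (usable bytes divided by the 12-byte entry size) is an assumption about how extent blocks are normally sized, not part of the hunk itself.

/*
 * EXT4_EXTENT_TAIL_OFFSET() worked for a 4 KiB block.  The eh_max formula
 * here is an assumption for illustration.
 */
#include <stdio.h>

#define HDR_SIZE   12U	/* sizeof(struct ext4_extent_header) */
#define ENTRY_SIZE 12U	/* sizeof(struct ext4_extent) */
#define TAIL_SIZE   4U	/* sizeof(struct ext4_extent_tail) */

int main(void)
{
	unsigned int blocksize = 4096;
	unsigned int eh_max = (blocksize - HDR_SIZE) / ENTRY_SIZE;
	unsigned int tail_offset = HDR_SIZE + eh_max * ENTRY_SIZE;

	printf("eh_max=%u entries, tail at bytes %u-%u of a %u-byte block\n",
	       eh_max, tail_offset, tail_offset + TAIL_SIZE - 1, blocksize);
	return 0;
}

The root node is exempt from this verification because when depth equals ext_depth(inode) the header lives in the inode's i_block array, which the inode checksum added in inode.c already covers.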
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index cb70f1812a70..8c7642a00054 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -95,7 +95,7 @@ ext4_file_write(struct kiocb *iocb, const struct iovec *iov,
95{ 95{
96 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; 96 struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode;
97 int unaligned_aio = 0; 97 int unaligned_aio = 0;
98 int ret; 98 ssize_t ret;
99 99
100 /* 100 /*
101 * If we have encountered a bitmap-format file, the size limit 101 * If we have encountered a bitmap-format file, the size limit
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 9f9acac6c43f..d48e8b14928c 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -70,24 +70,27 @@ static unsigned ext4_init_inode_bitmap(struct super_block *sb,
70 ext4_group_t block_group, 70 ext4_group_t block_group,
71 struct ext4_group_desc *gdp) 71 struct ext4_group_desc *gdp)
72{ 72{
73 struct ext4_sb_info *sbi = EXT4_SB(sb);
74
75 J_ASSERT_BH(bh, buffer_locked(bh)); 73 J_ASSERT_BH(bh, buffer_locked(bh));
76 74
77 /* If checksum is bad mark all blocks and inodes use to prevent 75 /* If checksum is bad mark all blocks and inodes use to prevent
78 * allocation, essentially implementing a per-group read-only flag. */ 76 * allocation, essentially implementing a per-group read-only flag. */
79 if (!ext4_group_desc_csum_verify(sbi, block_group, gdp)) { 77 if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
80 ext4_error(sb, "Checksum bad for group %u", block_group); 78 ext4_error(sb, "Checksum bad for group %u", block_group);
81 ext4_free_group_clusters_set(sb, gdp, 0); 79 ext4_free_group_clusters_set(sb, gdp, 0);
82 ext4_free_inodes_set(sb, gdp, 0); 80 ext4_free_inodes_set(sb, gdp, 0);
83 ext4_itable_unused_set(sb, gdp, 0); 81 ext4_itable_unused_set(sb, gdp, 0);
84 memset(bh->b_data, 0xff, sb->s_blocksize); 82 memset(bh->b_data, 0xff, sb->s_blocksize);
83 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
84 EXT4_INODES_PER_GROUP(sb) / 8);
85 return 0; 85 return 0;
86 } 86 }
87 87
88 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8); 88 memset(bh->b_data, 0, (EXT4_INODES_PER_GROUP(sb) + 7) / 8);
89 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8, 89 ext4_mark_bitmap_end(EXT4_INODES_PER_GROUP(sb), sb->s_blocksize * 8,
90 bh->b_data); 90 bh->b_data);
91 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bh,
92 EXT4_INODES_PER_GROUP(sb) / 8);
93 ext4_group_desc_csum_set(sb, block_group, gdp);
91 94
92 return EXT4_INODES_PER_GROUP(sb); 95 return EXT4_INODES_PER_GROUP(sb);
93} 96}
@@ -128,12 +131,12 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
128 return NULL; 131 return NULL;
129 } 132 }
130 if (bitmap_uptodate(bh)) 133 if (bitmap_uptodate(bh))
131 return bh; 134 goto verify;
132 135
133 lock_buffer(bh); 136 lock_buffer(bh);
134 if (bitmap_uptodate(bh)) { 137 if (bitmap_uptodate(bh)) {
135 unlock_buffer(bh); 138 unlock_buffer(bh);
136 return bh; 139 goto verify;
137 } 140 }
138 141
139 ext4_lock_group(sb, block_group); 142 ext4_lock_group(sb, block_group);
@@ -141,6 +144,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
141 ext4_init_inode_bitmap(sb, bh, block_group, desc); 144 ext4_init_inode_bitmap(sb, bh, block_group, desc);
142 set_bitmap_uptodate(bh); 145 set_bitmap_uptodate(bh);
143 set_buffer_uptodate(bh); 146 set_buffer_uptodate(bh);
147 set_buffer_verified(bh);
144 ext4_unlock_group(sb, block_group); 148 ext4_unlock_group(sb, block_group);
145 unlock_buffer(bh); 149 unlock_buffer(bh);
146 return bh; 150 return bh;
@@ -154,7 +158,7 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
154 */ 158 */
155 set_bitmap_uptodate(bh); 159 set_bitmap_uptodate(bh);
156 unlock_buffer(bh); 160 unlock_buffer(bh);
157 return bh; 161 goto verify;
158 } 162 }
159 /* 163 /*
160 * submit the buffer_head for reading 164 * submit the buffer_head for reading
@@ -171,6 +175,20 @@ ext4_read_inode_bitmap(struct super_block *sb, ext4_group_t block_group)
171 block_group, bitmap_blk); 175 block_group, bitmap_blk);
172 return NULL; 176 return NULL;
173 } 177 }
178
179verify:
180 ext4_lock_group(sb, block_group);
181 if (!buffer_verified(bh) &&
182 !ext4_inode_bitmap_csum_verify(sb, block_group, desc, bh,
183 EXT4_INODES_PER_GROUP(sb) / 8)) {
184 ext4_unlock_group(sb, block_group);
185 put_bh(bh);
186 ext4_error(sb, "Corrupt inode bitmap - block_group = %u, "
187 "inode_bitmap = %llu", block_group, bitmap_blk);
188 return NULL;
189 }
190 ext4_unlock_group(sb, block_group);
191 set_buffer_verified(bh);
174 return bh; 192 return bh;
175} 193}
176 194
@@ -276,7 +294,9 @@ void ext4_free_inode(handle_t *handle, struct inode *inode)
276 ext4_used_dirs_set(sb, gdp, count); 294 ext4_used_dirs_set(sb, gdp, count);
277 percpu_counter_dec(&sbi->s_dirs_counter); 295 percpu_counter_dec(&sbi->s_dirs_counter);
278 } 296 }
279 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); 297 ext4_inode_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
298 EXT4_INODES_PER_GROUP(sb) / 8);
299 ext4_group_desc_csum_set(sb, block_group, gdp);
280 ext4_unlock_group(sb, block_group); 300 ext4_unlock_group(sb, block_group);
281 301
282 percpu_counter_inc(&sbi->s_freeinodes_counter); 302 percpu_counter_inc(&sbi->s_freeinodes_counter);
@@ -488,10 +508,12 @@ fallback_retry:
488 for (i = 0; i < ngroups; i++) { 508 for (i = 0; i < ngroups; i++) {
489 grp = (parent_group + i) % ngroups; 509 grp = (parent_group + i) % ngroups;
490 desc = ext4_get_group_desc(sb, grp, NULL); 510 desc = ext4_get_group_desc(sb, grp, NULL);
491 grp_free = ext4_free_inodes_count(sb, desc); 511 if (desc) {
492 if (desc && grp_free && grp_free >= avefreei) { 512 grp_free = ext4_free_inodes_count(sb, desc);
493 *group = grp; 513 if (grp_free && grp_free >= avefreei) {
494 return 0; 514 *group = grp;
515 return 0;
516 }
495 } 517 }
496 } 518 }
497 519
@@ -709,7 +731,7 @@ repeat_in_this_group:
709 731
710got: 732got:
711 /* We may have to initialize the block bitmap if it isn't already */ 733 /* We may have to initialize the block bitmap if it isn't already */
712 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM) && 734 if (ext4_has_group_desc_csum(sb) &&
713 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) { 735 gdp->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
714 struct buffer_head *block_bitmap_bh; 736 struct buffer_head *block_bitmap_bh;
715 737
@@ -731,8 +753,11 @@ got:
731 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT); 753 gdp->bg_flags &= cpu_to_le16(~EXT4_BG_BLOCK_UNINIT);
732 ext4_free_group_clusters_set(sb, gdp, 754 ext4_free_group_clusters_set(sb, gdp,
733 ext4_free_clusters_after_init(sb, group, gdp)); 755 ext4_free_clusters_after_init(sb, group, gdp));
734 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, 756 ext4_block_bitmap_csum_set(sb, group, gdp,
735 gdp); 757 block_bitmap_bh,
758 EXT4_BLOCKS_PER_GROUP(sb) /
759 8);
760 ext4_group_desc_csum_set(sb, group, gdp);
736 } 761 }
737 ext4_unlock_group(sb, group); 762 ext4_unlock_group(sb, group);
738 763
@@ -751,7 +776,7 @@ got:
751 goto fail; 776 goto fail;
752 777
753 /* Update the relevant bg descriptor fields */ 778 /* Update the relevant bg descriptor fields */
754 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { 779 if (ext4_has_group_desc_csum(sb)) {
755 int free; 780 int free;
756 struct ext4_group_info *grp = ext4_get_group_info(sb, group); 781 struct ext4_group_info *grp = ext4_get_group_info(sb, group);
757 782
@@ -772,7 +797,10 @@ got:
772 ext4_itable_unused_set(sb, gdp, 797 ext4_itable_unused_set(sb, gdp,
773 (EXT4_INODES_PER_GROUP(sb) - ino)); 798 (EXT4_INODES_PER_GROUP(sb) - ino));
774 up_read(&grp->alloc_sem); 799 up_read(&grp->alloc_sem);
800 } else {
801 ext4_lock_group(sb, group);
775 } 802 }
803
776 ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1); 804 ext4_free_inodes_set(sb, gdp, ext4_free_inodes_count(sb, gdp) - 1);
777 if (S_ISDIR(mode)) { 805 if (S_ISDIR(mode)) {
778 ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1); 806 ext4_used_dirs_set(sb, gdp, ext4_used_dirs_count(sb, gdp) + 1);
@@ -782,10 +810,12 @@ got:
782 atomic_inc(&sbi->s_flex_groups[f].used_dirs); 810 atomic_inc(&sbi->s_flex_groups[f].used_dirs);
783 } 811 }
784 } 812 }
785 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { 813 if (ext4_has_group_desc_csum(sb)) {
786 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); 814 ext4_inode_bitmap_csum_set(sb, group, gdp, inode_bitmap_bh,
787 ext4_unlock_group(sb, group); 815 EXT4_INODES_PER_GROUP(sb) / 8);
816 ext4_group_desc_csum_set(sb, group, gdp);
788 } 817 }
818 ext4_unlock_group(sb, group);
789 819
790 BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata"); 820 BUFFER_TRACE(inode_bitmap_bh, "call ext4_handle_dirty_metadata");
791 err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh); 821 err = ext4_handle_dirty_metadata(handle, NULL, inode_bitmap_bh);
@@ -850,6 +880,19 @@ got:
850 inode->i_generation = sbi->s_next_generation++; 880 inode->i_generation = sbi->s_next_generation++;
851 spin_unlock(&sbi->s_next_gen_lock); 881 spin_unlock(&sbi->s_next_gen_lock);
852 882
883 /* Precompute checksum seed for inode metadata */
884 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
885 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
886 __u32 csum;
887 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
888 __le32 inum = cpu_to_le32(inode->i_ino);
889 __le32 gen = cpu_to_le32(inode->i_generation);
890 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
891 sizeof(inum));
892 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
893 sizeof(gen));
894 }
895
853 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */ 896 ext4_clear_state_flags(ei); /* Only relevant on 32-bit archs */
854 ext4_set_inode_state(inode, EXT4_STATE_NEW); 897 ext4_set_inode_state(inode, EXT4_STATE_NEW);
855 898
@@ -1140,7 +1183,7 @@ int ext4_init_inode_table(struct super_block *sb, ext4_group_t group,
1140skip_zeroout: 1183skip_zeroout:
1141 ext4_lock_group(sb, group); 1184 ext4_lock_group(sb, group);
1142 gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED); 1185 gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
1143 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); 1186 ext4_group_desc_csum_set(sb, group, gdp);
1144 ext4_unlock_group(sb, group); 1187 ext4_unlock_group(sb, group);
1145 1188
1146 BUFFER_TRACE(group_desc_bh, 1189 BUFFER_TRACE(group_desc_bh,
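The ialloc.c hunk above precomputes ei->i_csum_seed by folding the little-endian inode number and then the generation into the filesystem seed. A standalone sketch of that two-step fold follows; the bitwise crc32c, the sample seed value, and a little-endian host are assumptions, so the printed value is illustrative only.

/*
 * Sketch of the per-inode seed computed in the hunk above: fold the
 * little-endian inode number and then the generation into the filesystem
 * seed.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint32_t crc32c(uint32_t crc, const void *buf, size_t len)
{
	const uint8_t *p = buf;

	while (len--) {
		crc ^= *p++;
		for (int k = 0; k < 8; k++)
			crc = (crc >> 1) ^ (0x82F63B78U & -(crc & 1));
	}
	return crc;
}

int main(void)
{
	uint32_t fs_seed = 0x12345678;	/* hypothetical s_csum_seed */
	uint32_t inum = 12;		/* inode->i_ino as __le32 */
	uint32_t gen = 1;		/* inode->i_generation as __le32 */
	uint32_t seed;

	seed = crc32c(fs_seed, &inum, sizeof(inum));
	seed = crc32c(seed, &gen, sizeof(gen));	/* becomes ei->i_csum_seed */
	printf("i_csum_seed=%08x\n", (unsigned)seed);
	return 0;
}

The inode.c hunk below repeats the same derivation in ext4_iget() when an existing inode is read, so allocation and lookup agree on the seed.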
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 07eaf565fdcb..02bc8cbe7281 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -47,6 +47,73 @@
47 47
48#define MPAGE_DA_EXTENT_TAIL 0x01 48#define MPAGE_DA_EXTENT_TAIL 0x01
49 49
50static __u32 ext4_inode_csum(struct inode *inode, struct ext4_inode *raw,
51 struct ext4_inode_info *ei)
52{
53 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
54 __u16 csum_lo;
55 __u16 csum_hi = 0;
56 __u32 csum;
57
58 csum_lo = raw->i_checksum_lo;
59 raw->i_checksum_lo = 0;
60 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
61 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi)) {
62 csum_hi = raw->i_checksum_hi;
63 raw->i_checksum_hi = 0;
64 }
65
66 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)raw,
67 EXT4_INODE_SIZE(inode->i_sb));
68
69 raw->i_checksum_lo = csum_lo;
70 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
71 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
72 raw->i_checksum_hi = csum_hi;
73
74 return csum;
75}
76
77static int ext4_inode_csum_verify(struct inode *inode, struct ext4_inode *raw,
78 struct ext4_inode_info *ei)
79{
80 __u32 provided, calculated;
81
82 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
83 cpu_to_le32(EXT4_OS_LINUX) ||
84 !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
85 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
86 return 1;
87
88 provided = le16_to_cpu(raw->i_checksum_lo);
89 calculated = ext4_inode_csum(inode, raw, ei);
90 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
91 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
92 provided |= ((__u32)le16_to_cpu(raw->i_checksum_hi)) << 16;
93 else
94 calculated &= 0xFFFF;
95
96 return provided == calculated;
97}
98
99static void ext4_inode_csum_set(struct inode *inode, struct ext4_inode *raw,
100 struct ext4_inode_info *ei)
101{
102 __u32 csum;
103
104 if (EXT4_SB(inode->i_sb)->s_es->s_creator_os !=
105 cpu_to_le32(EXT4_OS_LINUX) ||
106 !EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
107 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
108 return;
109
110 csum = ext4_inode_csum(inode, raw, ei);
111 raw->i_checksum_lo = cpu_to_le16(csum & 0xFFFF);
112 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE &&
113 EXT4_FITS_IN_INODE(raw, ei, i_checksum_hi))
114 raw->i_checksum_hi = cpu_to_le16(csum >> 16);
115}
116
50static inline int ext4_begin_ordered_truncate(struct inode *inode, 117static inline int ext4_begin_ordered_truncate(struct inode *inode,
51 loff_t new_size) 118 loff_t new_size)
52{ 119{
@@ -3517,8 +3584,7 @@ make_io:
3517 b = table; 3584 b = table;
3518 end = b + EXT4_SB(sb)->s_inode_readahead_blks; 3585 end = b + EXT4_SB(sb)->s_inode_readahead_blks;
3519 num = EXT4_INODES_PER_GROUP(sb); 3586 num = EXT4_INODES_PER_GROUP(sb);
3520 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 3587 if (ext4_has_group_desc_csum(sb))
3521 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3522 num -= ext4_itable_unused_count(sb, gdp); 3588 num -= ext4_itable_unused_count(sb, gdp);
3523 table += num / inodes_per_block; 3589 table += num / inodes_per_block;
3524 if (end > table) 3590 if (end > table)
@@ -3646,6 +3712,39 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3646 if (ret < 0) 3712 if (ret < 0)
3647 goto bad_inode; 3713 goto bad_inode;
3648 raw_inode = ext4_raw_inode(&iloc); 3714 raw_inode = ext4_raw_inode(&iloc);
3715
3716 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3717 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3718 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3719 EXT4_INODE_SIZE(inode->i_sb)) {
3720 EXT4_ERROR_INODE(inode, "bad extra_isize (%u != %u)",
3721 EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize,
3722 EXT4_INODE_SIZE(inode->i_sb));
3723 ret = -EIO;
3724 goto bad_inode;
3725 }
3726 } else
3727 ei->i_extra_isize = 0;
3728
3729 /* Precompute checksum seed for inode metadata */
3730 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3731 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3732 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
3733 __u32 csum;
3734 __le32 inum = cpu_to_le32(inode->i_ino);
3735 __le32 gen = raw_inode->i_generation;
3736 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum,
3737 sizeof(inum));
3738 ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen,
3739 sizeof(gen));
3740 }
3741
3742 if (!ext4_inode_csum_verify(inode, raw_inode, ei)) {
3743 EXT4_ERROR_INODE(inode, "checksum invalid");
3744 ret = -EIO;
3745 goto bad_inode;
3746 }
3747
3649 inode->i_mode = le16_to_cpu(raw_inode->i_mode); 3748 inode->i_mode = le16_to_cpu(raw_inode->i_mode);
3650 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low); 3749 i_uid = (uid_t)le16_to_cpu(raw_inode->i_uid_low);
3651 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low); 3750 i_gid = (gid_t)le16_to_cpu(raw_inode->i_gid_low);
@@ -3725,12 +3824,6 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3725 } 3824 }
3726 3825
3727 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) { 3826 if (EXT4_INODE_SIZE(inode->i_sb) > EXT4_GOOD_OLD_INODE_SIZE) {
3728 ei->i_extra_isize = le16_to_cpu(raw_inode->i_extra_isize);
3729 if (EXT4_GOOD_OLD_INODE_SIZE + ei->i_extra_isize >
3730 EXT4_INODE_SIZE(inode->i_sb)) {
3731 ret = -EIO;
3732 goto bad_inode;
3733 }
3734 if (ei->i_extra_isize == 0) { 3827 if (ei->i_extra_isize == 0) {
3735 /* The extra space is currently unused. Use it. */ 3828 /* The extra space is currently unused. Use it. */
3736 ei->i_extra_isize = sizeof(struct ext4_inode) - 3829 ei->i_extra_isize = sizeof(struct ext4_inode) -
@@ -3742,8 +3835,7 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
3742 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC)) 3835 if (*magic == cpu_to_le32(EXT4_XATTR_MAGIC))
3743 ext4_set_inode_state(inode, EXT4_STATE_XATTR); 3836 ext4_set_inode_state(inode, EXT4_STATE_XATTR);
3744 } 3837 }
3745 } else 3838 }
3746 ei->i_extra_isize = 0;
3747 3839
3748 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode); 3840 EXT4_INODE_GET_XTIME(i_ctime, inode, raw_inode);
3749 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode); 3841 EXT4_INODE_GET_XTIME(i_mtime, inode, raw_inode);
@@ -3942,7 +4034,7 @@ static int ext4_do_update_inode(handle_t *handle,
3942 EXT4_SET_RO_COMPAT_FEATURE(sb, 4034 EXT4_SET_RO_COMPAT_FEATURE(sb,
3943 EXT4_FEATURE_RO_COMPAT_LARGE_FILE); 4035 EXT4_FEATURE_RO_COMPAT_LARGE_FILE);
3944 ext4_handle_sync(handle); 4036 ext4_handle_sync(handle);
3945 err = ext4_handle_dirty_super(handle, sb); 4037 err = ext4_handle_dirty_super_now(handle, sb);
3946 } 4038 }
3947 } 4039 }
3948 raw_inode->i_generation = cpu_to_le32(inode->i_generation); 4040 raw_inode->i_generation = cpu_to_le32(inode->i_generation);
@@ -3969,6 +4061,8 @@ static int ext4_do_update_inode(handle_t *handle,
3969 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize); 4061 raw_inode->i_extra_isize = cpu_to_le16(ei->i_extra_isize);
3970 } 4062 }
3971 4063
4064 ext4_inode_csum_set(inode, raw_inode, ei);
4065
3972 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 4066 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
3973 rc = ext4_handle_dirty_metadata(handle, NULL, bh); 4067 rc = ext4_handle_dirty_metadata(handle, NULL, bh);
3974 if (!err) 4068 if (!err)
@@ -4213,7 +4307,8 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry,
4213 * will return the blocks that include the delayed allocation 4307 * will return the blocks that include the delayed allocation
4214 * blocks for this file. 4308 * blocks for this file.
4215 */ 4309 */
4216 delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; 4310 delalloc_blocks = EXT4_C2B(EXT4_SB(inode->i_sb),
4311 EXT4_I(inode)->i_reserved_data_blocks);
4217 4312
4218 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; 4313 stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9;
4219 return 0; 4314 return 0;
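The inode.c hunks above split one 32-bit crc32c between i_checksum_lo and, when the inode is large enough to hold it, i_checksum_hi; on old 128-byte inodes only the low half is stored and the verifier masks the computed value to 16 bits. A small sketch of that split and comparison (the sample checksum value is made up):

/*
 * Sketch of the lo/hi split used by ext4_inode_csum_set() and
 * ext4_inode_csum_verify() above: the low 16 bits always go in
 * i_checksum_lo, the high 16 bits only when the inode has room for
 * i_checksum_hi, and verification masks to 16 bits otherwise.
 */
#include <stdint.h>
#include <stdio.h>

static int verify(uint32_t calculated, uint16_t lo, uint16_t hi, int has_hi)
{
	uint32_t provided = lo;

	if (has_hi)
		provided |= (uint32_t)hi << 16;	/* both halves stored */
	else
		calculated &= 0xFFFF;		/* only 16 bits on disk */
	return provided == calculated;
}

int main(void)
{
	uint32_t csum = 0xABCD1234;	/* hypothetical inode checksum */
	uint16_t lo = csum & 0xFFFF;
	uint16_t hi = csum >> 16;

	printf("large inode: %s\n", verify(csum, lo, hi, 1) ? "ok" : "bad");
	printf("128-byte inode: %s\n", verify(csum, lo, 0, 0) ? "ok" : "bad");
	return 0;
}

The practical consequence is that 128-byte inodes get 16 bits of checksum protection while larger inodes with room for i_checksum_hi get the full 32 bits.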
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 6eee25591b81..e34deac3f366 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -38,7 +38,7 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
38 handle_t *handle = NULL; 38 handle_t *handle = NULL;
39 int err, migrate = 0; 39 int err, migrate = 0;
40 struct ext4_iloc iloc; 40 struct ext4_iloc iloc;
41 unsigned int oldflags; 41 unsigned int oldflags, mask, i;
42 unsigned int jflag; 42 unsigned int jflag;
43 43
44 if (!inode_owner_or_capable(inode)) 44 if (!inode_owner_or_capable(inode))
@@ -115,9 +115,14 @@ long ext4_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
115 if (err) 115 if (err)
116 goto flags_err; 116 goto flags_err;
117 117
118 flags = flags & EXT4_FL_USER_MODIFIABLE; 118 for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
119 flags |= oldflags & ~EXT4_FL_USER_MODIFIABLE; 119 if (!(mask & EXT4_FL_USER_MODIFIABLE))
120 ei->i_flags = flags; 120 continue;
121 if (mask & flags)
122 ext4_set_inode_flag(inode, i);
123 else
124 ext4_clear_inode_flag(inode, i);
125 }
121 126
122 ext4_set_inode_flags(inode); 127 ext4_set_inode_flags(inode);
123 inode->i_ctime = ext4_current_time(inode); 128 inode->i_ctime = ext4_current_time(inode);
@@ -152,6 +157,13 @@ flags_out:
152 if (!inode_owner_or_capable(inode)) 157 if (!inode_owner_or_capable(inode))
153 return -EPERM; 158 return -EPERM;
154 159
160 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
161 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
162 ext4_warning(sb, "Setting inode version is not "
163 "supported with metadata_csum enabled.");
164 return -ENOTTY;
165 }
166
155 err = mnt_want_write_file(filp); 167 err = mnt_want_write_file(filp);
156 if (err) 168 if (err)
157 return err; 169 return err;
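The ioctl hunk above replaces the old mask-and-assign update of the flags word with a bit-by-bit loop, so each user-modifiable flag goes through ext4_set_inode_flag()/ext4_clear_inode_flag() while every other bit keeps its current value. A sketch of the loop's effect on a flags word; the mask constant here is made up, not EXT4_FL_USER_MODIFIABLE.

/*
 * Sketch of the per-bit flag update in the ioctl hunk above: only bits
 * inside a user-modifiable mask are copied from the requested value,
 * everything else keeps its current state.
 */
#include <stdint.h>
#include <stdio.h>

#define USER_MODIFIABLE 0x000000FFU	/* hypothetical stand-in mask */

static uint32_t apply_flags(uint32_t current, uint32_t requested)
{
	uint32_t mask;
	int i;

	for (i = 0, mask = 1; i < 32; i++, mask <<= 1) {
		if (!(mask & USER_MODIFIABLE))
			continue;		/* bit not user controllable */
		if (requested & mask)
			current |= mask;	/* ext4_set_inode_flag(inode, i) */
		else
			current &= ~mask;	/* ext4_clear_inode_flag(inode, i) */
	}
	return current;
}

int main(void)
{
	printf("%08x\n", (unsigned)apply_flags(0xF0F0F0F0U, 0x0000000FU));
	return 0;
}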
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 99ab428bcfa0..1cd6994fc446 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -788,7 +788,7 @@ static int ext4_mb_init_cache(struct page *page, char *incore)
788 int first_block; 788 int first_block;
789 struct super_block *sb; 789 struct super_block *sb;
790 struct buffer_head *bhs; 790 struct buffer_head *bhs;
791 struct buffer_head **bh; 791 struct buffer_head **bh = NULL;
792 struct inode *inode; 792 struct inode *inode;
793 char *data; 793 char *data;
794 char *bitmap; 794 char *bitmap;
@@ -2375,7 +2375,7 @@ static int ext4_groupinfo_create_slab(size_t size)
2375 return 0; 2375 return 0;
2376} 2376}
2377 2377
2378int ext4_mb_init(struct super_block *sb, int needs_recovery) 2378int ext4_mb_init(struct super_block *sb)
2379{ 2379{
2380 struct ext4_sb_info *sbi = EXT4_SB(sb); 2380 struct ext4_sb_info *sbi = EXT4_SB(sb);
2381 unsigned i, j; 2381 unsigned i, j;
@@ -2517,6 +2517,9 @@ int ext4_mb_release(struct super_block *sb)
2517 struct ext4_sb_info *sbi = EXT4_SB(sb); 2517 struct ext4_sb_info *sbi = EXT4_SB(sb);
2518 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits); 2518 struct kmem_cache *cachep = get_groupinfo_cache(sb->s_blocksize_bits);
2519 2519
2520 if (sbi->s_proc)
2521 remove_proc_entry("mb_groups", sbi->s_proc);
2522
2520 if (sbi->s_group_info) { 2523 if (sbi->s_group_info) {
2521 for (i = 0; i < ngroups; i++) { 2524 for (i = 0; i < ngroups; i++) {
2522 grinfo = ext4_get_group_info(sb, i); 2525 grinfo = ext4_get_group_info(sb, i);
@@ -2564,8 +2567,6 @@ int ext4_mb_release(struct super_block *sb)
2564 } 2567 }
2565 2568
2566 free_percpu(sbi->s_locality_groups); 2569 free_percpu(sbi->s_locality_groups);
2567 if (sbi->s_proc)
2568 remove_proc_entry("mb_groups", sbi->s_proc);
2569 2570
2570 return 0; 2571 return 0;
2571} 2572}
@@ -2797,7 +2798,9 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
2797 } 2798 }
2798 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len; 2799 len = ext4_free_group_clusters(sb, gdp) - ac->ac_b_ex.fe_len;
2799 ext4_free_group_clusters_set(sb, gdp, len); 2800 ext4_free_group_clusters_set(sb, gdp, len);
2800 gdp->bg_checksum = ext4_group_desc_csum(sbi, ac->ac_b_ex.fe_group, gdp); 2801 ext4_block_bitmap_csum_set(sb, ac->ac_b_ex.fe_group, gdp, bitmap_bh,
2802 EXT4_BLOCKS_PER_GROUP(sb) / 8);
2803 ext4_group_desc_csum_set(sb, ac->ac_b_ex.fe_group, gdp);
2801 2804
2802 ext4_unlock_group(sb, ac->ac_b_ex.fe_group); 2805 ext4_unlock_group(sb, ac->ac_b_ex.fe_group);
2803 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len); 2806 percpu_counter_sub(&sbi->s_freeclusters_counter, ac->ac_b_ex.fe_len);
@@ -3071,13 +3074,9 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
3071static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac) 3074static void ext4_discard_allocated_blocks(struct ext4_allocation_context *ac)
3072{ 3075{
3073 struct ext4_prealloc_space *pa = ac->ac_pa; 3076 struct ext4_prealloc_space *pa = ac->ac_pa;
3074 int len;
3075
3076 if (pa && pa->pa_type == MB_INODE_PA) {
3077 len = ac->ac_b_ex.fe_len;
3078 pa->pa_free += len;
3079 }
3080 3077
3078 if (pa && pa->pa_type == MB_INODE_PA)
3079 pa->pa_free += ac->ac_b_ex.fe_len;
3081} 3080}
3082 3081
3083/* 3082/*
@@ -4636,6 +4635,7 @@ do_more:
4636 */ 4635 */
4637 new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS); 4636 new_entry = kmem_cache_alloc(ext4_free_data_cachep, GFP_NOFS);
4638 if (!new_entry) { 4637 if (!new_entry) {
4638 ext4_mb_unload_buddy(&e4b);
4639 err = -ENOMEM; 4639 err = -ENOMEM;
4640 goto error_return; 4640 goto error_return;
4641 } 4641 }
@@ -4659,7 +4659,9 @@ do_more:
4659 4659
4660 ret = ext4_free_group_clusters(sb, gdp) + count_clusters; 4660 ret = ext4_free_group_clusters(sb, gdp) + count_clusters;
4661 ext4_free_group_clusters_set(sb, gdp, ret); 4661 ext4_free_group_clusters_set(sb, gdp, ret);
4662 gdp->bg_checksum = ext4_group_desc_csum(sbi, block_group, gdp); 4662 ext4_block_bitmap_csum_set(sb, block_group, gdp, bitmap_bh,
4663 EXT4_BLOCKS_PER_GROUP(sb) / 8);
4664 ext4_group_desc_csum_set(sb, block_group, gdp);
4663 ext4_unlock_group(sb, block_group); 4665 ext4_unlock_group(sb, block_group);
4664 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters); 4666 percpu_counter_add(&sbi->s_freeclusters_counter, count_clusters);
4665 4667
@@ -4803,7 +4805,9 @@ int ext4_group_add_blocks(handle_t *handle, struct super_block *sb,
4803 mb_free_blocks(NULL, &e4b, bit, count); 4805 mb_free_blocks(NULL, &e4b, bit, count);
4804 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc); 4806 blk_free_count = blocks_freed + ext4_free_group_clusters(sb, desc);
4805 ext4_free_group_clusters_set(sb, desc, blk_free_count); 4807 ext4_free_group_clusters_set(sb, desc, blk_free_count);
4806 desc->bg_checksum = ext4_group_desc_csum(sbi, block_group, desc); 4808 ext4_block_bitmap_csum_set(sb, block_group, desc, bitmap_bh,
4809 EXT4_BLOCKS_PER_GROUP(sb) / 8);
4810 ext4_group_desc_csum_set(sb, block_group, desc);
4807 ext4_unlock_group(sb, block_group); 4811 ext4_unlock_group(sb, block_group);
4808 percpu_counter_add(&sbi->s_freeclusters_counter, 4812 percpu_counter_add(&sbi->s_freeclusters_counter,
4809 EXT4_B2C(sbi, blocks_freed)); 4813 EXT4_B2C(sbi, blocks_freed));
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index ed6548d89165..f99a1311e847 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -6,12 +6,45 @@
6 6
7#include "ext4.h" 7#include "ext4.h"
8 8
9/* Checksumming functions */
10static __u32 ext4_mmp_csum(struct super_block *sb, struct mmp_struct *mmp)
11{
12 struct ext4_sb_info *sbi = EXT4_SB(sb);
13 int offset = offsetof(struct mmp_struct, mmp_checksum);
14 __u32 csum;
15
16 csum = ext4_chksum(sbi, sbi->s_csum_seed, (char *)mmp, offset);
17
18 return cpu_to_le32(csum);
19}
20
21int ext4_mmp_csum_verify(struct super_block *sb, struct mmp_struct *mmp)
22{
23 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
24 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
25 return 1;
26
27 return mmp->mmp_checksum == ext4_mmp_csum(sb, mmp);
28}
29
30void ext4_mmp_csum_set(struct super_block *sb, struct mmp_struct *mmp)
31{
32 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
33 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
34 return;
35
36 mmp->mmp_checksum = ext4_mmp_csum(sb, mmp);
37}
38
9/* 39/*
10 * Write the MMP block using WRITE_SYNC to try to get the block on-disk 40 * Write the MMP block using WRITE_SYNC to try to get the block on-disk
11 * faster. 41 * faster.
12 */ 42 */
13static int write_mmp_block(struct buffer_head *bh) 43static int write_mmp_block(struct super_block *sb, struct buffer_head *bh)
14{ 44{
45 struct mmp_struct *mmp = (struct mmp_struct *)(bh->b_data);
46
47 ext4_mmp_csum_set(sb, mmp);
15 mark_buffer_dirty(bh); 48 mark_buffer_dirty(bh);
16 lock_buffer(bh); 49 lock_buffer(bh);
17 bh->b_end_io = end_buffer_write_sync; 50 bh->b_end_io = end_buffer_write_sync;
@@ -59,7 +92,8 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
59 } 92 }
60 93
61 mmp = (struct mmp_struct *)((*bh)->b_data); 94 mmp = (struct mmp_struct *)((*bh)->b_data);
62 if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC) 95 if (le32_to_cpu(mmp->mmp_magic) != EXT4_MMP_MAGIC ||
96 !ext4_mmp_csum_verify(sb, mmp))
63 return -EINVAL; 97 return -EINVAL;
64 98
65 return 0; 99 return 0;
@@ -120,7 +154,7 @@ static int kmmpd(void *data)
120 mmp->mmp_time = cpu_to_le64(get_seconds()); 154 mmp->mmp_time = cpu_to_le64(get_seconds());
121 last_update_time = jiffies; 155 last_update_time = jiffies;
122 156
123 retval = write_mmp_block(bh); 157 retval = write_mmp_block(sb, bh);
124 /* 158 /*
125 * Don't spew too many error messages. Print one every 159 * Don't spew too many error messages. Print one every
126 * (s_mmp_update_interval * 60) seconds. 160 * (s_mmp_update_interval * 60) seconds.
@@ -200,7 +234,7 @@ static int kmmpd(void *data)
200 mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN); 234 mmp->mmp_seq = cpu_to_le32(EXT4_MMP_SEQ_CLEAN);
201 mmp->mmp_time = cpu_to_le64(get_seconds()); 235 mmp->mmp_time = cpu_to_le64(get_seconds());
202 236
203 retval = write_mmp_block(bh); 237 retval = write_mmp_block(sb, bh);
204 238
205failed: 239failed:
206 kfree(data); 240 kfree(data);
@@ -299,7 +333,7 @@ skip:
299 seq = mmp_new_seq(); 333 seq = mmp_new_seq();
300 mmp->mmp_seq = cpu_to_le32(seq); 334 mmp->mmp_seq = cpu_to_le32(seq);
301 335
302 retval = write_mmp_block(bh); 336 retval = write_mmp_block(sb, bh);
303 if (retval) 337 if (retval)
304 goto failed; 338 goto failed;
305 339
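ext4_mmp_csum() above checksums the mmp_struct only up to offsetof(struct mmp_struct, mmp_checksum), so the checksum field is never included in its own calculation. A sketch of that offsetof() boundary; the structure and the trivial byte sum below are toys standing in for the real mmp_struct and crc32c.

/*
 * Sketch of the offsetof() boundary used by ext4_mmp_csum() above: checksum
 * every byte that precedes the checksum field, so the field never covers
 * itself.
 */
#include <stdint.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct toy_mmp {
	uint32_t magic;
	uint32_t seq;
	uint8_t  pad[8];
	uint32_t checksum;	/* covers everything before this field */
};

static uint32_t byte_sum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len--)
		sum += *p++;
	return sum;
}

int main(void)
{
	struct toy_mmp m;
	size_t covered = offsetof(struct toy_mmp, checksum);

	memset(&m, 0, sizeof(m));
	m.magic = 0x004D4D50;	/* arbitrary magic value */
	m.seq = 7;
	m.checksum = byte_sum(&m, covered);
	printf("covered %zu of %zu bytes, checksum=%u\n",
	       covered, sizeof(m), (unsigned)m.checksum);
	return 0;
}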
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index e2a3f4b0ff78..5845cd97bf8b 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -145,6 +145,14 @@ struct dx_map_entry
145 u16 size; 145 u16 size;
146}; 146};
147 147
148/*
149 * This goes at the end of each htree block.
150 */
151struct dx_tail {
152 u32 dt_reserved;
153 __le32 dt_checksum; /* crc32c(uuid+inum+dirblock) */
154};
155
148static inline ext4_lblk_t dx_get_block(struct dx_entry *entry); 156static inline ext4_lblk_t dx_get_block(struct dx_entry *entry);
149static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value); 157static void dx_set_block(struct dx_entry *entry, ext4_lblk_t value);
150static inline unsigned dx_get_hash(struct dx_entry *entry); 158static inline unsigned dx_get_hash(struct dx_entry *entry);
@@ -180,6 +188,230 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir,
180static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry, 188static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
181 struct inode *inode); 189 struct inode *inode);
182 190
191/* checksumming functions */
192#define EXT4_DIRENT_TAIL(block, blocksize) \
193 ((struct ext4_dir_entry_tail *)(((void *)(block)) + \
194 ((blocksize) - \
195 sizeof(struct ext4_dir_entry_tail))))
196
197static void initialize_dirent_tail(struct ext4_dir_entry_tail *t,
198 unsigned int blocksize)
199{
200 memset(t, 0, sizeof(struct ext4_dir_entry_tail));
201 t->det_rec_len = ext4_rec_len_to_disk(
202 sizeof(struct ext4_dir_entry_tail), blocksize);
203 t->det_reserved_ft = EXT4_FT_DIR_CSUM;
204}
205
206/* Walk through a dirent block to find a checksum "dirent" at the tail */
207static struct ext4_dir_entry_tail *get_dirent_tail(struct inode *inode,
208 struct ext4_dir_entry *de)
209{
210 struct ext4_dir_entry_tail *t;
211
212#ifdef PARANOID
213 struct ext4_dir_entry *d, *top;
214
215 d = de;
216 top = (struct ext4_dir_entry *)(((void *)de) +
217 (EXT4_BLOCK_SIZE(inode->i_sb) -
218 sizeof(struct ext4_dir_entry_tail)));
219 while (d < top && d->rec_len)
220 d = (struct ext4_dir_entry *)(((void *)d) +
221 le16_to_cpu(d->rec_len));
222
223 if (d != top)
224 return NULL;
225
226 t = (struct ext4_dir_entry_tail *)d;
227#else
228 t = EXT4_DIRENT_TAIL(de, EXT4_BLOCK_SIZE(inode->i_sb));
229#endif
230
231 if (t->det_reserved_zero1 ||
232 le16_to_cpu(t->det_rec_len) != sizeof(struct ext4_dir_entry_tail) ||
233 t->det_reserved_zero2 ||
234 t->det_reserved_ft != EXT4_FT_DIR_CSUM)
235 return NULL;
236
237 return t;
238}
239
240static __le32 ext4_dirent_csum(struct inode *inode,
241 struct ext4_dir_entry *dirent, int size)
242{
243 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
244 struct ext4_inode_info *ei = EXT4_I(inode);
245 __u32 csum;
246
247 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
248 return cpu_to_le32(csum);
249}
250
251int ext4_dirent_csum_verify(struct inode *inode, struct ext4_dir_entry *dirent)
252{
253 struct ext4_dir_entry_tail *t;
254
255 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
256 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
257 return 1;
258
259 t = get_dirent_tail(inode, dirent);
260 if (!t) {
261 EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
262 "leaf for checksum. Please run e2fsck -D.");
263 return 0;
264 }
265
266 if (t->det_checksum != ext4_dirent_csum(inode, dirent,
267 (void *)t - (void *)dirent))
268 return 0;
269
270 return 1;
271}
272
273static void ext4_dirent_csum_set(struct inode *inode,
274 struct ext4_dir_entry *dirent)
275{
276 struct ext4_dir_entry_tail *t;
277
278 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
279 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
280 return;
281
282 t = get_dirent_tail(inode, dirent);
283 if (!t) {
284 EXT4_ERROR_INODE(inode, "metadata_csum set but no space in dir "
285 "leaf for checksum. Please run e2fsck -D.");
286 return;
287 }
288
289 t->det_checksum = ext4_dirent_csum(inode, dirent,
290 (void *)t - (void *)dirent);
291}
292
293static inline int ext4_handle_dirty_dirent_node(handle_t *handle,
294 struct inode *inode,
295 struct buffer_head *bh)
296{
297 ext4_dirent_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
298 return ext4_handle_dirty_metadata(handle, inode, bh);
299}
300
301static struct dx_countlimit *get_dx_countlimit(struct inode *inode,
302 struct ext4_dir_entry *dirent,
303 int *offset)
304{
305 struct ext4_dir_entry *dp;
306 struct dx_root_info *root;
307 int count_offset;
308
309 if (le16_to_cpu(dirent->rec_len) == EXT4_BLOCK_SIZE(inode->i_sb))
310 count_offset = 8;
311 else if (le16_to_cpu(dirent->rec_len) == 12) {
312 dp = (struct ext4_dir_entry *)(((void *)dirent) + 12);
313 if (le16_to_cpu(dp->rec_len) !=
314 EXT4_BLOCK_SIZE(inode->i_sb) - 12)
315 return NULL;
316 root = (struct dx_root_info *)(((void *)dp + 12));
317 if (root->reserved_zero ||
318 root->info_length != sizeof(struct dx_root_info))
319 return NULL;
320 count_offset = 32;
321 } else
322 return NULL;
323
324 if (offset)
325 *offset = count_offset;
326 return (struct dx_countlimit *)(((void *)dirent) + count_offset);
327}
328
329static __le32 ext4_dx_csum(struct inode *inode, struct ext4_dir_entry *dirent,
330 int count_offset, int count, struct dx_tail *t)
331{
332 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
333 struct ext4_inode_info *ei = EXT4_I(inode);
334 __u32 csum, old_csum;
335 int size;
336
337 size = count_offset + (count * sizeof(struct dx_entry));
338 old_csum = t->dt_checksum;
339 t->dt_checksum = 0;
340 csum = ext4_chksum(sbi, ei->i_csum_seed, (__u8 *)dirent, size);
341 csum = ext4_chksum(sbi, csum, (__u8 *)t, sizeof(struct dx_tail));
342 t->dt_checksum = old_csum;
343
344 return cpu_to_le32(csum);
345}
346
347static int ext4_dx_csum_verify(struct inode *inode,
348 struct ext4_dir_entry *dirent)
349{
350 struct dx_countlimit *c;
351 struct dx_tail *t;
352 int count_offset, limit, count;
353
354 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
355 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
356 return 1;
357
358 c = get_dx_countlimit(inode, dirent, &count_offset);
359 if (!c) {
360 EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
361 return 1;
362 }
363 limit = le16_to_cpu(c->limit);
364 count = le16_to_cpu(c->count);
365 if (count_offset + (limit * sizeof(struct dx_entry)) >
366 EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
367 EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
368 "tree checksum found. Run e2fsck -D.");
369 return 1;
370 }
371 t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
372
373 if (t->dt_checksum != ext4_dx_csum(inode, dirent, count_offset,
374 count, t))
375 return 0;
376 return 1;
377}
378
379static void ext4_dx_csum_set(struct inode *inode, struct ext4_dir_entry *dirent)
380{
381 struct dx_countlimit *c;
382 struct dx_tail *t;
383 int count_offset, limit, count;
384
385 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
386 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
387 return;
388
389 c = get_dx_countlimit(inode, dirent, &count_offset);
390 if (!c) {
391 EXT4_ERROR_INODE(inode, "dir seems corrupt? Run e2fsck -D.");
392 return;
393 }
394 limit = le16_to_cpu(c->limit);
395 count = le16_to_cpu(c->count);
396 if (count_offset + (limit * sizeof(struct dx_entry)) >
397 EXT4_BLOCK_SIZE(inode->i_sb) - sizeof(struct dx_tail)) {
398 EXT4_ERROR_INODE(inode, "metadata_csum set but no space for "
399 "tree checksum. Run e2fsck -D.");
400 return;
401 }
402 t = (struct dx_tail *)(((struct dx_entry *)c) + limit);
403
404 t->dt_checksum = ext4_dx_csum(inode, dirent, count_offset, count, t);
405}
406
407static inline int ext4_handle_dirty_dx_node(handle_t *handle,
408 struct inode *inode,
409 struct buffer_head *bh)
410{
411 ext4_dx_csum_set(inode, (struct ext4_dir_entry *)bh->b_data);
412 return ext4_handle_dirty_metadata(handle, inode, bh);
413}
414
183/* 415/*
184 * p is at least 6 bytes before the end of page 416 * p is at least 6 bytes before the end of page
185 */ 417 */
@@ -239,12 +471,20 @@ static inline unsigned dx_root_limit(struct inode *dir, unsigned infosize)
239{ 471{
240 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) - 472 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(1) -
241 EXT4_DIR_REC_LEN(2) - infosize; 473 EXT4_DIR_REC_LEN(2) - infosize;
474
475 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
476 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
477 entry_space -= sizeof(struct dx_tail);
242 return entry_space / sizeof(struct dx_entry); 478 return entry_space / sizeof(struct dx_entry);
243} 479}
244 480
245static inline unsigned dx_node_limit(struct inode *dir) 481static inline unsigned dx_node_limit(struct inode *dir)
246{ 482{
247 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0); 483 unsigned entry_space = dir->i_sb->s_blocksize - EXT4_DIR_REC_LEN(0);
484
485 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
486 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
487 entry_space -= sizeof(struct dx_tail);
248 return entry_space / sizeof(struct dx_entry); 488 return entry_space / sizeof(struct dx_entry);
249} 489}
250 490
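With metadata_csum enabled, dx_root_limit() and dx_node_limit() above shave sizeof(struct dx_tail) off the usable entry space, so each index block holds one fewer dx_entry. A worked example for a 4096-byte block, assuming the usual 8-byte dx_entry (hash + block) and the 8-byte dx_tail defined earlier in this diff:

	/* dx_node_limit(): EXT4_DIR_REC_LEN(0) == 8 */
	entry_space = 4096 - 8;			/* 4088 */
	entry_space -= sizeof(struct dx_tail);	/* 4080 */
	limit = entry_space / 8;		/* 510 entries instead of 511 */

The dx_tail therefore lands at count_offset + limit * sizeof(struct dx_entry), which is exactly where ext4_dx_csum_verify() and ext4_dx_csum_set() expect to find it.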
@@ -390,6 +630,15 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
390 goto fail; 630 goto fail;
391 } 631 }
392 632
633 if (!buffer_verified(bh) &&
634 !ext4_dx_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
635 ext4_warning(dir->i_sb, "Root failed checksum");
636 brelse(bh);
637 *err = ERR_BAD_DX_DIR;
638 goto fail;
639 }
640 set_buffer_verified(bh);
641
393 entries = (struct dx_entry *) (((char *)&root->info) + 642 entries = (struct dx_entry *) (((char *)&root->info) +
394 root->info.info_length); 643 root->info.info_length);
395 644
@@ -450,6 +699,17 @@ dx_probe(const struct qstr *d_name, struct inode *dir,
450 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err))) 699 if (!(bh = ext4_bread (NULL,dir, dx_get_block(at), 0, err)))
451 goto fail2; 700 goto fail2;
452 at = entries = ((struct dx_node *) bh->b_data)->entries; 701 at = entries = ((struct dx_node *) bh->b_data)->entries;
702
703 if (!buffer_verified(bh) &&
704 !ext4_dx_csum_verify(dir,
705 (struct ext4_dir_entry *)bh->b_data)) {
706 ext4_warning(dir->i_sb, "Node failed checksum");
707 brelse(bh);
708 *err = ERR_BAD_DX_DIR;
709 goto fail;
710 }
711 set_buffer_verified(bh);
712
453 if (dx_get_limit(entries) != dx_node_limit (dir)) { 713 if (dx_get_limit(entries) != dx_node_limit (dir)) {
454 ext4_warning(dir->i_sb, 714 ext4_warning(dir->i_sb,
455 "dx entry: limit != node limit"); 715 "dx entry: limit != node limit");
@@ -549,6 +809,15 @@ static int ext4_htree_next_block(struct inode *dir, __u32 hash,
549 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at), 809 if (!(bh = ext4_bread(NULL, dir, dx_get_block(p->at),
550 0, &err))) 810 0, &err)))
551 return err; /* Failure */ 811 return err; /* Failure */
812
813 if (!buffer_verified(bh) &&
814 !ext4_dx_csum_verify(dir,
815 (struct ext4_dir_entry *)bh->b_data)) {
816 ext4_warning(dir->i_sb, "Node failed checksum");
817 return -EIO;
818 }
819 set_buffer_verified(bh);
820
552 p++; 821 p++;
553 brelse(p->bh); 822 brelse(p->bh);
554 p->bh = bh; 823 p->bh = bh;
@@ -577,6 +846,11 @@ static int htree_dirblock_to_tree(struct file *dir_file,
577 if (!(bh = ext4_bread (NULL, dir, block, 0, &err))) 846 if (!(bh = ext4_bread (NULL, dir, block, 0, &err)))
578 return err; 847 return err;
579 848
849 if (!buffer_verified(bh) &&
850 !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
851 return -EIO;
852 set_buffer_verified(bh);
853
580 de = (struct ext4_dir_entry_2 *) bh->b_data; 854 de = (struct ext4_dir_entry_2 *) bh->b_data;
581 top = (struct ext4_dir_entry_2 *) ((char *) de + 855 top = (struct ext4_dir_entry_2 *) ((char *) de +
582 dir->i_sb->s_blocksize - 856 dir->i_sb->s_blocksize -
@@ -936,6 +1210,15 @@ restart:
936 brelse(bh); 1210 brelse(bh);
937 goto next; 1211 goto next;
938 } 1212 }
1213 if (!buffer_verified(bh) &&
1214 !ext4_dirent_csum_verify(dir,
1215 (struct ext4_dir_entry *)bh->b_data)) {
1216 EXT4_ERROR_INODE(dir, "checksumming directory "
1217 "block %lu", (unsigned long)block);
1218 brelse(bh);
1219 goto next;
1220 }
1221 set_buffer_verified(bh);
939 i = search_dirblock(bh, dir, d_name, 1222 i = search_dirblock(bh, dir, d_name,
940 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir); 1223 block << EXT4_BLOCK_SIZE_BITS(sb), res_dir);
941 if (i == 1) { 1224 if (i == 1) {
@@ -987,6 +1270,16 @@ static struct buffer_head * ext4_dx_find_entry(struct inode *dir, const struct q
987 if (!(bh = ext4_bread(NULL, dir, block, 0, err))) 1270 if (!(bh = ext4_bread(NULL, dir, block, 0, err)))
988 goto errout; 1271 goto errout;
989 1272
1273 if (!buffer_verified(bh) &&
1274 !ext4_dirent_csum_verify(dir,
1275 (struct ext4_dir_entry *)bh->b_data)) {
1276 EXT4_ERROR_INODE(dir, "checksumming directory "
1277 "block %lu", (unsigned long)block);
1278 brelse(bh);
1279 *err = -EIO;
1280 goto errout;
1281 }
1282 set_buffer_verified(bh);
990 retval = search_dirblock(bh, dir, d_name, 1283 retval = search_dirblock(bh, dir, d_name,
991 block << EXT4_BLOCK_SIZE_BITS(sb), 1284 block << EXT4_BLOCK_SIZE_BITS(sb),
992 res_dir); 1285 res_dir);
@@ -1037,6 +1330,12 @@ static struct dentry *ext4_lookup(struct inode *dir, struct dentry *dentry, stru
1037 EXT4_ERROR_INODE(dir, "bad inode number: %u", ino); 1330 EXT4_ERROR_INODE(dir, "bad inode number: %u", ino);
1038 return ERR_PTR(-EIO); 1331 return ERR_PTR(-EIO);
1039 } 1332 }
1333 if (unlikely(ino == dir->i_ino)) {
1334 EXT4_ERROR_INODE(dir, "'%.*s' linked to parent dir",
1335 dentry->d_name.len,
1336 dentry->d_name.name);
1337 return ERR_PTR(-EIO);
1338 }
1040 inode = ext4_iget(dir->i_sb, ino); 1339 inode = ext4_iget(dir->i_sb, ino);
1041 if (inode == ERR_PTR(-ESTALE)) { 1340 if (inode == ERR_PTR(-ESTALE)) {
1042 EXT4_ERROR_INODE(dir, 1341 EXT4_ERROR_INODE(dir,
@@ -1156,8 +1455,14 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1156 char *data1 = (*bh)->b_data, *data2; 1455 char *data1 = (*bh)->b_data, *data2;
1157 unsigned split, move, size; 1456 unsigned split, move, size;
1158 struct ext4_dir_entry_2 *de = NULL, *de2; 1457 struct ext4_dir_entry_2 *de = NULL, *de2;
1458 struct ext4_dir_entry_tail *t;
1459 int csum_size = 0;
1159 int err = 0, i; 1460 int err = 0, i;
1160 1461
1462 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
1463 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1464 csum_size = sizeof(struct ext4_dir_entry_tail);
1465
1161 bh2 = ext4_append (handle, dir, &newblock, &err); 1466 bh2 = ext4_append (handle, dir, &newblock, &err);
1162 if (!(bh2)) { 1467 if (!(bh2)) {
1163 brelse(*bh); 1468 brelse(*bh);
@@ -1204,10 +1509,20 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1204 /* Fancy dance to stay within two buffers */ 1509 /* Fancy dance to stay within two buffers */
1205 de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize); 1510 de2 = dx_move_dirents(data1, data2, map + split, count - split, blocksize);
1206 de = dx_pack_dirents(data1, blocksize); 1511 de = dx_pack_dirents(data1, blocksize);
1207 de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de, 1512 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1513 (char *) de,
1208 blocksize); 1514 blocksize);
1209 de2->rec_len = ext4_rec_len_to_disk(data2 + blocksize - (char *) de2, 1515 de2->rec_len = ext4_rec_len_to_disk(data2 + (blocksize - csum_size) -
1516 (char *) de2,
1210 blocksize); 1517 blocksize);
1518 if (csum_size) {
1519 t = EXT4_DIRENT_TAIL(data2, blocksize);
1520 initialize_dirent_tail(t, blocksize);
1521
1522 t = EXT4_DIRENT_TAIL(data1, blocksize);
1523 initialize_dirent_tail(t, blocksize);
1524 }
1525
1211 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1)); 1526 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data1, blocksize, 1));
1212 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1)); 1527 dxtrace(dx_show_leaf (hinfo, (struct ext4_dir_entry_2 *) data2, blocksize, 1));
1213 1528
@@ -1218,10 +1533,10 @@ static struct ext4_dir_entry_2 *do_split(handle_t *handle, struct inode *dir,
1218 de = de2; 1533 de = de2;
1219 } 1534 }
1220 dx_insert_block(frame, hash2 + continued, newblock); 1535 dx_insert_block(frame, hash2 + continued, newblock);
1221 err = ext4_handle_dirty_metadata(handle, dir, bh2); 1536 err = ext4_handle_dirty_dirent_node(handle, dir, bh2);
1222 if (err) 1537 if (err)
1223 goto journal_error; 1538 goto journal_error;
1224 err = ext4_handle_dirty_metadata(handle, dir, frame->bh); 1539 err = ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1225 if (err) 1540 if (err)
1226 goto journal_error; 1541 goto journal_error;
1227 brelse(bh2); 1542 brelse(bh2);
@@ -1258,11 +1573,16 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1258 unsigned short reclen; 1573 unsigned short reclen;
1259 int nlen, rlen, err; 1574 int nlen, rlen, err;
1260 char *top; 1575 char *top;
1576 int csum_size = 0;
1577
1578 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
1579 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1580 csum_size = sizeof(struct ext4_dir_entry_tail);
1261 1581
1262 reclen = EXT4_DIR_REC_LEN(namelen); 1582 reclen = EXT4_DIR_REC_LEN(namelen);
1263 if (!de) { 1583 if (!de) {
1264 de = (struct ext4_dir_entry_2 *)bh->b_data; 1584 de = (struct ext4_dir_entry_2 *)bh->b_data;
1265 top = bh->b_data + blocksize - reclen; 1585 top = bh->b_data + (blocksize - csum_size) - reclen;
1266 while ((char *) de <= top) { 1586 while ((char *) de <= top) {
1267 if (ext4_check_dir_entry(dir, NULL, de, bh, offset)) 1587 if (ext4_check_dir_entry(dir, NULL, de, bh, offset))
1268 return -EIO; 1588 return -EIO;
@@ -1295,11 +1615,8 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1295 de = de1; 1615 de = de1;
1296 } 1616 }
1297 de->file_type = EXT4_FT_UNKNOWN; 1617 de->file_type = EXT4_FT_UNKNOWN;
1298 if (inode) { 1618 de->inode = cpu_to_le32(inode->i_ino);
1299 de->inode = cpu_to_le32(inode->i_ino); 1619 ext4_set_de_type(dir->i_sb, de, inode->i_mode);
1300 ext4_set_de_type(dir->i_sb, de, inode->i_mode);
1301 } else
1302 de->inode = 0;
1303 de->name_len = namelen; 1620 de->name_len = namelen;
1304 memcpy(de->name, name, namelen); 1621 memcpy(de->name, name, namelen);
1305 /* 1622 /*
@@ -1318,7 +1635,7 @@ static int add_dirent_to_buf(handle_t *handle, struct dentry *dentry,
1318 dir->i_version++; 1635 dir->i_version++;
1319 ext4_mark_inode_dirty(handle, dir); 1636 ext4_mark_inode_dirty(handle, dir);
1320 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 1637 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1321 err = ext4_handle_dirty_metadata(handle, dir, bh); 1638 err = ext4_handle_dirty_dirent_node(handle, dir, bh);
1322 if (err) 1639 if (err)
1323 ext4_std_error(dir->i_sb, err); 1640 ext4_std_error(dir->i_sb, err);
1324 return 0; 1641 return 0;
@@ -1339,6 +1656,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1339 struct dx_frame frames[2], *frame; 1656 struct dx_frame frames[2], *frame;
1340 struct dx_entry *entries; 1657 struct dx_entry *entries;
1341 struct ext4_dir_entry_2 *de, *de2; 1658 struct ext4_dir_entry_2 *de, *de2;
1659 struct ext4_dir_entry_tail *t;
1342 char *data1, *top; 1660 char *data1, *top;
1343 unsigned len; 1661 unsigned len;
1344 int retval; 1662 int retval;
@@ -1346,6 +1664,11 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1346 struct dx_hash_info hinfo; 1664 struct dx_hash_info hinfo;
1347 ext4_lblk_t block; 1665 ext4_lblk_t block;
1348 struct fake_dirent *fde; 1666 struct fake_dirent *fde;
1667 int csum_size = 0;
1668
1669 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
1670 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1671 csum_size = sizeof(struct ext4_dir_entry_tail);
1349 1672
1350 blocksize = dir->i_sb->s_blocksize; 1673 blocksize = dir->i_sb->s_blocksize;
1351 dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino)); 1674 dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
@@ -1366,7 +1689,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1366 brelse(bh); 1689 brelse(bh);
1367 return -EIO; 1690 return -EIO;
1368 } 1691 }
1369 len = ((char *) root) + blocksize - (char *) de; 1692 len = ((char *) root) + (blocksize - csum_size) - (char *) de;
1370 1693
1371 /* Allocate new block for the 0th block's dirents */ 1694 /* Allocate new block for the 0th block's dirents */
1372 bh2 = ext4_append(handle, dir, &block, &retval); 1695 bh2 = ext4_append(handle, dir, &block, &retval);
@@ -1382,8 +1705,15 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1382 top = data1 + len; 1705 top = data1 + len;
1383 while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top) 1706 while ((char *)(de2 = ext4_next_entry(de, blocksize)) < top)
1384 de = de2; 1707 de = de2;
1385 de->rec_len = ext4_rec_len_to_disk(data1 + blocksize - (char *) de, 1708 de->rec_len = ext4_rec_len_to_disk(data1 + (blocksize - csum_size) -
1709 (char *) de,
1386 blocksize); 1710 blocksize);
1711
1712 if (csum_size) {
1713 t = EXT4_DIRENT_TAIL(data1, blocksize);
1714 initialize_dirent_tail(t, blocksize);
1715 }
1716
1387 /* Initialize the root; the dot dirents already exist */ 1717 /* Initialize the root; the dot dirents already exist */
1388 de = (struct ext4_dir_entry_2 *) (&root->dotdot); 1718 de = (struct ext4_dir_entry_2 *) (&root->dotdot);
1389 de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2), 1719 de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(2),
@@ -1408,8 +1738,8 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
1408 frame->bh = bh; 1738 frame->bh = bh;
1409 bh = bh2; 1739 bh = bh2;
1410 1740
1411 ext4_handle_dirty_metadata(handle, dir, frame->bh); 1741 ext4_handle_dirty_dx_node(handle, dir, frame->bh);
1412 ext4_handle_dirty_metadata(handle, dir, bh); 1742 ext4_handle_dirty_dirent_node(handle, dir, bh);
1413 1743
1414 de = do_split(handle,dir, &bh, frame, &hinfo, &retval); 1744 de = do_split(handle,dir, &bh, frame, &hinfo, &retval);
1415 if (!de) { 1745 if (!de) {
@@ -1445,11 +1775,17 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1445 struct inode *dir = dentry->d_parent->d_inode; 1775 struct inode *dir = dentry->d_parent->d_inode;
1446 struct buffer_head *bh; 1776 struct buffer_head *bh;
1447 struct ext4_dir_entry_2 *de; 1777 struct ext4_dir_entry_2 *de;
1778 struct ext4_dir_entry_tail *t;
1448 struct super_block *sb; 1779 struct super_block *sb;
1449 int retval; 1780 int retval;
1450 int dx_fallback=0; 1781 int dx_fallback=0;
1451 unsigned blocksize; 1782 unsigned blocksize;
1452 ext4_lblk_t block, blocks; 1783 ext4_lblk_t block, blocks;
1784 int csum_size = 0;
1785
1786 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
1787 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1788 csum_size = sizeof(struct ext4_dir_entry_tail);
1453 1789
1454 sb = dir->i_sb; 1790 sb = dir->i_sb;
1455 blocksize = sb->s_blocksize; 1791 blocksize = sb->s_blocksize;
@@ -1468,6 +1804,11 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1468 bh = ext4_bread(handle, dir, block, 0, &retval); 1804 bh = ext4_bread(handle, dir, block, 0, &retval);
1469 if(!bh) 1805 if(!bh)
1470 return retval; 1806 return retval;
1807 if (!buffer_verified(bh) &&
1808 !ext4_dirent_csum_verify(dir,
1809 (struct ext4_dir_entry *)bh->b_data))
1810 return -EIO;
1811 set_buffer_verified(bh);
1471 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh); 1812 retval = add_dirent_to_buf(handle, dentry, inode, NULL, bh);
1472 if (retval != -ENOSPC) { 1813 if (retval != -ENOSPC) {
1473 brelse(bh); 1814 brelse(bh);
@@ -1484,7 +1825,13 @@ static int ext4_add_entry(handle_t *handle, struct dentry *dentry,
1484 return retval; 1825 return retval;
1485 de = (struct ext4_dir_entry_2 *) bh->b_data; 1826 de = (struct ext4_dir_entry_2 *) bh->b_data;
1486 de->inode = 0; 1827 de->inode = 0;
1487 de->rec_len = ext4_rec_len_to_disk(blocksize, blocksize); 1828 de->rec_len = ext4_rec_len_to_disk(blocksize - csum_size, blocksize);
1829
1830 if (csum_size) {
1831 t = EXT4_DIRENT_TAIL(bh->b_data, blocksize);
1832 initialize_dirent_tail(t, blocksize);
1833 }
1834
1488 retval = add_dirent_to_buf(handle, dentry, inode, de, bh); 1835 retval = add_dirent_to_buf(handle, dentry, inode, de, bh);
1489 brelse(bh); 1836 brelse(bh);
1490 if (retval == 0) 1837 if (retval == 0)
@@ -1516,6 +1863,11 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1516 if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err))) 1863 if (!(bh = ext4_bread(handle,dir, dx_get_block(frame->at), 0, &err)))
1517 goto cleanup; 1864 goto cleanup;
1518 1865
1866 if (!buffer_verified(bh) &&
1867 !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data))
1868 goto journal_error;
1869 set_buffer_verified(bh);
1870
1519 BUFFER_TRACE(bh, "get_write_access"); 1871 BUFFER_TRACE(bh, "get_write_access");
1520 err = ext4_journal_get_write_access(handle, bh); 1872 err = ext4_journal_get_write_access(handle, bh);
1521 if (err) 1873 if (err)
@@ -1583,7 +1935,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1583 dxtrace(dx_show_index("node", frames[1].entries)); 1935 dxtrace(dx_show_index("node", frames[1].entries));
1584 dxtrace(dx_show_index("node", 1936 dxtrace(dx_show_index("node",
1585 ((struct dx_node *) bh2->b_data)->entries)); 1937 ((struct dx_node *) bh2->b_data)->entries));
1586 err = ext4_handle_dirty_metadata(handle, dir, bh2); 1938 err = ext4_handle_dirty_dx_node(handle, dir, bh2);
1587 if (err) 1939 if (err)
1588 goto journal_error; 1940 goto journal_error;
1589 brelse (bh2); 1941 brelse (bh2);
@@ -1609,7 +1961,7 @@ static int ext4_dx_add_entry(handle_t *handle, struct dentry *dentry,
1609 if (err) 1961 if (err)
1610 goto journal_error; 1962 goto journal_error;
1611 } 1963 }
1612 err = ext4_handle_dirty_metadata(handle, dir, frames[0].bh); 1964 err = ext4_handle_dirty_dx_node(handle, dir, frames[0].bh);
1613 if (err) { 1965 if (err) {
1614 ext4_std_error(inode->i_sb, err); 1966 ext4_std_error(inode->i_sb, err);
1615 goto cleanup; 1967 goto cleanup;
@@ -1641,12 +1993,17 @@ static int ext4_delete_entry(handle_t *handle,
1641{ 1993{
1642 struct ext4_dir_entry_2 *de, *pde; 1994 struct ext4_dir_entry_2 *de, *pde;
1643 unsigned int blocksize = dir->i_sb->s_blocksize; 1995 unsigned int blocksize = dir->i_sb->s_blocksize;
1996 int csum_size = 0;
1644 int i, err; 1997 int i, err;
1645 1998
1999 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
2000 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
2001 csum_size = sizeof(struct ext4_dir_entry_tail);
2002
1646 i = 0; 2003 i = 0;
1647 pde = NULL; 2004 pde = NULL;
1648 de = (struct ext4_dir_entry_2 *) bh->b_data; 2005 de = (struct ext4_dir_entry_2 *) bh->b_data;
1649 while (i < bh->b_size) { 2006 while (i < bh->b_size - csum_size) {
1650 if (ext4_check_dir_entry(dir, NULL, de, bh, i)) 2007 if (ext4_check_dir_entry(dir, NULL, de, bh, i))
1651 return -EIO; 2008 return -EIO;
1652 if (de == de_del) { 2009 if (de == de_del) {
@@ -1667,7 +2024,7 @@ static int ext4_delete_entry(handle_t *handle,
1667 de->inode = 0; 2024 de->inode = 0;
1668 dir->i_version++; 2025 dir->i_version++;
1669 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata"); 2026 BUFFER_TRACE(bh, "call ext4_handle_dirty_metadata");
1670 err = ext4_handle_dirty_metadata(handle, dir, bh); 2027 err = ext4_handle_dirty_dirent_node(handle, dir, bh);
1671 if (unlikely(err)) { 2028 if (unlikely(err)) {
1672 ext4_std_error(dir->i_sb, err); 2029 ext4_std_error(dir->i_sb, err);
1673 return err; 2030 return err;
@@ -1809,9 +2166,15 @@ static int ext4_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
1809 struct inode *inode; 2166 struct inode *inode;
1810 struct buffer_head *dir_block = NULL; 2167 struct buffer_head *dir_block = NULL;
1811 struct ext4_dir_entry_2 *de; 2168 struct ext4_dir_entry_2 *de;
2169 struct ext4_dir_entry_tail *t;
1812 unsigned int blocksize = dir->i_sb->s_blocksize; 2170 unsigned int blocksize = dir->i_sb->s_blocksize;
2171 int csum_size = 0;
1813 int err, retries = 0; 2172 int err, retries = 0;
1814 2173
2174 if (EXT4_HAS_RO_COMPAT_FEATURE(dir->i_sb,
2175 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
2176 csum_size = sizeof(struct ext4_dir_entry_tail);
2177
1815 if (EXT4_DIR_LINK_MAX(dir)) 2178 if (EXT4_DIR_LINK_MAX(dir))
1816 return -EMLINK; 2179 return -EMLINK;
1817 2180
@@ -1852,16 +2215,24 @@ retry:
1852 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 2215 ext4_set_de_type(dir->i_sb, de, S_IFDIR);
1853 de = ext4_next_entry(de, blocksize); 2216 de = ext4_next_entry(de, blocksize);
1854 de->inode = cpu_to_le32(dir->i_ino); 2217 de->inode = cpu_to_le32(dir->i_ino);
1855 de->rec_len = ext4_rec_len_to_disk(blocksize - EXT4_DIR_REC_LEN(1), 2218 de->rec_len = ext4_rec_len_to_disk(blocksize -
2219 (csum_size + EXT4_DIR_REC_LEN(1)),
1856 blocksize); 2220 blocksize);
1857 de->name_len = 2; 2221 de->name_len = 2;
1858 strcpy(de->name, ".."); 2222 strcpy(de->name, "..");
1859 ext4_set_de_type(dir->i_sb, de, S_IFDIR); 2223 ext4_set_de_type(dir->i_sb, de, S_IFDIR);
1860 set_nlink(inode, 2); 2224 set_nlink(inode, 2);
2225
2226 if (csum_size) {
2227 t = EXT4_DIRENT_TAIL(dir_block->b_data, blocksize);
2228 initialize_dirent_tail(t, blocksize);
2229 }
2230
1861 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata"); 2231 BUFFER_TRACE(dir_block, "call ext4_handle_dirty_metadata");
1862 err = ext4_handle_dirty_metadata(handle, inode, dir_block); 2232 err = ext4_handle_dirty_dirent_node(handle, inode, dir_block);
1863 if (err) 2233 if (err)
1864 goto out_clear_inode; 2234 goto out_clear_inode;
2235 set_buffer_verified(dir_block);
1865 err = ext4_mark_inode_dirty(handle, inode); 2236 err = ext4_mark_inode_dirty(handle, inode);
1866 if (!err) 2237 if (!err)
1867 err = ext4_add_entry(handle, dentry, inode); 2238 err = ext4_add_entry(handle, dentry, inode);
@@ -1911,6 +2282,14 @@ static int empty_dir(struct inode *inode)
1911 inode->i_ino); 2282 inode->i_ino);
1912 return 1; 2283 return 1;
1913 } 2284 }
2285 if (!buffer_verified(bh) &&
2286 !ext4_dirent_csum_verify(inode,
2287 (struct ext4_dir_entry *)bh->b_data)) {
2288 EXT4_ERROR_INODE(inode, "checksum error reading directory "
2289 "lblock 0");
2290 return -EIO;
2291 }
2292 set_buffer_verified(bh);
1914 de = (struct ext4_dir_entry_2 *) bh->b_data; 2293 de = (struct ext4_dir_entry_2 *) bh->b_data;
1915 de1 = ext4_next_entry(de, sb->s_blocksize); 2294 de1 = ext4_next_entry(de, sb->s_blocksize);
1916 if (le32_to_cpu(de->inode) != inode->i_ino || 2295 if (le32_to_cpu(de->inode) != inode->i_ino ||
@@ -1942,6 +2321,14 @@ static int empty_dir(struct inode *inode)
1942 offset += sb->s_blocksize; 2321 offset += sb->s_blocksize;
1943 continue; 2322 continue;
1944 } 2323 }
2324 if (!buffer_verified(bh) &&
2325 !ext4_dirent_csum_verify(inode,
2326 (struct ext4_dir_entry *)bh->b_data)) {
2327 EXT4_ERROR_INODE(inode, "checksum error "
2328 "reading directory lblock 0");
2329 return -EIO;
2330 }
2331 set_buffer_verified(bh);
1945 de = (struct ext4_dir_entry_2 *) bh->b_data; 2332 de = (struct ext4_dir_entry_2 *) bh->b_data;
1946 } 2333 }
1947 if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) { 2334 if (ext4_check_dir_entry(inode, NULL, de, bh, offset)) {
@@ -2010,7 +2397,7 @@ int ext4_orphan_add(handle_t *handle, struct inode *inode)
2010 /* Insert this inode at the head of the on-disk orphan list... */ 2397 /* Insert this inode at the head of the on-disk orphan list... */
2011 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan); 2398 NEXT_ORPHAN(inode) = le32_to_cpu(EXT4_SB(sb)->s_es->s_last_orphan);
2012 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino); 2399 EXT4_SB(sb)->s_es->s_last_orphan = cpu_to_le32(inode->i_ino);
2013 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 2400 err = ext4_handle_dirty_super_now(handle, sb);
2014 rc = ext4_mark_iloc_dirty(handle, inode, &iloc); 2401 rc = ext4_mark_iloc_dirty(handle, inode, &iloc);
2015 if (!err) 2402 if (!err)
2016 err = rc; 2403 err = rc;
@@ -2083,7 +2470,7 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode)
2083 if (err) 2470 if (err)
2084 goto out_brelse; 2471 goto out_brelse;
2085 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next); 2472 sbi->s_es->s_last_orphan = cpu_to_le32(ino_next);
2086 err = ext4_handle_dirty_metadata(handle, NULL, sbi->s_sbh); 2473 err = ext4_handle_dirty_super_now(handle, inode->i_sb);
2087 } else { 2474 } else {
2088 struct ext4_iloc iloc2; 2475 struct ext4_iloc iloc2;
2089 struct inode *i_prev = 2476 struct inode *i_prev =
@@ -2442,6 +2829,11 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2442 dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval); 2829 dir_bh = ext4_bread(handle, old_inode, 0, 0, &retval);
2443 if (!dir_bh) 2830 if (!dir_bh)
2444 goto end_rename; 2831 goto end_rename;
2832 if (!buffer_verified(dir_bh) &&
2833 !ext4_dirent_csum_verify(old_inode,
2834 (struct ext4_dir_entry *)dir_bh->b_data))
2835 goto end_rename;
2836 set_buffer_verified(dir_bh);
2445 if (le32_to_cpu(PARENT_INO(dir_bh->b_data, 2837 if (le32_to_cpu(PARENT_INO(dir_bh->b_data,
2446 old_dir->i_sb->s_blocksize)) != old_dir->i_ino) 2838 old_dir->i_sb->s_blocksize)) != old_dir->i_ino)
2447 goto end_rename; 2839 goto end_rename;
@@ -2472,7 +2864,7 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2472 ext4_current_time(new_dir); 2864 ext4_current_time(new_dir);
2473 ext4_mark_inode_dirty(handle, new_dir); 2865 ext4_mark_inode_dirty(handle, new_dir);
2474 BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata"); 2866 BUFFER_TRACE(new_bh, "call ext4_handle_dirty_metadata");
2475 retval = ext4_handle_dirty_metadata(handle, new_dir, new_bh); 2867 retval = ext4_handle_dirty_dirent_node(handle, new_dir, new_bh);
2476 if (unlikely(retval)) { 2868 if (unlikely(retval)) {
2477 ext4_std_error(new_dir->i_sb, retval); 2869 ext4_std_error(new_dir->i_sb, retval);
2478 goto end_rename; 2870 goto end_rename;
@@ -2526,7 +2918,8 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
2526 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) = 2918 PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
2527 cpu_to_le32(new_dir->i_ino); 2919 cpu_to_le32(new_dir->i_ino);
2528 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata"); 2920 BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
2529 retval = ext4_handle_dirty_metadata(handle, old_inode, dir_bh); 2921 retval = ext4_handle_dirty_dirent_node(handle, old_inode,
2922 dir_bh);
2530 if (retval) { 2923 if (retval) {
2531 ext4_std_error(old_dir->i_sb, retval); 2924 ext4_std_error(old_dir->i_sb, retval);
2532 goto end_rename; 2925 goto end_rename;
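The namei.c changes above all follow the same discipline: a directory block read with ext4_bread() is verified once (the buffer_verified() flag caches the result) and then marked with set_buffer_verified(), while the write paths replace ext4_handle_dirty_metadata() with ext4_handle_dirty_dirent_node() or ext4_handle_dirty_dx_node() so the checksum is recomputed just before the buffer is dirtied. A condensed sketch of the read-side idiom; this helper does not exist in the patch, it simply restates the repeated pattern:

static struct buffer_head *read_dirent_block(struct inode *dir, ext4_lblk_t block,
					     int *err)
{
	struct buffer_head *bh = ext4_bread(NULL, dir, block, 0, err);

	if (!bh)
		return NULL;
	if (!buffer_verified(bh) &&
	    !ext4_dirent_csum_verify(dir, (struct ext4_dir_entry *)bh->b_data)) {
		brelse(bh);
		*err = -EIO;
		return NULL;
	}
	set_buffer_verified(bh);
	return bh;
}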
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 59fa0be27251..7ea6cbb44121 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -161,6 +161,8 @@ static struct ext4_new_flex_group_data *alloc_flex_gd(unsigned long flexbg_size)
161 if (flex_gd == NULL) 161 if (flex_gd == NULL)
162 goto out3; 162 goto out3;
163 163
164 if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
165 goto out2;
164 flex_gd->count = flexbg_size; 166 flex_gd->count = flexbg_size;
165 167
166 flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) * 168 flex_gd->groups = kmalloc(sizeof(struct ext4_new_group_data) *
@@ -796,7 +798,7 @@ static int add_new_gdb(handle_t *handle, struct inode *inode,
796 ext4_kvfree(o_group_desc); 798 ext4_kvfree(o_group_desc);
797 799
798 le16_add_cpu(&es->s_reserved_gdt_blocks, -1); 800 le16_add_cpu(&es->s_reserved_gdt_blocks, -1);
799 err = ext4_handle_dirty_metadata(handle, NULL, EXT4_SB(sb)->s_sbh); 801 err = ext4_handle_dirty_super_now(handle, sb);
800 if (err) 802 if (err)
801 ext4_std_error(sb, err); 803 ext4_std_error(sb, err);
802 804
@@ -968,6 +970,8 @@ static void update_backups(struct super_block *sb,
968 goto exit_err; 970 goto exit_err;
969 } 971 }
970 972
973 ext4_superblock_csum_set(sb, (struct ext4_super_block *)data);
974
971 while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) { 975 while ((group = ext4_list_backups(sb, &three, &five, &seven)) < last) {
972 struct buffer_head *bh; 976 struct buffer_head *bh;
973 977
@@ -1067,6 +1071,54 @@ static int ext4_add_new_descs(handle_t *handle, struct super_block *sb,
1067 return err; 1071 return err;
1068} 1072}
1069 1073
1074static struct buffer_head *ext4_get_bitmap(struct super_block *sb, __u64 block)
1075{
1076 struct buffer_head *bh = sb_getblk(sb, block);
1077 if (!bh)
1078 return NULL;
1079
1080 if (bitmap_uptodate(bh))
1081 return bh;
1082
1083 lock_buffer(bh);
1084 if (bh_submit_read(bh) < 0) {
1085 unlock_buffer(bh);
1086 brelse(bh);
1087 return NULL;
1088 }
1089 unlock_buffer(bh);
1090
1091 return bh;
1092}
1093
1094static int ext4_set_bitmap_checksums(struct super_block *sb,
1095 ext4_group_t group,
1096 struct ext4_group_desc *gdp,
1097 struct ext4_new_group_data *group_data)
1098{
1099 struct buffer_head *bh;
1100
1101 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
1102 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
1103 return 0;
1104
1105 bh = ext4_get_bitmap(sb, group_data->inode_bitmap);
1106 if (!bh)
1107 return -EIO;
1108 ext4_inode_bitmap_csum_set(sb, group, gdp, bh,
1109 EXT4_INODES_PER_GROUP(sb) / 8);
1110 brelse(bh);
1111
1112 bh = ext4_get_bitmap(sb, group_data->block_bitmap);
1113 if (!bh)
1114 return -EIO;
1115 ext4_block_bitmap_csum_set(sb, group, gdp, bh,
1116 EXT4_BLOCKS_PER_GROUP(sb) / 8);
1117 brelse(bh);
1118
1119 return 0;
1120}
1121
1070/* 1122/*
1071 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg 1123 * ext4_setup_new_descs() will set up the group descriptor descriptors of a flex bg
1072 */ 1124 */
@@ -1093,18 +1145,24 @@ static int ext4_setup_new_descs(handle_t *handle, struct super_block *sb,
1093 */ 1145 */
1094 gdb_bh = sbi->s_group_desc[gdb_num]; 1146 gdb_bh = sbi->s_group_desc[gdb_num];
1095 /* Update group descriptor block for new group */ 1147 /* Update group descriptor block for new group */
1096 gdp = (struct ext4_group_desc *)((char *)gdb_bh->b_data + 1148 gdp = (struct ext4_group_desc *)(gdb_bh->b_data +
1097 gdb_off * EXT4_DESC_SIZE(sb)); 1149 gdb_off * EXT4_DESC_SIZE(sb));
1098 1150
1099 memset(gdp, 0, EXT4_DESC_SIZE(sb)); 1151 memset(gdp, 0, EXT4_DESC_SIZE(sb));
1100 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap); 1152 ext4_block_bitmap_set(sb, gdp, group_data->block_bitmap);
1101 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap); 1153 ext4_inode_bitmap_set(sb, gdp, group_data->inode_bitmap);
1154 err = ext4_set_bitmap_checksums(sb, group, gdp, group_data);
1155 if (err) {
1156 ext4_std_error(sb, err);
1157 break;
1158 }
1159
1102 ext4_inode_table_set(sb, gdp, group_data->inode_table); 1160 ext4_inode_table_set(sb, gdp, group_data->inode_table);
1103 ext4_free_group_clusters_set(sb, gdp, 1161 ext4_free_group_clusters_set(sb, gdp,
1104 EXT4_B2C(sbi, group_data->free_blocks_count)); 1162 EXT4_B2C(sbi, group_data->free_blocks_count));
1105 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb)); 1163 ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
1106 gdp->bg_flags = cpu_to_le16(*bg_flags); 1164 gdp->bg_flags = cpu_to_le16(*bg_flags);
1107 gdp->bg_checksum = ext4_group_desc_csum(sbi, group, gdp); 1165 ext4_group_desc_csum_set(sb, group, gdp);
1108 1166
1109 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh); 1167 err = ext4_handle_dirty_metadata(handle, NULL, gdb_bh);
1110 if (unlikely(err)) { 1168 if (unlikely(err)) {
@@ -1343,17 +1401,14 @@ static int ext4_setup_next_flex_gd(struct super_block *sb,
1343 (1 + ext4_bg_num_gdb(sb, group + i) + 1401 (1 + ext4_bg_num_gdb(sb, group + i) +
1344 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0; 1402 le16_to_cpu(es->s_reserved_gdt_blocks)) : 0;
1345 group_data[i].free_blocks_count = blocks_per_group - overhead; 1403 group_data[i].free_blocks_count = blocks_per_group - overhead;
1346 if (EXT4_HAS_RO_COMPAT_FEATURE(sb, 1404 if (ext4_has_group_desc_csum(sb))
1347 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
1348 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT | 1405 flex_gd->bg_flags[i] = EXT4_BG_BLOCK_UNINIT |
1349 EXT4_BG_INODE_UNINIT; 1406 EXT4_BG_INODE_UNINIT;
1350 else 1407 else
1351 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED; 1408 flex_gd->bg_flags[i] = EXT4_BG_INODE_ZEROED;
1352 } 1409 }
1353 1410
1354 if (last_group == n_group && 1411 if (last_group == n_group && ext4_has_group_desc_csum(sb))
1355 EXT4_HAS_RO_COMPAT_FEATURE(sb,
1356 EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
1357 /* We need to initialize block bitmap of last group. */ 1412 /* We need to initialize block bitmap of last group. */
1358 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT; 1413 flex_gd->bg_flags[i - 1] &= ~EXT4_BG_BLOCK_UNINIT;
1359 1414
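The fs/ext4/super.c changes that follow wire metadata_csum into mount: the superblock gains its own crc32c checksum over the bytes preceding s_checksum, group descriptor checksums switch from the crc16 GDT_CSUM scheme to crc32c truncated to 16 bits, a crc32c shash transform is allocated in ext4_fill_super(), and the filesystem-wide seed is precomputed from the UUID. The seed and the superblock checksum reduce to two lines (a condensed restatement of the code below, assuming ext4_chksum() wraps the crc32c transform):

	sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid, sizeof(es->s_uuid));

	offset = offsetof(struct ext4_super_block, s_checksum);
	es->s_checksum = cpu_to_le32(ext4_chksum(sbi, ~0, (char *)es, offset));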
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 35b5954489ee..eb7aa3e4ef05 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -112,6 +112,48 @@ static struct file_system_type ext3_fs_type = {
112#define IS_EXT3_SB(sb) (0) 112#define IS_EXT3_SB(sb) (0)
113#endif 113#endif
114 114
115static int ext4_verify_csum_type(struct super_block *sb,
116 struct ext4_super_block *es)
117{
118 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
119 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
120 return 1;
121
122 return es->s_checksum_type == EXT4_CRC32C_CHKSUM;
123}
124
125static __le32 ext4_superblock_csum(struct super_block *sb,
126 struct ext4_super_block *es)
127{
128 struct ext4_sb_info *sbi = EXT4_SB(sb);
129 int offset = offsetof(struct ext4_super_block, s_checksum);
130 __u32 csum;
131
132 csum = ext4_chksum(sbi, ~0, (char *)es, offset);
133
134 return cpu_to_le32(csum);
135}
136
137int ext4_superblock_csum_verify(struct super_block *sb,
138 struct ext4_super_block *es)
139{
140 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
141 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
142 return 1;
143
144 return es->s_checksum == ext4_superblock_csum(sb, es);
145}
146
147void ext4_superblock_csum_set(struct super_block *sb,
148 struct ext4_super_block *es)
149{
150 if (!EXT4_HAS_RO_COMPAT_FEATURE(sb,
151 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
152 return;
153
154 es->s_checksum = ext4_superblock_csum(sb, es);
155}
156
115void *ext4_kvmalloc(size_t size, gfp_t flags) 157void *ext4_kvmalloc(size_t size, gfp_t flags)
116{ 158{
117 void *ret; 159 void *ret;
@@ -497,6 +539,7 @@ void __ext4_error(struct super_block *sb, const char *function,
497 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n", 539 printk(KERN_CRIT "EXT4-fs error (device %s): %s:%d: comm %s: %pV\n",
498 sb->s_id, function, line, current->comm, &vaf); 540 sb->s_id, function, line, current->comm, &vaf);
499 va_end(args); 541 va_end(args);
542 save_error_info(sb, function, line);
500 543
501 ext4_handle_error(sb); 544 ext4_handle_error(sb);
502} 545}
@@ -905,6 +948,8 @@ static void ext4_put_super(struct super_block *sb)
905 unlock_super(sb); 948 unlock_super(sb);
906 kobject_put(&sbi->s_kobj); 949 kobject_put(&sbi->s_kobj);
907 wait_for_completion(&sbi->s_kobj_unregister); 950 wait_for_completion(&sbi->s_kobj_unregister);
951 if (sbi->s_chksum_driver)
952 crypto_free_shash(sbi->s_chksum_driver);
908 kfree(sbi->s_blockgroup_lock); 953 kfree(sbi->s_blockgroup_lock);
909 kfree(sbi); 954 kfree(sbi);
910} 955}
@@ -1922,43 +1967,69 @@ failed:
1922 return 0; 1967 return 0;
1923} 1968}
1924 1969
1925__le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group, 1970static __le16 ext4_group_desc_csum(struct ext4_sb_info *sbi, __u32 block_group,
1926 struct ext4_group_desc *gdp) 1971 struct ext4_group_desc *gdp)
1927{ 1972{
1973 int offset;
1928 __u16 crc = 0; 1974 __u16 crc = 0;
1975 __le32 le_group = cpu_to_le32(block_group);
1929 1976
1930 if (sbi->s_es->s_feature_ro_compat & 1977 if ((sbi->s_es->s_feature_ro_compat &
1931 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) { 1978 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))) {
1932 int offset = offsetof(struct ext4_group_desc, bg_checksum); 1979 /* Use new metadata_csum algorithm */
1933 __le32 le_group = cpu_to_le32(block_group); 1980 __u16 old_csum;
1934 1981 __u32 csum32;
1935 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid)); 1982
1936 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group)); 1983 old_csum = gdp->bg_checksum;
1937 crc = crc16(crc, (__u8 *)gdp, offset); 1984 gdp->bg_checksum = 0;
1938 offset += sizeof(gdp->bg_checksum); /* skip checksum */ 1985 csum32 = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&le_group,
1939 /* for checksum of struct ext4_group_desc do the rest...*/ 1986 sizeof(le_group));
1940 if ((sbi->s_es->s_feature_incompat & 1987 csum32 = ext4_chksum(sbi, csum32, (__u8 *)gdp,
1941 cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) && 1988 sbi->s_desc_size);
1942 offset < le16_to_cpu(sbi->s_es->s_desc_size)) 1989 gdp->bg_checksum = old_csum;
1943 crc = crc16(crc, (__u8 *)gdp + offset, 1990
1944 le16_to_cpu(sbi->s_es->s_desc_size) - 1991 crc = csum32 & 0xFFFF;
1945 offset); 1992 goto out;
1946 } 1993 }
1947 1994
1995 /* old crc16 code */
1996 offset = offsetof(struct ext4_group_desc, bg_checksum);
1997
1998 crc = crc16(~0, sbi->s_es->s_uuid, sizeof(sbi->s_es->s_uuid));
1999 crc = crc16(crc, (__u8 *)&le_group, sizeof(le_group));
2000 crc = crc16(crc, (__u8 *)gdp, offset);
2001 offset += sizeof(gdp->bg_checksum); /* skip checksum */
2002 /* for checksum of struct ext4_group_desc do the rest...*/
2003 if ((sbi->s_es->s_feature_incompat &
2004 cpu_to_le32(EXT4_FEATURE_INCOMPAT_64BIT)) &&
2005 offset < le16_to_cpu(sbi->s_es->s_desc_size))
2006 crc = crc16(crc, (__u8 *)gdp + offset,
2007 le16_to_cpu(sbi->s_es->s_desc_size) -
2008 offset);
2009
2010out:
1948 return cpu_to_le16(crc); 2011 return cpu_to_le16(crc);
1949} 2012}
1950 2013
1951int ext4_group_desc_csum_verify(struct ext4_sb_info *sbi, __u32 block_group, 2014int ext4_group_desc_csum_verify(struct super_block *sb, __u32 block_group,
1952 struct ext4_group_desc *gdp) 2015 struct ext4_group_desc *gdp)
1953{ 2016{
1954 if ((sbi->s_es->s_feature_ro_compat & 2017 if (ext4_has_group_desc_csum(sb) &&
1955 cpu_to_le32(EXT4_FEATURE_RO_COMPAT_GDT_CSUM)) && 2018 (gdp->bg_checksum != ext4_group_desc_csum(EXT4_SB(sb),
1956 (gdp->bg_checksum != ext4_group_desc_csum(sbi, block_group, gdp))) 2019 block_group, gdp)))
1957 return 0; 2020 return 0;
1958 2021
1959 return 1; 2022 return 1;
1960} 2023}
1961 2024
2025void ext4_group_desc_csum_set(struct super_block *sb, __u32 block_group,
2026 struct ext4_group_desc *gdp)
2027{
2028 if (!ext4_has_group_desc_csum(sb))
2029 return;
2030 gdp->bg_checksum = ext4_group_desc_csum(EXT4_SB(sb), block_group, gdp);
2031}
2032
1962/* Called at mount-time, super-block is locked */ 2033/* Called at mount-time, super-block is locked */
1963static int ext4_check_descriptors(struct super_block *sb, 2034static int ext4_check_descriptors(struct super_block *sb,
1964 ext4_group_t *first_not_zeroed) 2035 ext4_group_t *first_not_zeroed)
@@ -2013,7 +2084,7 @@ static int ext4_check_descriptors(struct super_block *sb,
2013 return 0; 2084 return 0;
2014 } 2085 }
2015 ext4_lock_group(sb, i); 2086 ext4_lock_group(sb, i);
2016 if (!ext4_group_desc_csum_verify(sbi, i, gdp)) { 2087 if (!ext4_group_desc_csum_verify(sb, i, gdp)) {
2017 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: " 2088 ext4_msg(sb, KERN_ERR, "ext4_check_descriptors: "
2018 "Checksum for group %u failed (%u!=%u)", 2089 "Checksum for group %u failed (%u!=%u)",
2019 i, le16_to_cpu(ext4_group_desc_csum(sbi, i, 2090 i, le16_to_cpu(ext4_group_desc_csum(sbi, i,
@@ -2417,6 +2488,23 @@ static ssize_t sbi_ui_store(struct ext4_attr *a,
2417 return count; 2488 return count;
2418} 2489}
2419 2490
2491static ssize_t trigger_test_error(struct ext4_attr *a,
2492 struct ext4_sb_info *sbi,
2493 const char *buf, size_t count)
2494{
2495 int len = count;
2496
2497 if (!capable(CAP_SYS_ADMIN))
2498 return -EPERM;
2499
2500 if (len && buf[len-1] == '\n')
2501 len--;
2502
2503 if (len)
2504 ext4_error(sbi->s_sb, "%.*s", len, buf);
2505 return count;
2506}
2507
2420#define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \ 2508#define EXT4_ATTR_OFFSET(_name,_mode,_show,_store,_elname) \
2421static struct ext4_attr ext4_attr_##_name = { \ 2509static struct ext4_attr ext4_attr_##_name = { \
2422 .attr = {.name = __stringify(_name), .mode = _mode }, \ 2510 .attr = {.name = __stringify(_name), .mode = _mode }, \
@@ -2447,6 +2535,7 @@ EXT4_RW_ATTR_SBI_UI(mb_order2_req, s_mb_order2_reqs);
2447EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request); 2535EXT4_RW_ATTR_SBI_UI(mb_stream_req, s_mb_stream_request);
2448EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc); 2536EXT4_RW_ATTR_SBI_UI(mb_group_prealloc, s_mb_group_prealloc);
2449EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump); 2537EXT4_RW_ATTR_SBI_UI(max_writeback_mb_bump, s_max_writeback_mb_bump);
2538EXT4_ATTR(trigger_fs_error, 0200, NULL, trigger_test_error);
2450 2539
2451static struct attribute *ext4_attrs[] = { 2540static struct attribute *ext4_attrs[] = {
2452 ATTR_LIST(delayed_allocation_blocks), 2541 ATTR_LIST(delayed_allocation_blocks),
@@ -2461,6 +2550,7 @@ static struct attribute *ext4_attrs[] = {
2461 ATTR_LIST(mb_stream_req), 2550 ATTR_LIST(mb_stream_req),
2462 ATTR_LIST(mb_group_prealloc), 2551 ATTR_LIST(mb_group_prealloc),
2463 ATTR_LIST(max_writeback_mb_bump), 2552 ATTR_LIST(max_writeback_mb_bump),
2553 ATTR_LIST(trigger_fs_error),
2464 NULL, 2554 NULL,
2465}; 2555};
2466 2556
@@ -2957,6 +3047,44 @@ static void ext4_destroy_lazyinit_thread(void)
2957 kthread_stop(ext4_lazyinit_task); 3047 kthread_stop(ext4_lazyinit_task);
2958} 3048}
2959 3049
3050static int set_journal_csum_feature_set(struct super_block *sb)
3051{
3052 int ret = 1;
3053 int compat, incompat;
3054 struct ext4_sb_info *sbi = EXT4_SB(sb);
3055
3056 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3057 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3058 /* journal checksum v2 */
3059 compat = 0;
3060 incompat = JBD2_FEATURE_INCOMPAT_CSUM_V2;
3061 } else {
3062 /* journal checksum v1 */
3063 compat = JBD2_FEATURE_COMPAT_CHECKSUM;
3064 incompat = 0;
3065 }
3066
3067 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) {
3068 ret = jbd2_journal_set_features(sbi->s_journal,
3069 compat, 0,
3070 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3071 incompat);
3072 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
3073 ret = jbd2_journal_set_features(sbi->s_journal,
3074 compat, 0,
3075 incompat);
3076 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3077 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3078 } else {
3079 jbd2_journal_clear_features(sbi->s_journal,
3080 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3081 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT |
3082 JBD2_FEATURE_INCOMPAT_CSUM_V2);
3083 }
3084
3085 return ret;
3086}
3087
2960static int ext4_fill_super(struct super_block *sb, void *data, int silent) 3088static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2961{ 3089{
2962 char *orig_data = kstrdup(data, GFP_KERNEL); 3090 char *orig_data = kstrdup(data, GFP_KERNEL);
@@ -2993,6 +3121,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
2993 goto out_free_orig; 3121 goto out_free_orig;
2994 } 3122 }
2995 sb->s_fs_info = sbi; 3123 sb->s_fs_info = sbi;
3124 sbi->s_sb = sb;
2996 sbi->s_mount_opt = 0; 3125 sbi->s_mount_opt = 0;
2997 sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID); 3126 sbi->s_resuid = make_kuid(&init_user_ns, EXT4_DEF_RESUID);
2998 sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID); 3127 sbi->s_resgid = make_kgid(&init_user_ns, EXT4_DEF_RESGID);
@@ -3032,13 +3161,54 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3032 * Note: s_es must be initialized as soon as possible because 3161 * Note: s_es must be initialized as soon as possible because
3033 * some ext4 macro-instructions depend on its value 3162 * some ext4 macro-instructions depend on its value
3034 */ 3163 */
3035 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); 3164 es = (struct ext4_super_block *) (bh->b_data + offset);
3036 sbi->s_es = es; 3165 sbi->s_es = es;
3037 sb->s_magic = le16_to_cpu(es->s_magic); 3166 sb->s_magic = le16_to_cpu(es->s_magic);
3038 if (sb->s_magic != EXT4_SUPER_MAGIC) 3167 if (sb->s_magic != EXT4_SUPER_MAGIC)
3039 goto cantfind_ext4; 3168 goto cantfind_ext4;
3040 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written); 3169 sbi->s_kbytes_written = le64_to_cpu(es->s_kbytes_written);
3041 3170
3171 /* Warn if metadata_csum and gdt_csum are both set. */
3172 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3173 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
3174 EXT4_HAS_RO_COMPAT_FEATURE(sb, EXT4_FEATURE_RO_COMPAT_GDT_CSUM))
3175 ext4_warning(sb, KERN_INFO "metadata_csum and uninit_bg are "
3176 "redundant flags; please run fsck.");
3177
3178 /* Check for a known checksum algorithm */
3179 if (!ext4_verify_csum_type(sb, es)) {
3180 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3181 "unknown checksum algorithm.");
3182 silent = 1;
3183 goto cantfind_ext4;
3184 }
3185
3186 /* Load the checksum driver */
3187 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3188 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM)) {
3189 sbi->s_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
3190 if (IS_ERR(sbi->s_chksum_driver)) {
3191 ext4_msg(sb, KERN_ERR, "Cannot load crc32c driver.");
3192 ret = PTR_ERR(sbi->s_chksum_driver);
3193 sbi->s_chksum_driver = NULL;
3194 goto failed_mount;
3195 }
3196 }
3197
3198 /* Check superblock checksum */
3199 if (!ext4_superblock_csum_verify(sb, es)) {
3200 ext4_msg(sb, KERN_ERR, "VFS: Found ext4 filesystem with "
3201 "invalid superblock checksum. Run e2fsck?");
3202 silent = 1;
3203 goto cantfind_ext4;
3204 }
3205
3206 /* Precompute checksum seed for all metadata */
3207 if (EXT4_HAS_RO_COMPAT_FEATURE(sb,
3208 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
3209 sbi->s_csum_seed = ext4_chksum(sbi, ~0, es->s_uuid,
3210 sizeof(es->s_uuid));
3211
3042 /* Set defaults before we parse the mount options */ 3212 /* Set defaults before we parse the mount options */
3043 def_mount_opts = le32_to_cpu(es->s_default_mount_opts); 3213 def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
3044 set_opt(sb, INIT_INODE_TABLE); 3214 set_opt(sb, INIT_INODE_TABLE);
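Note that the directory, bitmap and xattr checksums elsewhere in this series key off ei->i_csum_seed rather than the filesystem-wide seed precomputed above. That per-inode seed is not set up in this file; presumably it folds the inode number and generation into s_csum_seed when the inode is read or created. A sketch of the assumed derivation, not code from this patch:

	/* assumed: mixes the inode's identity into the fs-wide seed;
	 * inum and gen are the little-endian inode number and generation */
	csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&inum, sizeof(inum));
	ei->i_csum_seed = ext4_chksum(sbi, csum, (__u8 *)&gen, sizeof(gen));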
@@ -3200,7 +3370,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3200 "Can't read superblock on 2nd try"); 3370 "Can't read superblock on 2nd try");
3201 goto failed_mount; 3371 goto failed_mount;
3202 } 3372 }
3203 es = (struct ext4_super_block *)(((char *)bh->b_data) + offset); 3373 es = (struct ext4_super_block *)(bh->b_data + offset);
3204 sbi->s_es = es; 3374 sbi->s_es = es;
3205 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) { 3375 if (es->s_magic != cpu_to_le16(EXT4_SUPER_MAGIC)) {
3206 ext4_msg(sb, KERN_ERR, 3376 ext4_msg(sb, KERN_ERR,
@@ -3392,6 +3562,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3392 GFP_KERNEL); 3562 GFP_KERNEL);
3393 if (sbi->s_group_desc == NULL) { 3563 if (sbi->s_group_desc == NULL) {
3394 ext4_msg(sb, KERN_ERR, "not enough memory"); 3564 ext4_msg(sb, KERN_ERR, "not enough memory");
3565 ret = -ENOMEM;
3395 goto failed_mount; 3566 goto failed_mount;
3396 } 3567 }
3397 3568
@@ -3449,6 +3620,7 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3449 } 3620 }
3450 if (err) { 3621 if (err) {
3451 ext4_msg(sb, KERN_ERR, "insufficient memory"); 3622 ext4_msg(sb, KERN_ERR, "insufficient memory");
3623 ret = err;
3452 goto failed_mount3; 3624 goto failed_mount3;
3453 } 3625 }
3454 3626
@@ -3506,26 +3678,17 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
3506 goto no_journal; 3678 goto no_journal;
3507 } 3679 }
3508 3680
3509 if (ext4_blocks_count(es) > 0xffffffffULL && 3681 if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_64BIT) &&
3510 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0, 3682 !jbd2_journal_set_features(EXT4_SB(sb)->s_journal, 0, 0,
3511 JBD2_FEATURE_INCOMPAT_64BIT)) { 3683 JBD2_FEATURE_INCOMPAT_64BIT)) {
3512 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature"); 3684 ext4_msg(sb, KERN_ERR, "Failed to set 64-bit journal feature");
3513 goto failed_mount_wq; 3685 goto failed_mount_wq;
3514 } 3686 }
3515 3687
3516 if (test_opt(sb, JOURNAL_ASYNC_COMMIT)) { 3688 if (!set_journal_csum_feature_set(sb)) {
3517 jbd2_journal_set_features(sbi->s_journal, 3689 ext4_msg(sb, KERN_ERR, "Failed to set journal checksum "
3518 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 3690 "feature set");
3519 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT); 3691 goto failed_mount_wq;
3520 } else if (test_opt(sb, JOURNAL_CHECKSUM)) {
3521 jbd2_journal_set_features(sbi->s_journal,
3522 JBD2_FEATURE_COMPAT_CHECKSUM, 0, 0);
3523 jbd2_journal_clear_features(sbi->s_journal, 0, 0,
3524 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3525 } else {
3526 jbd2_journal_clear_features(sbi->s_journal,
3527 JBD2_FEATURE_COMPAT_CHECKSUM, 0,
3528 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT);
3529 } 3692 }
3530 3693
3531 /* We have now updated the journal if required, so we can 3694 /* We have now updated the journal if required, so we can
@@ -3606,7 +3769,8 @@ no_journal:
3606 goto failed_mount4; 3769 goto failed_mount4;
3607 } 3770 }
3608 3771
3609 ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY); 3772 if (ext4_setup_super(sb, es, sb->s_flags & MS_RDONLY))
3773 sb->s_flags |= MS_RDONLY;
3610 3774
3611 /* determine the minimum size of new large inodes, if present */ 3775 /* determine the minimum size of new large inodes, if present */
3612 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) { 3776 if (sbi->s_inode_size > EXT4_GOOD_OLD_INODE_SIZE) {
@@ -3641,7 +3805,7 @@ no_journal:
3641 } 3805 }
3642 3806
3643 ext4_ext_init(sb); 3807 ext4_ext_init(sb);
3644 err = ext4_mb_init(sb, needs_recovery); 3808 err = ext4_mb_init(sb);
3645 if (err) { 3809 if (err) {
3646 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)", 3810 ext4_msg(sb, KERN_ERR, "failed to initialize mballoc (%d)",
3647 err); 3811 err);
@@ -3724,6 +3888,8 @@ failed_mount2:
3724 brelse(sbi->s_group_desc[i]); 3888 brelse(sbi->s_group_desc[i]);
3725 ext4_kvfree(sbi->s_group_desc); 3889 ext4_kvfree(sbi->s_group_desc);
3726failed_mount: 3890failed_mount:
3891 if (sbi->s_chksum_driver)
3892 crypto_free_shash(sbi->s_chksum_driver);
3727 if (sbi->s_proc) { 3893 if (sbi->s_proc) {
3728 remove_proc_entry("options", sbi->s_proc); 3894 remove_proc_entry("options", sbi->s_proc);
3729 remove_proc_entry(sb->s_id, ext4_proc_root); 3895 remove_proc_entry(sb->s_id, ext4_proc_root);
@@ -3847,7 +4013,7 @@ static journal_t *ext4_get_dev_journal(struct super_block *sb,
3847 goto out_bdev; 4013 goto out_bdev;
3848 } 4014 }
3849 4015
3850 es = (struct ext4_super_block *) (((char *)bh->b_data) + offset); 4016 es = (struct ext4_super_block *) (bh->b_data + offset);
3851 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) || 4017 if ((le16_to_cpu(es->s_magic) != EXT4_SUPER_MAGIC) ||
3852 !(le32_to_cpu(es->s_feature_incompat) & 4018 !(le32_to_cpu(es->s_feature_incompat) &
3853 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) { 4019 EXT4_FEATURE_INCOMPAT_JOURNAL_DEV)) {
@@ -4039,6 +4205,7 @@ static int ext4_commit_super(struct super_block *sb, int sync)
4039 &EXT4_SB(sb)->s_freeinodes_counter)); 4205 &EXT4_SB(sb)->s_freeinodes_counter));
4040 sb->s_dirt = 0; 4206 sb->s_dirt = 0;
4041 BUFFER_TRACE(sbh, "marking dirty"); 4207 BUFFER_TRACE(sbh, "marking dirty");
4208 ext4_superblock_csum_set(sb, es);
4042 mark_buffer_dirty(sbh); 4209 mark_buffer_dirty(sbh);
4043 if (sync) { 4210 if (sync) {
4044 error = sync_dirty_buffer(sbh); 4211 error = sync_dirty_buffer(sbh);
@@ -4333,7 +4500,7 @@ static int ext4_remount(struct super_block *sb, int *flags, char *data)
4333 struct ext4_group_desc *gdp = 4500 struct ext4_group_desc *gdp =
4334 ext4_get_group_desc(sb, g, NULL); 4501 ext4_get_group_desc(sb, g, NULL);
4335 4502
4336 if (!ext4_group_desc_csum_verify(sbi, g, gdp)) { 4503 if (!ext4_group_desc_csum_verify(sb, g, gdp)) {
4337 ext4_msg(sb, KERN_ERR, 4504 ext4_msg(sb, KERN_ERR,
4338 "ext4_remount: Checksum for group %u failed (%u!=%u)", 4505 "ext4_remount: Checksum for group %u failed (%u!=%u)",
4339 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)), 4506 g, le16_to_cpu(ext4_group_desc_csum(sbi, g, gdp)),
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index e88748e55c0f..e56c9ed7d6e3 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -122,6 +122,58 @@ const struct xattr_handler *ext4_xattr_handlers[] = {
122 NULL 122 NULL
123}; 123};
124 124
125static __le32 ext4_xattr_block_csum(struct inode *inode,
126 sector_t block_nr,
127 struct ext4_xattr_header *hdr)
128{
129 struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
130 struct ext4_inode_info *ei = EXT4_I(inode);
131 __u32 csum, old;
132
133 old = hdr->h_checksum;
134 hdr->h_checksum = 0;
135 if (le32_to_cpu(hdr->h_refcount) != 1) {
136 block_nr = cpu_to_le64(block_nr);
137 csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
138 sizeof(block_nr));
139 } else
140 csum = ei->i_csum_seed;
141 csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
142 EXT4_BLOCK_SIZE(inode->i_sb));
143 hdr->h_checksum = old;
144 return cpu_to_le32(csum);
145}
146
147static int ext4_xattr_block_csum_verify(struct inode *inode,
148 sector_t block_nr,
149 struct ext4_xattr_header *hdr)
150{
151 if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
152 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
153 (hdr->h_checksum != ext4_xattr_block_csum(inode, block_nr, hdr)))
154 return 0;
155 return 1;
156}
157
158static void ext4_xattr_block_csum_set(struct inode *inode,
159 sector_t block_nr,
160 struct ext4_xattr_header *hdr)
161{
162 if (!EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
163 EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
164 return;
165
166 hdr->h_checksum = ext4_xattr_block_csum(inode, block_nr, hdr);
167}
168
169static inline int ext4_handle_dirty_xattr_block(handle_t *handle,
170 struct inode *inode,
171 struct buffer_head *bh)
172{
173 ext4_xattr_block_csum_set(inode, bh->b_blocknr, BHDR(bh));
174 return ext4_handle_dirty_metadata(handle, inode, bh);
175}
176
125static inline const struct xattr_handler * 177static inline const struct xattr_handler *
126ext4_xattr_handler(int name_index) 178ext4_xattr_handler(int name_index)
127{ 179{
@@ -156,12 +208,22 @@ ext4_xattr_check_names(struct ext4_xattr_entry *entry, void *end)
156} 208}
157 209
158static inline int 210static inline int
159ext4_xattr_check_block(struct buffer_head *bh) 211ext4_xattr_check_block(struct inode *inode, struct buffer_head *bh)
160{ 212{
213 int error;
214
215 if (buffer_verified(bh))
216 return 0;
217
161 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) || 218 if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
162 BHDR(bh)->h_blocks != cpu_to_le32(1)) 219 BHDR(bh)->h_blocks != cpu_to_le32(1))
163 return -EIO; 220 return -EIO;
164 return ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size); 221 if (!ext4_xattr_block_csum_verify(inode, bh->b_blocknr, BHDR(bh)))
222 return -EIO;
223 error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size);
224 if (!error)
225 set_buffer_verified(bh);
226 return error;
165} 227}
166 228
167static inline int 229static inline int
@@ -224,7 +286,7 @@ ext4_xattr_block_get(struct inode *inode, int name_index, const char *name,
224 goto cleanup; 286 goto cleanup;
225 ea_bdebug(bh, "b_count=%d, refcount=%d", 287 ea_bdebug(bh, "b_count=%d, refcount=%d",
226 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 288 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
227 if (ext4_xattr_check_block(bh)) { 289 if (ext4_xattr_check_block(inode, bh)) {
228bad_block: 290bad_block:
229 EXT4_ERROR_INODE(inode, "bad block %llu", 291 EXT4_ERROR_INODE(inode, "bad block %llu",
230 EXT4_I(inode)->i_file_acl); 292 EXT4_I(inode)->i_file_acl);
@@ -369,7 +431,7 @@ ext4_xattr_block_list(struct dentry *dentry, char *buffer, size_t buffer_size)
369 goto cleanup; 431 goto cleanup;
370 ea_bdebug(bh, "b_count=%d, refcount=%d", 432 ea_bdebug(bh, "b_count=%d, refcount=%d",
371 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount)); 433 atomic_read(&(bh->b_count)), le32_to_cpu(BHDR(bh)->h_refcount));
372 if (ext4_xattr_check_block(bh)) { 434 if (ext4_xattr_check_block(inode, bh)) {
373 EXT4_ERROR_INODE(inode, "bad block %llu", 435 EXT4_ERROR_INODE(inode, "bad block %llu",
374 EXT4_I(inode)->i_file_acl); 436 EXT4_I(inode)->i_file_acl);
375 error = -EIO; 437 error = -EIO;
@@ -492,7 +554,7 @@ ext4_xattr_release_block(handle_t *handle, struct inode *inode,
492 if (ce) 554 if (ce)
493 mb_cache_entry_release(ce); 555 mb_cache_entry_release(ce);
494 unlock_buffer(bh); 556 unlock_buffer(bh);
495 error = ext4_handle_dirty_metadata(handle, inode, bh); 557 error = ext4_handle_dirty_xattr_block(handle, inode, bh);
496 if (IS_SYNC(inode)) 558 if (IS_SYNC(inode))
497 ext4_handle_sync(handle); 559 ext4_handle_sync(handle);
498 dquot_free_block(inode, 1); 560 dquot_free_block(inode, 1);
@@ -662,7 +724,7 @@ ext4_xattr_block_find(struct inode *inode, struct ext4_xattr_info *i,
662 ea_bdebug(bs->bh, "b_count=%d, refcount=%d", 724 ea_bdebug(bs->bh, "b_count=%d, refcount=%d",
663 atomic_read(&(bs->bh->b_count)), 725 atomic_read(&(bs->bh->b_count)),
664 le32_to_cpu(BHDR(bs->bh)->h_refcount)); 726 le32_to_cpu(BHDR(bs->bh)->h_refcount));
665 if (ext4_xattr_check_block(bs->bh)) { 727 if (ext4_xattr_check_block(inode, bs->bh)) {
666 EXT4_ERROR_INODE(inode, "bad block %llu", 728 EXT4_ERROR_INODE(inode, "bad block %llu",
667 EXT4_I(inode)->i_file_acl); 729 EXT4_I(inode)->i_file_acl);
668 error = -EIO; 730 error = -EIO;
@@ -725,9 +787,9 @@ ext4_xattr_block_set(handle_t *handle, struct inode *inode,
725 if (error == -EIO) 787 if (error == -EIO)
726 goto bad_block; 788 goto bad_block;
727 if (!error) 789 if (!error)
728 error = ext4_handle_dirty_metadata(handle, 790 error = ext4_handle_dirty_xattr_block(handle,
729 inode, 791 inode,
730 bs->bh); 792 bs->bh);
731 if (error) 793 if (error)
732 goto cleanup; 794 goto cleanup;
733 goto inserted; 795 goto inserted;
@@ -796,9 +858,9 @@ inserted:
796 ea_bdebug(new_bh, "reusing; refcount now=%d", 858 ea_bdebug(new_bh, "reusing; refcount now=%d",
797 le32_to_cpu(BHDR(new_bh)->h_refcount)); 859 le32_to_cpu(BHDR(new_bh)->h_refcount));
798 unlock_buffer(new_bh); 860 unlock_buffer(new_bh);
799 error = ext4_handle_dirty_metadata(handle, 861 error = ext4_handle_dirty_xattr_block(handle,
800 inode, 862 inode,
801 new_bh); 863 new_bh);
802 if (error) 864 if (error)
803 goto cleanup_dquot; 865 goto cleanup_dquot;
804 } 866 }
@@ -855,8 +917,8 @@ getblk_failed:
855 set_buffer_uptodate(new_bh); 917 set_buffer_uptodate(new_bh);
856 unlock_buffer(new_bh); 918 unlock_buffer(new_bh);
857 ext4_xattr_cache_insert(new_bh); 919 ext4_xattr_cache_insert(new_bh);
858 error = ext4_handle_dirty_metadata(handle, 920 error = ext4_handle_dirty_xattr_block(handle,
859 inode, new_bh); 921 inode, new_bh);
860 if (error) 922 if (error)
861 goto cleanup; 923 goto cleanup;
862 } 924 }
@@ -1193,7 +1255,7 @@ retry:
1193 error = -EIO; 1255 error = -EIO;
1194 if (!bh) 1256 if (!bh)
1195 goto cleanup; 1257 goto cleanup;
1196 if (ext4_xattr_check_block(bh)) { 1258 if (ext4_xattr_check_block(inode, bh)) {
1197 EXT4_ERROR_INODE(inode, "bad block %llu", 1259 EXT4_ERROR_INODE(inode, "bad block %llu",
1198 EXT4_I(inode)->i_file_acl); 1260 EXT4_I(inode)->i_file_acl);
1199 error = -EIO; 1261 error = -EIO;
diff --git a/fs/ext4/xattr.h b/fs/ext4/xattr.h
index 25b7387ff183..91f31ca7d9af 100644
--- a/fs/ext4/xattr.h
+++ b/fs/ext4/xattr.h
@@ -27,7 +27,9 @@ struct ext4_xattr_header {
27 __le32 h_refcount; /* reference count */ 27 __le32 h_refcount; /* reference count */
28 __le32 h_blocks; /* number of disk blocks used */ 28 __le32 h_blocks; /* number of disk blocks used */
29 __le32 h_hash; /* hash value of all attributes */ 29 __le32 h_hash; /* hash value of all attributes */
30 __u32 h_reserved[4]; /* zero right now */ 30 __le32 h_checksum; /* crc32c(uuid+id+xattrblock) */
31 /* id = inum if refcount=1, blknum otherwise */
32 __u32 h_reserved[3]; /* zero right now */
31}; 33};
32 34
33struct ext4_xattr_ibody_header { 35struct ext4_xattr_ibody_header {
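
The xattr.c and xattr.h hunks above add checksumming for extended-attribute blocks: h_checksum is a crc32c seeded from the filesystem UUID, folded with either the block number (shared blocks, h_refcount != 1) or the owning inode's checksum seed, and then taken over the whole block with the checksum field zeroed. The sketch below mirrors that folding order in plain C; crc32c() is a hypothetical stand-in for the kernel's crypto shash driver, and the little-endian conversions are omitted for brevity.

	#include <stdint.h>
	#include <stddef.h>

	/* hypothetical helper standing in for the kernel's crc32c shash */
	uint32_t crc32c(uint32_t seed, const void *buf, size_t len);

	struct xattr_header {			/* mirrors struct ext4_xattr_header */
		uint32_t h_magic;
		uint32_t h_refcount;
		uint32_t h_blocks;
		uint32_t h_hash;
		uint32_t h_checksum;		/* new field, takes one reserved word */
		uint32_t h_reserved[3];
	};

	/* fs_seed: crc32c of the filesystem UUID; inode_seed: per-inode seed */
	uint32_t xattr_block_csum(uint32_t fs_seed, uint32_t inode_seed,
				  uint64_t block_nr, void *block, size_t blocksize)
	{
		struct xattr_header *hdr = block;
		uint32_t saved = hdr->h_checksum;
		uint32_t csum;

		hdr->h_checksum = 0;			/* the field itself is not covered */
		if (hdr->h_refcount != 1)		/* shared block: fold in its number */
			csum = crc32c(fs_seed, &block_nr, sizeof(block_nr));
		else					/* private block: use the inode seed */
			csum = inode_seed;
		csum = crc32c(csum, block, blocksize);
		hdr->h_checksum = saved;
		return csum;
	}
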
diff --git a/fs/fat/dir.c b/fs/fat/dir.c
index aca191bd5f8f..6eaa28c98ad1 100644
--- a/fs/fat/dir.c
+++ b/fs/fat/dir.c
@@ -98,8 +98,8 @@ next:
98 98
99 *bh = sb_bread(sb, phys); 99 *bh = sb_bread(sb, phys);
100 if (*bh == NULL) { 100 if (*bh == NULL) {
101 fat_msg(sb, KERN_ERR, "Directory bread(block %llu) failed", 101 fat_msg_ratelimit(sb, KERN_ERR,
102 (llu)phys); 102 "Directory bread(block %llu) failed", (llu)phys);
103 /* skip this block */ 103 /* skip this block */
104 *pos = (iblock + 1) << sb->s_blocksize_bits; 104 *pos = (iblock + 1) << sb->s_blocksize_bits;
105 goto next; 105 goto next;
diff --git a/fs/fat/fat.h b/fs/fat/fat.h
index 66994f316e18..fc35c5c69136 100644
--- a/fs/fat/fat.h
+++ b/fs/fat/fat.h
@@ -82,6 +82,7 @@ struct msdos_sb_info {
82 int fatent_shift; 82 int fatent_shift;
83 struct fatent_operations *fatent_ops; 83 struct fatent_operations *fatent_ops;
84 struct inode *fat_inode; 84 struct inode *fat_inode;
85 struct inode *fsinfo_inode;
85 86
86 struct ratelimit_state ratelimit; 87 struct ratelimit_state ratelimit;
87 88
@@ -334,6 +335,11 @@ void __fat_fs_error(struct super_block *sb, int report, const char *fmt, ...);
334 __fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args) 335 __fat_fs_error(sb, __ratelimit(&MSDOS_SB(sb)->ratelimit), fmt , ## args)
335__printf(3, 4) __cold 336__printf(3, 4) __cold
336void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...); 337void fat_msg(struct super_block *sb, const char *level, const char *fmt, ...);
338#define fat_msg_ratelimit(sb, level, fmt, args...) \
339 do { \
340 if (__ratelimit(&MSDOS_SB(sb)->ratelimit)) \
341 fat_msg(sb, level, fmt, ## args); \
342 } while (0)
337extern int fat_clusters_flush(struct super_block *sb); 343extern int fat_clusters_flush(struct super_block *sb);
338extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster); 344extern int fat_chain_add(struct inode *inode, int new_dclus, int nr_cluster);
339extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts, 345extern void fat_time_fat2unix(struct msdos_sb_info *sbi, struct timespec *ts,
diff --git a/fs/fat/fatent.c b/fs/fat/fatent.c
index 2e81ac0df7e2..31f08ab62c56 100644
--- a/fs/fat/fatent.c
+++ b/fs/fat/fatent.c
@@ -308,6 +308,16 @@ void fat_ent_access_init(struct super_block *sb)
308 } 308 }
309} 309}
310 310
311static void mark_fsinfo_dirty(struct super_block *sb)
312{
313 struct msdos_sb_info *sbi = MSDOS_SB(sb);
314
315 if (sb->s_flags & MS_RDONLY || sbi->fat_bits != 32)
316 return;
317
318 __mark_inode_dirty(sbi->fsinfo_inode, I_DIRTY_SYNC);
319}
320
311static inline int fat_ent_update_ptr(struct super_block *sb, 321static inline int fat_ent_update_ptr(struct super_block *sb,
312 struct fat_entry *fatent, 322 struct fat_entry *fatent,
313 int offset, sector_t blocknr) 323 int offset, sector_t blocknr)
@@ -498,7 +508,6 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
498 sbi->prev_free = entry; 508 sbi->prev_free = entry;
499 if (sbi->free_clusters != -1) 509 if (sbi->free_clusters != -1)
500 sbi->free_clusters--; 510 sbi->free_clusters--;
501 sb->s_dirt = 1;
502 511
503 cluster[idx_clus] = entry; 512 cluster[idx_clus] = entry;
504 idx_clus++; 513 idx_clus++;
@@ -520,11 +529,11 @@ int fat_alloc_clusters(struct inode *inode, int *cluster, int nr_cluster)
520 /* Couldn't allocate the free entries */ 529 /* Couldn't allocate the free entries */
521 sbi->free_clusters = 0; 530 sbi->free_clusters = 0;
522 sbi->free_clus_valid = 1; 531 sbi->free_clus_valid = 1;
523 sb->s_dirt = 1;
524 err = -ENOSPC; 532 err = -ENOSPC;
525 533
526out: 534out:
527 unlock_fat(sbi); 535 unlock_fat(sbi);
536 mark_fsinfo_dirty(sb);
528 fatent_brelse(&fatent); 537 fatent_brelse(&fatent);
529 if (!err) { 538 if (!err) {
530 if (inode_needs_sync(inode)) 539 if (inode_needs_sync(inode))
@@ -549,7 +558,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
549 struct fat_entry fatent; 558 struct fat_entry fatent;
550 struct buffer_head *bhs[MAX_BUF_PER_PAGE]; 559 struct buffer_head *bhs[MAX_BUF_PER_PAGE];
551 int i, err, nr_bhs; 560 int i, err, nr_bhs;
552 int first_cl = cluster; 561 int first_cl = cluster, dirty_fsinfo = 0;
553 562
554 nr_bhs = 0; 563 nr_bhs = 0;
555 fatent_init(&fatent); 564 fatent_init(&fatent);
@@ -587,7 +596,7 @@ int fat_free_clusters(struct inode *inode, int cluster)
587 ops->ent_put(&fatent, FAT_ENT_FREE); 596 ops->ent_put(&fatent, FAT_ENT_FREE);
588 if (sbi->free_clusters != -1) { 597 if (sbi->free_clusters != -1) {
589 sbi->free_clusters++; 598 sbi->free_clusters++;
590 sb->s_dirt = 1; 599 dirty_fsinfo = 1;
591 } 600 }
592 601
593 if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) { 602 if (nr_bhs + fatent.nr_bhs > MAX_BUF_PER_PAGE) {
@@ -617,6 +626,8 @@ error:
617 for (i = 0; i < nr_bhs; i++) 626 for (i = 0; i < nr_bhs; i++)
618 brelse(bhs[i]); 627 brelse(bhs[i]);
619 unlock_fat(sbi); 628 unlock_fat(sbi);
629 if (dirty_fsinfo)
630 mark_fsinfo_dirty(sb);
620 631
621 return err; 632 return err;
622} 633}
@@ -677,7 +688,7 @@ int fat_count_free_clusters(struct super_block *sb)
677 } 688 }
678 sbi->free_clusters = free; 689 sbi->free_clusters = free;
679 sbi->free_clus_valid = 1; 690 sbi->free_clus_valid = 1;
680 sb->s_dirt = 1; 691 mark_fsinfo_dirty(sb);
681 fatent_brelse(&fatent); 692 fatent_brelse(&fatent);
682out: 693out:
683 unlock_fat(sbi); 694 unlock_fat(sbi);
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index b3d290c1b513..a3d81ebf6d86 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -459,37 +459,11 @@ static void fat_evict_inode(struct inode *inode)
459 fat_detach(inode); 459 fat_detach(inode);
460} 460}
461 461
462static void fat_write_super(struct super_block *sb)
463{
464 lock_super(sb);
465 sb->s_dirt = 0;
466
467 if (!(sb->s_flags & MS_RDONLY))
468 fat_clusters_flush(sb);
469 unlock_super(sb);
470}
471
472static int fat_sync_fs(struct super_block *sb, int wait)
473{
474 int err = 0;
475
476 if (sb->s_dirt) {
477 lock_super(sb);
478 sb->s_dirt = 0;
479 err = fat_clusters_flush(sb);
480 unlock_super(sb);
481 }
482
483 return err;
484}
485
486static void fat_put_super(struct super_block *sb) 462static void fat_put_super(struct super_block *sb)
487{ 463{
488 struct msdos_sb_info *sbi = MSDOS_SB(sb); 464 struct msdos_sb_info *sbi = MSDOS_SB(sb);
489 465
490 if (sb->s_dirt) 466 iput(sbi->fsinfo_inode);
491 fat_write_super(sb);
492
493 iput(sbi->fat_inode); 467 iput(sbi->fat_inode);
494 468
495 unload_nls(sbi->nls_disk); 469 unload_nls(sbi->nls_disk);
@@ -661,7 +635,18 @@ retry:
661 635
662static int fat_write_inode(struct inode *inode, struct writeback_control *wbc) 636static int fat_write_inode(struct inode *inode, struct writeback_control *wbc)
663{ 637{
664 return __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL); 638 int err;
639
640 if (inode->i_ino == MSDOS_FSINFO_INO) {
641 struct super_block *sb = inode->i_sb;
642
643 lock_super(sb);
644 err = fat_clusters_flush(sb);
645 unlock_super(sb);
646 } else
647 err = __fat_write_inode(inode, wbc->sync_mode == WB_SYNC_ALL);
648
649 return err;
665} 650}
666 651
667int fat_sync_inode(struct inode *inode) 652int fat_sync_inode(struct inode *inode)
@@ -678,8 +663,6 @@ static const struct super_operations fat_sops = {
678 .write_inode = fat_write_inode, 663 .write_inode = fat_write_inode,
679 .evict_inode = fat_evict_inode, 664 .evict_inode = fat_evict_inode,
680 .put_super = fat_put_super, 665 .put_super = fat_put_super,
681 .write_super = fat_write_super,
682 .sync_fs = fat_sync_fs,
683 .statfs = fat_statfs, 666 .statfs = fat_statfs,
684 .remount_fs = fat_remount, 667 .remount_fs = fat_remount,
685 668
@@ -752,10 +735,9 @@ static struct dentry *fat_fh_to_dentry(struct super_block *sb,
752} 735}
753 736
754static int 737static int
755fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable) 738fat_encode_fh(struct inode *inode, __u32 *fh, int *lenp, struct inode *parent)
756{ 739{
757 int len = *lenp; 740 int len = *lenp;
758 struct inode *inode = de->d_inode;
759 u32 ipos_h, ipos_m, ipos_l; 741 u32 ipos_h, ipos_m, ipos_l;
760 742
761 if (len < 5) { 743 if (len < 5) {
@@ -771,9 +753,9 @@ fat_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable)
771 fh[1] = inode->i_generation; 753 fh[1] = inode->i_generation;
772 fh[2] = ipos_h; 754 fh[2] = ipos_h;
773 fh[3] = ipos_m | MSDOS_I(inode)->i_logstart; 755 fh[3] = ipos_m | MSDOS_I(inode)->i_logstart;
774 spin_lock(&de->d_lock); 756 fh[4] = ipos_l;
775 fh[4] = ipos_l | MSDOS_I(de->d_parent->d_inode)->i_logstart; 757 if (parent)
776 spin_unlock(&de->d_lock); 758 fh[4] |= MSDOS_I(parent)->i_logstart;
777 return 3; 759 return 3;
778} 760}
779 761
@@ -1244,6 +1226,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1244 void (*setup)(struct super_block *)) 1226 void (*setup)(struct super_block *))
1245{ 1227{
1246 struct inode *root_inode = NULL, *fat_inode = NULL; 1228 struct inode *root_inode = NULL, *fat_inode = NULL;
1229 struct inode *fsinfo_inode = NULL;
1247 struct buffer_head *bh; 1230 struct buffer_head *bh;
1248 struct fat_boot_sector *b; 1231 struct fat_boot_sector *b;
1249 struct msdos_sb_info *sbi; 1232 struct msdos_sb_info *sbi;
@@ -1490,6 +1473,14 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat,
1490 goto out_fail; 1473 goto out_fail;
1491 MSDOS_I(fat_inode)->i_pos = 0; 1474 MSDOS_I(fat_inode)->i_pos = 0;
1492 sbi->fat_inode = fat_inode; 1475 sbi->fat_inode = fat_inode;
1476
1477 fsinfo_inode = new_inode(sb);
1478 if (!fsinfo_inode)
1479 goto out_fail;
1480 fsinfo_inode->i_ino = MSDOS_FSINFO_INO;
1481 sbi->fsinfo_inode = fsinfo_inode;
1482 insert_inode_hash(fsinfo_inode);
1483
1493 root_inode = new_inode(sb); 1484 root_inode = new_inode(sb);
1494 if (!root_inode) 1485 if (!root_inode)
1495 goto out_fail; 1486 goto out_fail;
@@ -1516,6 +1507,8 @@ out_invalid:
1516 fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem"); 1507 fat_msg(sb, KERN_INFO, "Can't find a valid FAT filesystem");
1517 1508
1518out_fail: 1509out_fail:
1510 if (fsinfo_inode)
1511 iput(fsinfo_inode);
1519 if (fat_inode) 1512 if (fat_inode)
1520 iput(fat_inode); 1513 iput(fat_inode);
1521 unload_nls(sbi->nls_io); 1514 unload_nls(sbi->nls_io);
diff --git a/fs/fcntl.c b/fs/fcntl.c
index d078b75572a7..81b70e665bf0 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -442,28 +442,24 @@ static int check_fcntl_cmd(unsigned cmd)
442SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg) 442SYSCALL_DEFINE3(fcntl, unsigned int, fd, unsigned int, cmd, unsigned long, arg)
443{ 443{
444 struct file *filp; 444 struct file *filp;
445 int fput_needed;
445 long err = -EBADF; 446 long err = -EBADF;
446 447
447 filp = fget_raw(fd); 448 filp = fget_raw_light(fd, &fput_needed);
448 if (!filp) 449 if (!filp)
449 goto out; 450 goto out;
450 451
451 if (unlikely(filp->f_mode & FMODE_PATH)) { 452 if (unlikely(filp->f_mode & FMODE_PATH)) {
452 if (!check_fcntl_cmd(cmd)) { 453 if (!check_fcntl_cmd(cmd))
453 fput(filp); 454 goto out1;
454 goto out;
455 }
456 } 455 }
457 456
458 err = security_file_fcntl(filp, cmd, arg); 457 err = security_file_fcntl(filp, cmd, arg);
459 if (err) { 458 if (!err)
460 fput(filp); 459 err = do_fcntl(fd, cmd, arg, filp);
461 return err;
462 }
463 460
464 err = do_fcntl(fd, cmd, arg, filp); 461out1:
465 462 fput_light(filp, fput_needed);
466 fput(filp);
467out: 463out:
468 return err; 464 return err;
469} 465}
@@ -473,26 +469,21 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
473 unsigned long, arg) 469 unsigned long, arg)
474{ 470{
475 struct file * filp; 471 struct file * filp;
476 long err; 472 long err = -EBADF;
473 int fput_needed;
477 474
478 err = -EBADF; 475 filp = fget_raw_light(fd, &fput_needed);
479 filp = fget_raw(fd);
480 if (!filp) 476 if (!filp)
481 goto out; 477 goto out;
482 478
483 if (unlikely(filp->f_mode & FMODE_PATH)) { 479 if (unlikely(filp->f_mode & FMODE_PATH)) {
484 if (!check_fcntl_cmd(cmd)) { 480 if (!check_fcntl_cmd(cmd))
485 fput(filp); 481 goto out1;
486 goto out;
487 }
488 } 482 }
489 483
490 err = security_file_fcntl(filp, cmd, arg); 484 err = security_file_fcntl(filp, cmd, arg);
491 if (err) { 485 if (err)
492 fput(filp); 486 goto out1;
493 return err;
494 }
495 err = -EBADF;
496 487
497 switch (cmd) { 488 switch (cmd) {
498 case F_GETLK64: 489 case F_GETLK64:
@@ -507,7 +498,8 @@ SYSCALL_DEFINE3(fcntl64, unsigned int, fd, unsigned int, cmd,
507 err = do_fcntl(fd, cmd, arg, filp); 498 err = do_fcntl(fd, cmd, arg, filp);
508 break; 499 break;
509 } 500 }
510 fput(filp); 501out1:
502 fput_light(filp, fput_needed);
511out: 503out:
512 return err; 504 return err;
513} 505}
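
Both fcntl hunks above switch from fget_raw()/fput() to the light variants, which skip the file reference-count bump when the descriptor table is not shared and record in fput_needed whether a reference was actually taken. The calling pattern is simply:

	int fput_needed;
	struct file *filp;

	filp = fget_raw_light(fd, &fput_needed);	/* may skip the refcount get */
	if (!filp)
		return -EBADF;
	/* ... operate on filp ... */
	fput_light(filp, fput_needed);			/* release only what was taken */
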
diff --git a/fs/file_table.c b/fs/file_table.c
index 70f2a0fd6aec..a305d9e2d1b2 100644
--- a/fs/file_table.c
+++ b/fs/file_table.c
@@ -34,7 +34,6 @@ struct files_stat_struct files_stat = {
34 .max_files = NR_FILE 34 .max_files = NR_FILE
35}; 35};
36 36
37DECLARE_LGLOCK(files_lglock);
38DEFINE_LGLOCK(files_lglock); 37DEFINE_LGLOCK(files_lglock);
39 38
40/* SLAB cache for file structures */ 39/* SLAB cache for file structures */
@@ -421,9 +420,9 @@ static inline void __file_sb_list_add(struct file *file, struct super_block *sb)
421 */ 420 */
422void file_sb_list_add(struct file *file, struct super_block *sb) 421void file_sb_list_add(struct file *file, struct super_block *sb)
423{ 422{
424 lg_local_lock(files_lglock); 423 lg_local_lock(&files_lglock);
425 __file_sb_list_add(file, sb); 424 __file_sb_list_add(file, sb);
426 lg_local_unlock(files_lglock); 425 lg_local_unlock(&files_lglock);
427} 426}
428 427
429/** 428/**
@@ -436,9 +435,9 @@ void file_sb_list_add(struct file *file, struct super_block *sb)
436void file_sb_list_del(struct file *file) 435void file_sb_list_del(struct file *file)
437{ 436{
438 if (!list_empty(&file->f_u.fu_list)) { 437 if (!list_empty(&file->f_u.fu_list)) {
439 lg_local_lock_cpu(files_lglock, file_list_cpu(file)); 438 lg_local_lock_cpu(&files_lglock, file_list_cpu(file));
440 list_del_init(&file->f_u.fu_list); 439 list_del_init(&file->f_u.fu_list);
441 lg_local_unlock_cpu(files_lglock, file_list_cpu(file)); 440 lg_local_unlock_cpu(&files_lglock, file_list_cpu(file));
442 } 441 }
443} 442}
444 443
@@ -485,7 +484,7 @@ void mark_files_ro(struct super_block *sb)
485 struct file *f; 484 struct file *f;
486 485
487retry: 486retry:
488 lg_global_lock(files_lglock); 487 lg_global_lock(&files_lglock);
489 do_file_list_for_each_entry(sb, f) { 488 do_file_list_for_each_entry(sb, f) {
490 struct vfsmount *mnt; 489 struct vfsmount *mnt;
491 if (!S_ISREG(f->f_path.dentry->d_inode->i_mode)) 490 if (!S_ISREG(f->f_path.dentry->d_inode->i_mode))
@@ -502,12 +501,12 @@ retry:
502 file_release_write(f); 501 file_release_write(f);
503 mnt = mntget(f->f_path.mnt); 502 mnt = mntget(f->f_path.mnt);
504 /* This can sleep, so we can't hold the spinlock. */ 503 /* This can sleep, so we can't hold the spinlock. */
505 lg_global_unlock(files_lglock); 504 lg_global_unlock(&files_lglock);
506 mnt_drop_write(mnt); 505 mnt_drop_write(mnt);
507 mntput(mnt); 506 mntput(mnt);
508 goto retry; 507 goto retry;
509 } while_file_list_for_each_entry; 508 } while_file_list_for_each_entry;
510 lg_global_unlock(files_lglock); 509 lg_global_unlock(&files_lglock);
511} 510}
512 511
513void __init files_init(unsigned long mempages) 512void __init files_init(unsigned long mempages)
@@ -525,6 +524,6 @@ void __init files_init(unsigned long mempages)
525 n = (mempages * (PAGE_SIZE / 1024)) / 10; 524 n = (mempages * (PAGE_SIZE / 1024)) / 10;
526 files_stat.max_files = max_t(unsigned long, n, NR_FILE); 525 files_stat.max_files = max_t(unsigned long, n, NR_FILE);
527 files_defer_init(); 526 files_defer_init();
528 lg_lock_init(files_lglock); 527 lg_lock_init(&files_lglock, "files_lglock");
529 percpu_counter_init(&nr_files, 0); 528 percpu_counter_init(&nr_files, 0);
530} 529}
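
The file_table.c changes track the lglock API rework: a lock defined with DEFINE_LGLOCK() is now initialised and taken through pointer-based calls instead of name-pasting macros, which is why the separate DECLARE_LGLOCK() line disappears. In sketch form, mirroring the calls above:

	DEFINE_LGLOCK(files_lglock);

	lg_lock_init(&files_lglock, "files_lglock");	/* name is used for lockdep */

	lg_local_lock(&files_lglock);		/* per-CPU fast path */
	/* ... touch this CPU's file list ... */
	lg_local_unlock(&files_lglock);

	lg_global_lock(&files_lglock);		/* every CPU, e.g. mark_files_ro() */
	/* ... walk all per-CPU lists ... */
	lg_global_unlock(&files_lglock);
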
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 8d2fb8c88cf3..41a3ccff18d8 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -664,6 +664,7 @@ static long writeback_sb_inodes(struct super_block *sb,
664 /* Wait for I_SYNC. This function drops i_lock... */ 664 /* Wait for I_SYNC. This function drops i_lock... */
665 inode_sleep_on_writeback(inode); 665 inode_sleep_on_writeback(inode);
666 /* Inode may be gone, start again */ 666 /* Inode may be gone, start again */
667 spin_lock(&wb->list_lock);
667 continue; 668 continue;
668 } 669 }
669 inode->i_state |= I_SYNC; 670 inode->i_state |= I_SYNC;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 42593c587d48..03ff5b1eba93 100644
--- a/fs/fuse/control.c
+++ b/fs/fuse/control.c
@@ -75,19 +75,13 @@ static ssize_t fuse_conn_limit_write(struct file *file, const char __user *buf,
75 unsigned global_limit) 75 unsigned global_limit)
76{ 76{
77 unsigned long t; 77 unsigned long t;
78 char tmp[32];
79 unsigned limit = (1 << 16) - 1; 78 unsigned limit = (1 << 16) - 1;
80 int err; 79 int err;
81 80
82 if (*ppos || count >= sizeof(tmp) - 1) 81 if (*ppos)
83 return -EINVAL;
84
85 if (copy_from_user(tmp, buf, count))
86 return -EINVAL; 82 return -EINVAL;
87 83
88 tmp[count] = '\0'; 84 err = kstrtoul_from_user(buf, count, 0, &t);
89
90 err = strict_strtoul(tmp, 0, &t);
91 if (err) 85 if (err)
92 return err; 86 return err;
93 87
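
The control.c hunk replaces an open-coded copy_from_user() plus strict_strtoul() with kstrtoul_from_user(), which performs the bounded copy and the parse in one call. A minimal write handler using it might look like this (the surrounding names are illustrative only):

	static ssize_t limit_write(struct file *file, const char __user *buf,
				   size_t count, loff_t *ppos)
	{
		unsigned long val;
		int err;

		err = kstrtoul_from_user(buf, count, 0, &val);	/* base 0 = auto-detect */
		if (err)
			return err;
		/* ... apply val ... */
		return count;
	}
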
diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
index df5ac048dc74..334e0b18a014 100644
--- a/fs/fuse/dir.c
+++ b/fs/fuse/dir.c
@@ -775,6 +775,8 @@ static int fuse_link(struct dentry *entry, struct inode *newdir,
775static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr, 775static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
776 struct kstat *stat) 776 struct kstat *stat)
777{ 777{
778 unsigned int blkbits;
779
778 stat->dev = inode->i_sb->s_dev; 780 stat->dev = inode->i_sb->s_dev;
779 stat->ino = attr->ino; 781 stat->ino = attr->ino;
780 stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); 782 stat->mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
@@ -790,7 +792,13 @@ static void fuse_fillattr(struct inode *inode, struct fuse_attr *attr,
790 stat->ctime.tv_nsec = attr->ctimensec; 792 stat->ctime.tv_nsec = attr->ctimensec;
791 stat->size = attr->size; 793 stat->size = attr->size;
792 stat->blocks = attr->blocks; 794 stat->blocks = attr->blocks;
793 stat->blksize = (1 << inode->i_blkbits); 795
796 if (attr->blksize != 0)
797 blkbits = ilog2(attr->blksize);
798 else
799 blkbits = inode->i_sb->s_blocksize_bits;
800
801 stat->blksize = 1 << blkbits;
794} 802}
795 803
796static int fuse_do_getattr(struct inode *inode, struct kstat *stat, 804static int fuse_do_getattr(struct inode *inode, struct kstat *stat,
@@ -863,6 +871,7 @@ int fuse_update_attributes(struct inode *inode, struct kstat *stat,
863 if (stat) { 871 if (stat) {
864 generic_fillattr(inode, stat); 872 generic_fillattr(inode, stat);
865 stat->mode = fi->orig_i_mode; 873 stat->mode = fi->orig_i_mode;
874 stat->ino = fi->orig_ino;
866 } 875 }
867 } 876 }
868 877
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 504e61b7fd75..b321a688cde7 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -962,7 +962,9 @@ static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
962 if (err) 962 if (err)
963 goto out; 963 goto out;
964 964
965 file_update_time(file); 965 err = file_update_time(file);
966 if (err)
967 goto out;
966 968
967 if (file->f_flags & O_DIRECT) { 969 if (file->f_flags & O_DIRECT) {
968 written = generic_file_direct_write(iocb, iov, &nr_segs, 970 written = generic_file_direct_write(iocb, iov, &nr_segs,
@@ -2171,6 +2173,44 @@ fuse_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov,
2171 return ret; 2173 return ret;
2172} 2174}
2173 2175
2176long fuse_file_fallocate(struct file *file, int mode, loff_t offset,
2177 loff_t length)
2178{
2179 struct fuse_file *ff = file->private_data;
2180 struct fuse_conn *fc = ff->fc;
2181 struct fuse_req *req;
2182 struct fuse_fallocate_in inarg = {
2183 .fh = ff->fh,
2184 .offset = offset,
2185 .length = length,
2186 .mode = mode
2187 };
2188 int err;
2189
2190 if (fc->no_fallocate)
2191 return -EOPNOTSUPP;
2192
2193 req = fuse_get_req(fc);
2194 if (IS_ERR(req))
2195 return PTR_ERR(req);
2196
2197 req->in.h.opcode = FUSE_FALLOCATE;
2198 req->in.h.nodeid = ff->nodeid;
2199 req->in.numargs = 1;
2200 req->in.args[0].size = sizeof(inarg);
2201 req->in.args[0].value = &inarg;
2202 fuse_request_send(fc, req);
2203 err = req->out.h.error;
2204 if (err == -ENOSYS) {
2205 fc->no_fallocate = 1;
2206 err = -EOPNOTSUPP;
2207 }
2208 fuse_put_request(fc, req);
2209
2210 return err;
2211}
2212EXPORT_SYMBOL_GPL(fuse_file_fallocate);
2213
2174static const struct file_operations fuse_file_operations = { 2214static const struct file_operations fuse_file_operations = {
2175 .llseek = fuse_file_llseek, 2215 .llseek = fuse_file_llseek,
2176 .read = do_sync_read, 2216 .read = do_sync_read,
@@ -2188,6 +2228,7 @@ static const struct file_operations fuse_file_operations = {
2188 .unlocked_ioctl = fuse_file_ioctl, 2228 .unlocked_ioctl = fuse_file_ioctl,
2189 .compat_ioctl = fuse_file_compat_ioctl, 2229 .compat_ioctl = fuse_file_compat_ioctl,
2190 .poll = fuse_file_poll, 2230 .poll = fuse_file_poll,
2231 .fallocate = fuse_file_fallocate,
2191}; 2232};
2192 2233
2193static const struct file_operations fuse_direct_io_file_operations = { 2234static const struct file_operations fuse_direct_io_file_operations = {
@@ -2204,6 +2245,7 @@ static const struct file_operations fuse_direct_io_file_operations = {
2204 .unlocked_ioctl = fuse_file_ioctl, 2245 .unlocked_ioctl = fuse_file_ioctl,
2205 .compat_ioctl = fuse_file_compat_ioctl, 2246 .compat_ioctl = fuse_file_compat_ioctl,
2206 .poll = fuse_file_poll, 2247 .poll = fuse_file_poll,
2248 .fallocate = fuse_file_fallocate,
2207 /* no splice_read */ 2249 /* no splice_read */
2208}; 2250};
2209 2251
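
With FUSE_FALLOCATE wired into both file_operations tables, a FUSE filesystem that implements the new opcode can service the ordinary fallocate(2) system call; servers that do not implement it keep returning -EOPNOTSUPP via the no_fallocate flag after the first failed attempt. A small userspace trigger, assuming a file on a FUSE mount at a made-up path:

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdio.h>
	#include <linux/falloc.h>	/* FALLOC_FL_KEEP_SIZE */

	int main(void)
	{
		int fd = open("/mnt/fuse/testfile", O_RDWR | O_CREAT, 0644);

		if (fd < 0)
			return 1;
		/* preallocate 1 MiB without changing the visible file size */
		if (fallocate(fd, FALLOC_FL_KEEP_SIZE, 0, 1 << 20) != 0)
			perror("fallocate");	/* EOPNOTSUPP if the server lacks it */
		return 0;
	}
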
diff --git a/fs/fuse/fuse_i.h b/fs/fuse/fuse_i.h
index 572cefc78012..771fb6322c07 100644
--- a/fs/fuse/fuse_i.h
+++ b/fs/fuse/fuse_i.h
@@ -82,6 +82,9 @@ struct fuse_inode {
82 preserve the original mode */ 82 preserve the original mode */
83 umode_t orig_i_mode; 83 umode_t orig_i_mode;
84 84
85 /** 64 bit inode number */
86 u64 orig_ino;
87
85 /** Version of last attribute change */ 88 /** Version of last attribute change */
86 u64 attr_version; 89 u64 attr_version;
87 90
@@ -478,6 +481,9 @@ struct fuse_conn {
478 /** Are BSD file locking primitives not implemented by fs? */ 481 /** Are BSD file locking primitives not implemented by fs? */
479 unsigned no_flock:1; 482 unsigned no_flock:1;
480 483
484 /** Is fallocate not implemented by fs? */
485 unsigned no_fallocate:1;
486
481 /** The number of requests waiting for completion */ 487 /** The number of requests waiting for completion */
482 atomic_t num_waiting; 488 atomic_t num_waiting;
483 489
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 56f6dcf30768..1cd61652018c 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -91,6 +91,7 @@ static struct inode *fuse_alloc_inode(struct super_block *sb)
91 fi->nlookup = 0; 91 fi->nlookup = 0;
92 fi->attr_version = 0; 92 fi->attr_version = 0;
93 fi->writectr = 0; 93 fi->writectr = 0;
94 fi->orig_ino = 0;
94 INIT_LIST_HEAD(&fi->write_files); 95 INIT_LIST_HEAD(&fi->write_files);
95 INIT_LIST_HEAD(&fi->queued_writes); 96 INIT_LIST_HEAD(&fi->queued_writes);
96 INIT_LIST_HEAD(&fi->writepages); 97 INIT_LIST_HEAD(&fi->writepages);
@@ -139,6 +140,18 @@ static int fuse_remount_fs(struct super_block *sb, int *flags, char *data)
139 return 0; 140 return 0;
140} 141}
141 142
143/*
144 * ino_t is 32-bits on 32-bit arch. We have to squash the 64-bit value down
145 * so that it will fit.
146 */
147static ino_t fuse_squash_ino(u64 ino64)
148{
149 ino_t ino = (ino_t) ino64;
150 if (sizeof(ino_t) < sizeof(u64))
151 ino ^= ino64 >> (sizeof(u64) - sizeof(ino_t)) * 8;
152 return ino;
153}
154
142void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr, 155void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
143 u64 attr_valid) 156 u64 attr_valid)
144{ 157{
@@ -148,7 +161,7 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
148 fi->attr_version = ++fc->attr_version; 161 fi->attr_version = ++fc->attr_version;
149 fi->i_time = attr_valid; 162 fi->i_time = attr_valid;
150 163
151 inode->i_ino = attr->ino; 164 inode->i_ino = fuse_squash_ino(attr->ino);
152 inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777); 165 inode->i_mode = (inode->i_mode & S_IFMT) | (attr->mode & 07777);
153 set_nlink(inode, attr->nlink); 166 set_nlink(inode, attr->nlink);
154 inode->i_uid = attr->uid; 167 inode->i_uid = attr->uid;
@@ -174,6 +187,8 @@ void fuse_change_attributes_common(struct inode *inode, struct fuse_attr *attr,
174 fi->orig_i_mode = inode->i_mode; 187 fi->orig_i_mode = inode->i_mode;
175 if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS)) 188 if (!(fc->flags & FUSE_DEFAULT_PERMISSIONS))
176 inode->i_mode &= ~S_ISVTX; 189 inode->i_mode &= ~S_ISVTX;
190
191 fi->orig_ino = attr->ino;
177} 192}
178 193
179void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr, 194void fuse_change_attributes(struct inode *inode, struct fuse_attr *attr,
@@ -627,12 +642,10 @@ static struct dentry *fuse_get_dentry(struct super_block *sb,
627 return ERR_PTR(err); 642 return ERR_PTR(err);
628} 643}
629 644
630static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len, 645static int fuse_encode_fh(struct inode *inode, u32 *fh, int *max_len,
631 int connectable) 646 struct inode *parent)
632{ 647{
633 struct inode *inode = dentry->d_inode; 648 int len = parent ? 6 : 3;
634 bool encode_parent = connectable && !S_ISDIR(inode->i_mode);
635 int len = encode_parent ? 6 : 3;
636 u64 nodeid; 649 u64 nodeid;
637 u32 generation; 650 u32 generation;
638 651
@@ -648,14 +661,9 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
648 fh[1] = (u32)(nodeid & 0xffffffff); 661 fh[1] = (u32)(nodeid & 0xffffffff);
649 fh[2] = generation; 662 fh[2] = generation;
650 663
651 if (encode_parent) { 664 if (parent) {
652 struct inode *parent;
653
654 spin_lock(&dentry->d_lock);
655 parent = dentry->d_parent->d_inode;
656 nodeid = get_fuse_inode(parent)->nodeid; 665 nodeid = get_fuse_inode(parent)->nodeid;
657 generation = parent->i_generation; 666 generation = parent->i_generation;
658 spin_unlock(&dentry->d_lock);
659 667
660 fh[3] = (u32)(nodeid >> 32); 668 fh[3] = (u32)(nodeid >> 32);
661 fh[4] = (u32)(nodeid & 0xffffffff); 669 fh[4] = (u32)(nodeid & 0xffffffff);
@@ -663,7 +671,7 @@ static int fuse_encode_fh(struct dentry *dentry, u32 *fh, int *max_len,
663 } 671 }
664 672
665 *max_len = len; 673 *max_len = len;
666 return encode_parent ? 0x82 : 0x81; 674 return parent ? 0x82 : 0x81;
667} 675}
668 676
669static struct dentry *fuse_fh_to_dentry(struct super_block *sb, 677static struct dentry *fuse_fh_to_dentry(struct super_block *sb,
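
fuse_squash_ino() keeps the full 64-bit inode number in fi->orig_ino for stat() while folding it into a 32-bit i_ino on 32-bit architectures by XOR-ing the high word into the low word. The arithmetic on its own, runnable in userspace:

	#include <stdint.h>
	#include <stdio.h>

	/* same fold as fuse_squash_ino() when ino_t is 32 bits wide */
	static uint32_t squash_ino(uint64_t ino64)
	{
		return (uint32_t)ino64 ^ (uint32_t)(ino64 >> 32);
	}

	int main(void)
	{
		printf("%#x\n", squash_ino(0x123456789abcdef0ULL));	/* 0x88888888 */
		return 0;
	}
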
diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c
index 70ba891654f8..e8ed6d4a6181 100644
--- a/fs/gfs2/export.c
+++ b/fs/gfs2/export.c
@@ -28,15 +28,14 @@
28#define GFS2_LARGE_FH_SIZE 8 28#define GFS2_LARGE_FH_SIZE 8
29#define GFS2_OLD_FH_SIZE 10 29#define GFS2_OLD_FH_SIZE 10
30 30
31static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len, 31static int gfs2_encode_fh(struct inode *inode, __u32 *p, int *len,
32 int connectable) 32 struct inode *parent)
33{ 33{
34 __be32 *fh = (__force __be32 *)p; 34 __be32 *fh = (__force __be32 *)p;
35 struct inode *inode = dentry->d_inode;
36 struct super_block *sb = inode->i_sb; 35 struct super_block *sb = inode->i_sb;
37 struct gfs2_inode *ip = GFS2_I(inode); 36 struct gfs2_inode *ip = GFS2_I(inode);
38 37
39 if (connectable && (*len < GFS2_LARGE_FH_SIZE)) { 38 if (parent && (*len < GFS2_LARGE_FH_SIZE)) {
40 *len = GFS2_LARGE_FH_SIZE; 39 *len = GFS2_LARGE_FH_SIZE;
41 return 255; 40 return 255;
42 } else if (*len < GFS2_SMALL_FH_SIZE) { 41 } else if (*len < GFS2_SMALL_FH_SIZE) {
@@ -50,14 +49,10 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
50 fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); 49 fh[3] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
51 *len = GFS2_SMALL_FH_SIZE; 50 *len = GFS2_SMALL_FH_SIZE;
52 51
53 if (!connectable || inode == sb->s_root->d_inode) 52 if (!parent || inode == sb->s_root->d_inode)
54 return *len; 53 return *len;
55 54
56 spin_lock(&dentry->d_lock); 55 ip = GFS2_I(parent);
57 inode = dentry->d_parent->d_inode;
58 ip = GFS2_I(inode);
59 igrab(inode);
60 spin_unlock(&dentry->d_lock);
61 56
62 fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32); 57 fh[4] = cpu_to_be32(ip->i_no_formal_ino >> 32);
63 fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF); 58 fh[5] = cpu_to_be32(ip->i_no_formal_ino & 0xFFFFFFFF);
@@ -65,8 +60,6 @@ static int gfs2_encode_fh(struct dentry *dentry, __u32 *p, int *len,
65 fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF); 60 fh[7] = cpu_to_be32(ip->i_no_addr & 0xFFFFFFFF);
66 *len = GFS2_LARGE_FH_SIZE; 61 *len = GFS2_LARGE_FH_SIZE;
67 62
68 iput(inode);
69
70 return *len; 63 return *len;
71} 64}
72 65
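
The fat, fuse and gfs2 hunks above are all sites of the same tree-wide conversion: ->encode_fh() now receives the inode and an optional parent inode directly, instead of a dentry plus a "connectable" flag, so the callbacks no longer have to take d_lock and chase d_parent themselves. The converted callback shape, as used by all three filesystems:

	struct export_operations {
		int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
				 struct inode *parent);
		/* ... remaining callbacks unchanged ... */
	};
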
diff --git a/fs/hfsplus/ioctl.c b/fs/hfsplus/ioctl.c
index c640ba57074b..09addc8615fa 100644
--- a/fs/hfsplus/ioctl.c
+++ b/fs/hfsplus/ioctl.c
@@ -31,6 +31,7 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
31 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); 31 struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb);
32 struct hfsplus_vh *vh = sbi->s_vhdr; 32 struct hfsplus_vh *vh = sbi->s_vhdr;
33 struct hfsplus_vh *bvh = sbi->s_backup_vhdr; 33 struct hfsplus_vh *bvh = sbi->s_backup_vhdr;
34 u32 cnid = (unsigned long)dentry->d_fsdata;
34 35
35 if (!capable(CAP_SYS_ADMIN)) 36 if (!capable(CAP_SYS_ADMIN))
36 return -EPERM; 37 return -EPERM;
@@ -41,8 +42,12 @@ static int hfsplus_ioctl_bless(struct file *file, int __user *user_flags)
41 vh->finder_info[0] = bvh->finder_info[0] = 42 vh->finder_info[0] = bvh->finder_info[0] =
42 cpu_to_be32(parent_ino(dentry)); 43 cpu_to_be32(parent_ino(dentry));
43 44
44 /* Bootloader */ 45 /*
45 vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(inode->i_ino); 46 * Bootloader. Just using the inode here breaks in the case of
47 * hard links - the firmware wants the ID of the hard link file,
48 * but the inode points at the indirect inode
49 */
50 vh->finder_info[1] = bvh->finder_info[1] = cpu_to_be32(cnid);
46 51
47 /* Per spec, the OS X system folder - same as finder_info[0] here */ 52 /* Per spec, the OS X system folder - same as finder_info[0] here */
48 vh->finder_info[5] = bvh->finder_info[5] = 53 vh->finder_info[5] = bvh->finder_info[5] =
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index 7daf4b852d1c..90effcccca9a 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -56,7 +56,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
56 DECLARE_COMPLETION_ONSTACK(wait); 56 DECLARE_COMPLETION_ONSTACK(wait);
57 struct bio *bio; 57 struct bio *bio;
58 int ret = 0; 58 int ret = 0;
59 unsigned int io_size; 59 u64 io_size;
60 loff_t start; 60 loff_t start;
61 int offset; 61 int offset;
62 62
diff --git a/fs/hpfs/alloc.c b/fs/hpfs/alloc.c
index 7a5eb2c718c8..cdb84a838068 100644
--- a/fs/hpfs/alloc.c
+++ b/fs/hpfs/alloc.c
@@ -16,9 +16,9 @@
16static int chk_if_allocated(struct super_block *s, secno sec, char *msg) 16static int chk_if_allocated(struct super_block *s, secno sec, char *msg)
17{ 17{
18 struct quad_buffer_head qbh; 18 struct quad_buffer_head qbh;
19 u32 *bmp; 19 __le32 *bmp;
20 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail; 20 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "chk"))) goto fail;
21 if ((cpu_to_le32(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) { 21 if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1) {
22 hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec); 22 hpfs_error(s, "sector '%s' - %08x not allocated in bitmap", msg, sec);
23 goto fail1; 23 goto fail1;
24 } 24 }
@@ -62,7 +62,7 @@ int hpfs_chk_sectors(struct super_block *s, secno start, int len, char *msg)
62static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward) 62static secno alloc_in_bmp(struct super_block *s, secno near, unsigned n, unsigned forward)
63{ 63{
64 struct quad_buffer_head qbh; 64 struct quad_buffer_head qbh;
65 unsigned *bmp; 65 __le32 *bmp;
66 unsigned bs = near & ~0x3fff; 66 unsigned bs = near & ~0x3fff;
67 unsigned nr = (near & 0x3fff) & ~(n - 1); 67 unsigned nr = (near & 0x3fff) & ~(n - 1);
68 /*unsigned mnr;*/ 68 /*unsigned mnr;*/
@@ -236,7 +236,7 @@ static secno alloc_in_dirband(struct super_block *s, secno near)
236int hpfs_alloc_if_possible(struct super_block *s, secno sec) 236int hpfs_alloc_if_possible(struct super_block *s, secno sec)
237{ 237{
238 struct quad_buffer_head qbh; 238 struct quad_buffer_head qbh;
239 u32 *bmp; 239 __le32 *bmp;
240 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end; 240 if (!(bmp = hpfs_map_bitmap(s, sec >> 14, &qbh, "aip"))) goto end;
241 if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) { 241 if (le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) & (1 << (sec & 0x1f))) {
242 bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f))); 242 bmp[(sec & 0x3fff) >> 5] &= cpu_to_le32(~(1 << (sec & 0x1f)));
@@ -254,7 +254,7 @@ int hpfs_alloc_if_possible(struct super_block *s, secno sec)
254void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n) 254void hpfs_free_sectors(struct super_block *s, secno sec, unsigned n)
255{ 255{
256 struct quad_buffer_head qbh; 256 struct quad_buffer_head qbh;
257 u32 *bmp; 257 __le32 *bmp;
258 struct hpfs_sb_info *sbi = hpfs_sb(s); 258 struct hpfs_sb_info *sbi = hpfs_sb(s);
259 /*printk("2 - ");*/ 259 /*printk("2 - ");*/
260 if (!n) return; 260 if (!n) return;
@@ -299,7 +299,7 @@ int hpfs_check_free_dnodes(struct super_block *s, int n)
299 int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14; 299 int n_bmps = (hpfs_sb(s)->sb_fs_size + 0x4000 - 1) >> 14;
300 int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff; 300 int b = hpfs_sb(s)->sb_c_bitmap & 0x0fffffff;
301 int i, j; 301 int i, j;
302 u32 *bmp; 302 __le32 *bmp;
303 struct quad_buffer_head qbh; 303 struct quad_buffer_head qbh;
304 if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 304 if ((bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
305 for (j = 0; j < 512; j++) { 305 for (j = 0; j < 512; j++) {
@@ -351,7 +351,7 @@ void hpfs_free_dnode(struct super_block *s, dnode_secno dno)
351 hpfs_free_sectors(s, dno, 4); 351 hpfs_free_sectors(s, dno, 4);
352 } else { 352 } else {
353 struct quad_buffer_head qbh; 353 struct quad_buffer_head qbh;
354 u32 *bmp; 354 __le32 *bmp;
355 unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4; 355 unsigned ssec = (dno - hpfs_sb(s)->sb_dirband_start) / 4;
356 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) { 356 if (!(bmp = hpfs_map_dnode_bitmap(s, &qbh))) {
357 return; 357 return;
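
Besides re-typing the bitmap pointers as __le32 * so that sparse can check them, the first alloc.c hunk also turns a cpu_to_le32() into le32_to_cpu(). Because a 32-bit byte swap is its own inverse the generated code is identical, but the conversion is now stated the right way round: the on-disk little-endian word is brought into CPU order before the bit of interest is shifted out, as in the corrected test:

	__le32 *bmp;	/* on-disk, little-endian bitmap words */

	/* bring the disk word into CPU order, then test bit (sec & 0x1f) */
	if ((le32_to_cpu(bmp[(sec & 0x3fff) >> 5]) >> (sec & 0x1f)) & 1)
		hpfs_error(s, "sector '%s' - %08x not allocated in bitmap",
			   msg, sec);
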
diff --git a/fs/hpfs/anode.c b/fs/hpfs/anode.c
index 08b503e8ed29..4bae4a4a60b1 100644
--- a/fs/hpfs/anode.c
+++ b/fs/hpfs/anode.c
@@ -20,7 +20,7 @@ secno hpfs_bplus_lookup(struct super_block *s, struct inode *inode,
20 int c1, c2 = 0; 20 int c1, c2 = 0;
21 go_down: 21 go_down:
22 if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1; 22 if (hpfs_sb(s)->sb_chk) if (hpfs_stop_cycles(s, a, &c1, &c2, "hpfs_bplus_lookup")) return -1;
23 if (btree->internal) { 23 if (bp_internal(btree)) {
24 for (i = 0; i < btree->n_used_nodes; i++) 24 for (i = 0; i < btree->n_used_nodes; i++)
25 if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) { 25 if (le32_to_cpu(btree->u.internal[i].file_secno) > sec) {
26 a = le32_to_cpu(btree->u.internal[i].down); 26 a = le32_to_cpu(btree->u.internal[i].down);
@@ -82,7 +82,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
82 brelse(bh); 82 brelse(bh);
83 return -1; 83 return -1;
84 } 84 }
85 if (btree->internal) { 85 if (bp_internal(btree)) {
86 a = le32_to_cpu(btree->u.internal[n].down); 86 a = le32_to_cpu(btree->u.internal[n].down);
87 btree->u.internal[n].file_secno = cpu_to_le32(-1); 87 btree->u.internal[n].file_secno = cpu_to_le32(-1);
88 mark_buffer_dirty(bh); 88 mark_buffer_dirty(bh);
@@ -129,12 +129,12 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
129 } 129 }
130 if (a == node && fnod) { 130 if (a == node && fnod) {
131 anode->up = cpu_to_le32(node); 131 anode->up = cpu_to_le32(node);
132 anode->btree.fnode_parent = 1; 132 anode->btree.flags |= BP_fnode_parent;
133 anode->btree.n_used_nodes = btree->n_used_nodes; 133 anode->btree.n_used_nodes = btree->n_used_nodes;
134 anode->btree.first_free = btree->first_free; 134 anode->btree.first_free = btree->first_free;
135 anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes; 135 anode->btree.n_free_nodes = 40 - anode->btree.n_used_nodes;
136 memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12); 136 memcpy(&anode->u, &btree->u, btree->n_used_nodes * 12);
137 btree->internal = 1; 137 btree->flags |= BP_internal;
138 btree->n_free_nodes = 11; 138 btree->n_free_nodes = 11;
139 btree->n_used_nodes = 1; 139 btree->n_used_nodes = 1;
140 btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree); 140 btree->first_free = cpu_to_le16((char *)&(btree->u.internal[1]) - (char *)btree);
@@ -184,7 +184,10 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
184 hpfs_free_sectors(s, ra, 1); 184 hpfs_free_sectors(s, ra, 1);
185 if ((anode = hpfs_map_anode(s, na, &bh))) { 185 if ((anode = hpfs_map_anode(s, na, &bh))) {
186 anode->up = cpu_to_le32(up); 186 anode->up = cpu_to_le32(up);
187 anode->btree.fnode_parent = up == node && fnod; 187 if (up == node && fnod)
188 anode->btree.flags |= BP_fnode_parent;
189 else
190 anode->btree.flags &= ~BP_fnode_parent;
188 mark_buffer_dirty(bh); 191 mark_buffer_dirty(bh);
189 brelse(bh); 192 brelse(bh);
190 } 193 }
@@ -198,7 +201,7 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
198 if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) { 201 if ((new_anode = hpfs_alloc_anode(s, a, &na, &bh))) {
199 anode = new_anode; 202 anode = new_anode;
200 /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/ 203 /*anode->up = cpu_to_le32(up != -1 ? up : ra);*/
201 anode->btree.internal = 1; 204 anode->btree.flags |= BP_internal;
202 anode->btree.n_used_nodes = 1; 205 anode->btree.n_used_nodes = 1;
203 anode->btree.n_free_nodes = 59; 206 anode->btree.n_free_nodes = 59;
204 anode->btree.first_free = cpu_to_le16(16); 207 anode->btree.first_free = cpu_to_le16(16);
@@ -215,7 +218,8 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
215 } 218 }
216 if ((anode = hpfs_map_anode(s, na, &bh))) { 219 if ((anode = hpfs_map_anode(s, na, &bh))) {
217 anode->up = cpu_to_le32(node); 220 anode->up = cpu_to_le32(node);
218 if (fnod) anode->btree.fnode_parent = 1; 221 if (fnod)
222 anode->btree.flags |= BP_fnode_parent;
219 mark_buffer_dirty(bh); 223 mark_buffer_dirty(bh);
220 brelse(bh); 224 brelse(bh);
221 } 225 }
@@ -234,18 +238,19 @@ secno hpfs_add_sector_to_btree(struct super_block *s, secno node, int fnod, unsi
234 } 238 }
235 ranode->up = cpu_to_le32(node); 239 ranode->up = cpu_to_le32(node);
236 memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free)); 240 memcpy(&ranode->btree, btree, le16_to_cpu(btree->first_free));
237 if (fnod) ranode->btree.fnode_parent = 1; 241 if (fnod)
238 ranode->btree.n_free_nodes = (ranode->btree.internal ? 60 : 40) - ranode->btree.n_used_nodes; 242 ranode->btree.flags |= BP_fnode_parent;
239 if (ranode->btree.internal) for (n = 0; n < ranode->btree.n_used_nodes; n++) { 243 ranode->btree.n_free_nodes = (bp_internal(&ranode->btree) ? 60 : 40) - ranode->btree.n_used_nodes;
244 if (bp_internal(&ranode->btree)) for (n = 0; n < ranode->btree.n_used_nodes; n++) {
240 struct anode *unode; 245 struct anode *unode;
241 if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) { 246 if ((unode = hpfs_map_anode(s, le32_to_cpu(ranode->u.internal[n].down), &bh1))) {
242 unode->up = cpu_to_le32(ra); 247 unode->up = cpu_to_le32(ra);
243 unode->btree.fnode_parent = 0; 248 unode->btree.flags &= ~BP_fnode_parent;
244 mark_buffer_dirty(bh1); 249 mark_buffer_dirty(bh1);
245 brelse(bh1); 250 brelse(bh1);
246 } 251 }
247 } 252 }
248 btree->internal = 1; 253 btree->flags |= BP_internal;
249 btree->n_free_nodes = fnod ? 10 : 58; 254 btree->n_free_nodes = fnod ? 10 : 58;
250 btree->n_used_nodes = 2; 255 btree->n_used_nodes = 2;
251 btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree); 256 btree->first_free = cpu_to_le16((char *)&btree->u.internal[2] - (char *)btree);
@@ -278,7 +283,7 @@ void hpfs_remove_btree(struct super_block *s, struct bplus_header *btree)
278 int d1, d2; 283 int d1, d2;
279 go_down: 284 go_down:
280 d2 = 0; 285 d2 = 0;
281 while (btree1->internal) { 286 while (bp_internal(btree1)) {
282 ano = le32_to_cpu(btree1->u.internal[pos].down); 287 ano = le32_to_cpu(btree1->u.internal[pos].down);
283 if (level) brelse(bh); 288 if (level) brelse(bh);
284 if (hpfs_sb(s)->sb_chk) 289 if (hpfs_sb(s)->sb_chk)
@@ -412,13 +417,13 @@ void hpfs_truncate_btree(struct super_block *s, secno f, int fno, unsigned secs)
412 btree->n_free_nodes = 8; 417 btree->n_free_nodes = 8;
413 btree->n_used_nodes = 0; 418 btree->n_used_nodes = 0;
414 btree->first_free = cpu_to_le16(8); 419 btree->first_free = cpu_to_le16(8);
415 btree->internal = 0; 420 btree->flags &= ~BP_internal;
416 mark_buffer_dirty(bh); 421 mark_buffer_dirty(bh);
417 } else hpfs_free_sectors(s, f, 1); 422 } else hpfs_free_sectors(s, f, 1);
418 brelse(bh); 423 brelse(bh);
419 return; 424 return;
420 } 425 }
421 while (btree->internal) { 426 while (bp_internal(btree)) {
422 nodes = btree->n_used_nodes + btree->n_free_nodes; 427 nodes = btree->n_used_nodes + btree->n_free_nodes;
423 for (i = 0; i < btree->n_used_nodes; i++) 428 for (i = 0; i < btree->n_used_nodes; i++)
424 if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f; 429 if (le32_to_cpu(btree->u.internal[i].file_secno) >= secs) goto f;
@@ -479,13 +484,13 @@ void hpfs_remove_fnode(struct super_block *s, fnode_secno fno)
479 struct extended_attribute *ea; 484 struct extended_attribute *ea;
480 struct extended_attribute *ea_end; 485 struct extended_attribute *ea_end;
481 if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return; 486 if (!(fnode = hpfs_map_fnode(s, fno, &bh))) return;
482 if (!fnode->dirflag) hpfs_remove_btree(s, &fnode->btree); 487 if (!fnode_is_dir(fnode)) hpfs_remove_btree(s, &fnode->btree);
483 else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno)); 488 else hpfs_remove_dtree(s, le32_to_cpu(fnode->u.external[0].disk_secno));
484 ea_end = fnode_end_ea(fnode); 489 ea_end = fnode_end_ea(fnode);
485 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 490 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
486 if (ea->indirect) 491 if (ea_indirect(ea))
487 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 492 hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
488 hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l)); 493 hpfs_ea_ext_remove(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l));
489 brelse(bh); 494 brelse(bh);
490 hpfs_free_sectors(s, fno, 1); 495 hpfs_free_sectors(s, fno, 1);
491} 496}
diff --git a/fs/hpfs/buffer.c b/fs/hpfs/buffer.c
index 9ecde27d1e29..f49d1498aa2e 100644
--- a/fs/hpfs/buffer.c
+++ b/fs/hpfs/buffer.c
@@ -156,7 +156,6 @@ void hpfs_brelse4(struct quad_buffer_head *qbh)
156 156
157void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh) 157void hpfs_mark_4buffers_dirty(struct quad_buffer_head *qbh)
158{ 158{
159 PRINTK(("hpfs_mark_4buffers_dirty\n"));
160 memcpy(qbh->bh[0]->b_data, qbh->data, 512); 159 memcpy(qbh->bh[0]->b_data, qbh->data, 512);
161 memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512); 160 memcpy(qbh->bh[1]->b_data, qbh->data + 512, 512);
162 memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512); 161 memcpy(qbh->bh[2]->b_data, qbh->data + 2 * 512, 512);
diff --git a/fs/hpfs/dir.c b/fs/hpfs/dir.c
index 2fa0089a02a8..b8472f803f4e 100644
--- a/fs/hpfs/dir.c
+++ b/fs/hpfs/dir.c
@@ -87,7 +87,7 @@ static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
87 ret = -EIOERROR; 87 ret = -EIOERROR;
88 goto out; 88 goto out;
89 } 89 }
90 if (!fno->dirflag) { 90 if (!fnode_is_dir(fno)) {
91 e = 1; 91 e = 1;
92 hpfs_error(inode->i_sb, "not a directory, fnode %08lx", 92 hpfs_error(inode->i_sb, "not a directory, fnode %08lx",
93 (unsigned long)inode->i_ino); 93 (unsigned long)inode->i_ino);
diff --git a/fs/hpfs/dnode.c b/fs/hpfs/dnode.c
index 1e0e2ac30fd3..3228c524ebe5 100644
--- a/fs/hpfs/dnode.c
+++ b/fs/hpfs/dnode.c
@@ -153,7 +153,7 @@ static void set_last_pointer(struct super_block *s, struct dnode *d, dnode_secno
153 } 153 }
154 de->length = cpu_to_le16(36); 154 de->length = cpu_to_le16(36);
155 de->down = 1; 155 de->down = 1;
156 *(dnode_secno *)((char *)de + 32) = cpu_to_le32(ptr); 156 *(__le32 *)((char *)de + 32) = cpu_to_le32(ptr);
157 } 157 }
158} 158}
159 159
@@ -177,7 +177,7 @@ struct hpfs_dirent *hpfs_add_de(struct super_block *s, struct dnode *d,
177 memmove((char *)de + d_size, de, (char *)de_end - (char *)de); 177 memmove((char *)de + d_size, de, (char *)de_end - (char *)de);
178 memset(de, 0, d_size); 178 memset(de, 0, d_size);
179 if (down_ptr) { 179 if (down_ptr) {
180 *(dnode_secno *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr); 180 *(__le32 *)((char *)de + d_size - 4) = cpu_to_le32(down_ptr);
181 de->down = 1; 181 de->down = 1;
182 } 182 }
183 de->length = cpu_to_le16(d_size); 183 de->length = cpu_to_le16(d_size);
@@ -656,7 +656,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
656 del->down = 0; 656 del->down = 0;
657 d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4); 657 d1->first_free = cpu_to_le32(le32_to_cpu(d1->first_free) - 4);
658 } else if (down) 658 } else if (down)
659 *(dnode_secno *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down); 659 *(__le32 *) ((void *) del + le16_to_cpu(del->length) - 4) = cpu_to_le32(down);
660 } else goto endm; 660 } else goto endm;
661 if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) { 661 if (!(de_cp = kmalloc(le16_to_cpu(de_prev->length), GFP_NOFS))) {
662 printk("HPFS: out of memory for dtree balancing\n"); 662 printk("HPFS: out of memory for dtree balancing\n");
@@ -672,7 +672,7 @@ static void delete_empty_dnode(struct inode *i, dnode_secno dno)
672 de_prev->down = 1; 672 de_prev->down = 1;
673 dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4); 673 dnode->first_free = cpu_to_le32(le32_to_cpu(dnode->first_free) + 4);
674 } 674 }
675 *(dnode_secno *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown); 675 *(__le32 *) ((void *) de_prev + le16_to_cpu(de_prev->length) - 4) = cpu_to_le32(ndown);
676 hpfs_mark_4buffers_dirty(&qbh); 676 hpfs_mark_4buffers_dirty(&qbh);
677 hpfs_brelse4(&qbh); 677 hpfs_brelse4(&qbh);
678 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4); 678 for_all_poss(i, hpfs_pos_subst, ((loff_t)up << 4) | (p - 1), 4);
@@ -1015,7 +1015,7 @@ struct hpfs_dirent *map_fnode_dirent(struct super_block *s, fnode_secno fno,
1015 kfree(name2); 1015 kfree(name2);
1016 return NULL; 1016 return NULL;
1017 } 1017 }
1018 if (!upf->dirflag) { 1018 if (!fnode_is_dir(upf)) {
1019 brelse(bh); 1019 brelse(bh);
1020 hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up)); 1020 hpfs_error(s, "fnode %08x has non-directory parent %08x", fno, le32_to_cpu(f->up));
1021 kfree(name2); 1021 kfree(name2);
diff --git a/fs/hpfs/ea.c b/fs/hpfs/ea.c
index d8b84d113c89..bcaafcd2666a 100644
--- a/fs/hpfs/ea.c
+++ b/fs/hpfs/ea.c
@@ -23,15 +23,15 @@ void hpfs_ea_ext_remove(struct super_block *s, secno a, int ano, unsigned len)
23 return; 23 return;
24 } 24 }
25 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; 25 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
26 if (ea->indirect) { 26 if (ea_indirect(ea)) {
27 if (ea_valuelen(ea) != 8) { 27 if (ea_valuelen(ea) != 8) {
28 hpfs_error(s, "ea->indirect set while ea->valuelen!=8, %s %08x, pos %08x", 28 hpfs_error(s, "ea_indirect(ea) set while ea->valuelen!=8, %s %08x, pos %08x",
29 ano ? "anode" : "sectors", a, pos); 29 ano ? "anode" : "sectors", a, pos);
30 return; 30 return;
31 } 31 }
32 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4)) 32 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 9, ex+4))
33 return; 33 return;
34 hpfs_ea_remove(s, ea_sec(ea), ea->anode, ea_len(ea)); 34 hpfs_ea_remove(s, ea_sec(ea), ea_in_anode(ea), ea_len(ea));
35 } 35 }
36 pos += ea->namelen + ea_valuelen(ea) + 5; 36 pos += ea->namelen + ea_valuelen(ea) + 5;
37 } 37 }
@@ -81,7 +81,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
81 struct extended_attribute *ea_end = fnode_end_ea(fnode); 81 struct extended_attribute *ea_end = fnode_end_ea(fnode);
82 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 82 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
83 if (!strcmp(ea->name, key)) { 83 if (!strcmp(ea->name, key)) {
84 if (ea->indirect) 84 if (ea_indirect(ea))
85 goto indirect; 85 goto indirect;
86 if (ea_valuelen(ea) >= size) 86 if (ea_valuelen(ea) >= size)
87 return -EINVAL; 87 return -EINVAL;
@@ -91,7 +91,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
91 } 91 }
92 a = le32_to_cpu(fnode->ea_secno); 92 a = le32_to_cpu(fnode->ea_secno);
93 len = le32_to_cpu(fnode->ea_size_l); 93 len = le32_to_cpu(fnode->ea_size_l);
94 ano = fnode->ea_anode; 94 ano = fnode_in_anode(fnode);
95 pos = 0; 95 pos = 0;
96 while (pos < len) { 96 while (pos < len) {
97 ea = (struct extended_attribute *)ex; 97 ea = (struct extended_attribute *)ex;
@@ -101,10 +101,10 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
101 return -EIO; 101 return -EIO;
102 } 102 }
103 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO; 103 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return -EIO;
104 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) 104 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
105 return -EIO; 105 return -EIO;
106 if (!strcmp(ea->name, key)) { 106 if (!strcmp(ea->name, key)) {
107 if (ea->indirect) 107 if (ea_indirect(ea))
108 goto indirect; 108 goto indirect;
109 if (ea_valuelen(ea) >= size) 109 if (ea_valuelen(ea) >= size)
110 return -EINVAL; 110 return -EINVAL;
@@ -119,7 +119,7 @@ int hpfs_read_ea(struct super_block *s, struct fnode *fnode, char *key,
119indirect: 119indirect:
120 if (ea_len(ea) >= size) 120 if (ea_len(ea) >= size)
121 return -EINVAL; 121 return -EINVAL;
122 if (hpfs_ea_read(s, ea_sec(ea), ea->anode, 0, ea_len(ea), buf)) 122 if (hpfs_ea_read(s, ea_sec(ea), ea_in_anode(ea), 0, ea_len(ea), buf))
123 return -EIO; 123 return -EIO;
124 buf[ea_len(ea)] = 0; 124 buf[ea_len(ea)] = 0;
125 return 0; 125 return 0;
@@ -136,8 +136,8 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
136 struct extended_attribute *ea_end = fnode_end_ea(fnode); 136 struct extended_attribute *ea_end = fnode_end_ea(fnode);
137 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 137 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
138 if (!strcmp(ea->name, key)) { 138 if (!strcmp(ea->name, key)) {
139 if (ea->indirect) 139 if (ea_indirect(ea))
140 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 140 return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
141 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { 141 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
142 printk("HPFS: out of memory for EA\n"); 142 printk("HPFS: out of memory for EA\n");
143 return NULL; 143 return NULL;
@@ -148,7 +148,7 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
148 } 148 }
149 a = le32_to_cpu(fnode->ea_secno); 149 a = le32_to_cpu(fnode->ea_secno);
150 len = le32_to_cpu(fnode->ea_size_l); 150 len = le32_to_cpu(fnode->ea_size_l);
151 ano = fnode->ea_anode; 151 ano = fnode_in_anode(fnode);
152 pos = 0; 152 pos = 0;
153 while (pos < len) { 153 while (pos < len) {
154 char ex[4 + 255 + 1 + 8]; 154 char ex[4 + 255 + 1 + 8];
@@ -159,11 +159,11 @@ char *hpfs_get_ea(struct super_block *s, struct fnode *fnode, char *key, int *si
159 return NULL; 159 return NULL;
160 } 160 }
161 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL; 161 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return NULL;
162 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) 162 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
163 return NULL; 163 return NULL;
164 if (!strcmp(ea->name, key)) { 164 if (!strcmp(ea->name, key)) {
165 if (ea->indirect) 165 if (ea_indirect(ea))
166 return get_indirect_ea(s, ea->anode, ea_sec(ea), *size = ea_len(ea)); 166 return get_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), *size = ea_len(ea));
167 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) { 167 if (!(ret = kmalloc((*size = ea_valuelen(ea)) + 1, GFP_NOFS))) {
168 printk("HPFS: out of memory for EA\n"); 168 printk("HPFS: out of memory for EA\n");
169 return NULL; 169 return NULL;
@@ -199,9 +199,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
199 struct extended_attribute *ea_end = fnode_end_ea(fnode); 199 struct extended_attribute *ea_end = fnode_end_ea(fnode);
200 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea)) 200 for (ea = fnode_ea(fnode); ea < ea_end; ea = next_ea(ea))
201 if (!strcmp(ea->name, key)) { 201 if (!strcmp(ea->name, key)) {
202 if (ea->indirect) { 202 if (ea_indirect(ea)) {
203 if (ea_len(ea) == size) 203 if (ea_len(ea) == size)
204 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 204 set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
205 } else if (ea_valuelen(ea) == size) { 205 } else if (ea_valuelen(ea) == size) {
206 memcpy(ea_data(ea), data, size); 206 memcpy(ea_data(ea), data, size);
207 } 207 }
@@ -209,7 +209,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
209 } 209 }
210 a = le32_to_cpu(fnode->ea_secno); 210 a = le32_to_cpu(fnode->ea_secno);
211 len = le32_to_cpu(fnode->ea_size_l); 211 len = le32_to_cpu(fnode->ea_size_l);
212 ano = fnode->ea_anode; 212 ano = fnode_in_anode(fnode);
213 pos = 0; 213 pos = 0;
214 while (pos < len) { 214 while (pos < len) {
215 char ex[4 + 255 + 1 + 8]; 215 char ex[4 + 255 + 1 + 8];
@@ -220,12 +220,12 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
220 return; 220 return;
221 } 221 }
222 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return; 222 if (hpfs_ea_read(s, a, ano, pos, 4, ex)) return;
223 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea->indirect ? 8 : 0), ex + 4)) 223 if (hpfs_ea_read(s, a, ano, pos + 4, ea->namelen + 1 + (ea_indirect(ea) ? 8 : 0), ex + 4))
224 return; 224 return;
225 if (!strcmp(ea->name, key)) { 225 if (!strcmp(ea->name, key)) {
226 if (ea->indirect) { 226 if (ea_indirect(ea)) {
227 if (ea_len(ea) == size) 227 if (ea_len(ea) == size)
228 set_indirect_ea(s, ea->anode, ea_sec(ea), data, size); 228 set_indirect_ea(s, ea_in_anode(ea), ea_sec(ea), data, size);
229 } 229 }
230 else { 230 else {
231 if (ea_valuelen(ea) == size) 231 if (ea_valuelen(ea) == size)
@@ -246,7 +246,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
246 if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) { 246 if (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200) {
247 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x", 247 hpfs_error(s, "fnode %08lx: ea_offs == %03x, ea_size_s == %03x",
248 (unsigned long)inode->i_ino, 248 (unsigned long)inode->i_ino,
249 le32_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); 249 le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s));
250 return; 250 return;
251 } 251 }
252 if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) && 252 if ((le16_to_cpu(fnode->ea_size_s) || !le32_to_cpu(fnode->ea_size_l)) &&
@@ -276,7 +276,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
276 fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s)); 276 fnode->ea_size_l = cpu_to_le32(le16_to_cpu(fnode->ea_size_s));
277 fnode->ea_size_s = cpu_to_le16(0); 277 fnode->ea_size_s = cpu_to_le16(0);
278 fnode->ea_secno = cpu_to_le32(n); 278 fnode->ea_secno = cpu_to_le32(n);
279 fnode->ea_anode = cpu_to_le32(0); 279 fnode->flags &= ~FNODE_anode;
280 mark_buffer_dirty(bh); 280 mark_buffer_dirty(bh);
281 brelse(bh); 281 brelse(bh);
282 } 282 }
@@ -288,9 +288,9 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
288 secno q = hpfs_alloc_sector(s, fno, 1, 0); 288 secno q = hpfs_alloc_sector(s, fno, 1, 0);
289 if (!q) goto bail; 289 if (!q) goto bail;
290 fnode->ea_secno = cpu_to_le32(q); 290 fnode->ea_secno = cpu_to_le32(q);
291 fnode->ea_anode = 0; 291 fnode->flags &= ~FNODE_anode;
292 len++; 292 len++;
293 } else if (!fnode->ea_anode) { 293 } else if (!fnode_in_anode(fnode)) {
294 if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) { 294 if (hpfs_alloc_if_possible(s, le32_to_cpu(fnode->ea_secno) + len)) {
295 len++; 295 len++;
296 } else { 296 } else {
@@ -310,7 +310,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
310 anode->u.external[0].length = cpu_to_le32(len); 310 anode->u.external[0].length = cpu_to_le32(len);
311 mark_buffer_dirty(bh); 311 mark_buffer_dirty(bh);
312 brelse(bh); 312 brelse(bh);
313 fnode->ea_anode = 1; 313 fnode->flags |= FNODE_anode;
314 fnode->ea_secno = cpu_to_le32(a_s);*/ 314 fnode->ea_secno = cpu_to_le32(a_s);*/
315 secno new_sec; 315 secno new_sec;
316 int i; 316 int i;
@@ -338,7 +338,7 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
338 len = (pos + 511) >> 9; 338 len = (pos + 511) >> 9;
339 } 339 }
340 } 340 }
341 if (fnode->ea_anode) { 341 if (fnode_in_anode(fnode)) {
342 if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno), 342 if (hpfs_add_sector_to_btree(s, le32_to_cpu(fnode->ea_secno),
343 0, len) != -1) { 343 0, len) != -1) {
344 len++; 344 len++;
@@ -351,16 +351,16 @@ void hpfs_set_ea(struct inode *inode, struct fnode *fnode, const char *key,
351 h[1] = strlen(key); 351 h[1] = strlen(key);
352 h[2] = size & 0xff; 352 h[2] = size & 0xff;
353 h[3] = size >> 8; 353 h[3] = size >> 8;
354 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail; 354 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l), 4, h)) goto bail;
355 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail; 355 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 4, h[1] + 1, key)) goto bail;
356 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode->ea_anode, le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail; 356 if (hpfs_ea_write(s, le32_to_cpu(fnode->ea_secno), fnode_in_anode(fnode), le32_to_cpu(fnode->ea_size_l) + 5 + h[1], size, data)) goto bail;
357 fnode->ea_size_l = cpu_to_le32(pos); 357 fnode->ea_size_l = cpu_to_le32(pos);
358 ret: 358 ret:
359 hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size; 359 hpfs_i(inode)->i_ea_size += 5 + strlen(key) + size;
360 return; 360 return;
361 bail: 361 bail:
362 if (le32_to_cpu(fnode->ea_secno)) 362 if (le32_to_cpu(fnode->ea_secno))
363 if (fnode->ea_anode) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9); 363 if (fnode_in_anode(fnode)) hpfs_truncate_btree(s, le32_to_cpu(fnode->ea_secno), 1, (le32_to_cpu(fnode->ea_size_l) + 511) >> 9);
364 else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9)); 364 else hpfs_free_sectors(s, le32_to_cpu(fnode->ea_secno) + ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9), len - ((le32_to_cpu(fnode->ea_size_l) + 511) >> 9));
365 else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0); 365 else fnode->ea_secno = fnode->ea_size_l = cpu_to_le32(0);
366} 366}
diff --git a/fs/hpfs/hpfs.h b/fs/hpfs/hpfs.h
index 8b0650aae328..cce025aff1b1 100644
--- a/fs/hpfs/hpfs.h
+++ b/fs/hpfs/hpfs.h
@@ -51,11 +51,11 @@ struct hpfs_boot_block
51 u8 n_rootdir_entries[2]; 51 u8 n_rootdir_entries[2];
52 u8 n_sectors_s[2]; 52 u8 n_sectors_s[2];
53 u8 media_byte; 53 u8 media_byte;
54 u16 sectors_per_fat; 54 __le16 sectors_per_fat;
55 u16 sectors_per_track; 55 __le16 sectors_per_track;
56 u16 heads_per_cyl; 56 __le16 heads_per_cyl;
57 u32 n_hidden_sectors; 57 __le32 n_hidden_sectors;
58 u32 n_sectors_l; /* size of partition */ 58 __le32 n_sectors_l; /* size of partition */
59 u8 drive_number; 59 u8 drive_number;
60 u8 mbz; 60 u8 mbz;
61 u8 sig_28h; /* 28h */ 61 u8 sig_28h; /* 28h */
@@ -63,7 +63,7 @@ struct hpfs_boot_block
63 u8 vol_label[11]; 63 u8 vol_label[11];
64 u8 sig_hpfs[8]; /* "HPFS " */ 64 u8 sig_hpfs[8]; /* "HPFS " */
65 u8 pad[448]; 65 u8 pad[448];
66 u16 magic; /* aa55 */ 66 __le16 magic; /* aa55 */
67}; 67};
68 68
69 69
@@ -75,28 +75,28 @@ struct hpfs_boot_block
75 75
76struct hpfs_super_block 76struct hpfs_super_block
77{ 77{
78 u32 magic; /* f995 e849 */ 78 __le32 magic; /* f995 e849 */
79 u32 magic1; /* fa53 e9c5, more magic? */ 79 __le32 magic1; /* fa53 e9c5, more magic? */
80 u8 version; /* version of a filesystem usually 2 */ 80 u8 version; /* version of a filesystem usually 2 */
81 u8 funcversion; /* functional version - oldest version 81 u8 funcversion; /* functional version - oldest version
82 of filesystem that can understand 82 of filesystem that can understand
83 this disk */ 83 this disk */
84 u16 zero; /* 0 */ 84 __le16 zero; /* 0 */
85 fnode_secno root; /* fnode of root directory */ 85 __le32 root; /* fnode of root directory */
86 secno n_sectors; /* size of filesystem */ 86 __le32 n_sectors; /* size of filesystem */
87 u32 n_badblocks; /* number of bad blocks */ 87 __le32 n_badblocks; /* number of bad blocks */
88 secno bitmaps; /* pointers to free space bit maps */ 88 __le32 bitmaps; /* pointers to free space bit maps */
89 u32 zero1; /* 0 */ 89 __le32 zero1; /* 0 */
90 secno badblocks; /* bad block list */ 90 __le32 badblocks; /* bad block list */
91 u32 zero3; /* 0 */ 91 __le32 zero3; /* 0 */
92 time32_t last_chkdsk; /* date last checked, 0 if never */ 92 __le32 last_chkdsk; /* date last checked, 0 if never */
93 time32_t last_optimize; /* date last optimized, 0 if never */ 93 __le32 last_optimize; /* date last optimized, 0 if never */
94 secno n_dir_band; /* number of sectors in dir band */ 94 __le32 n_dir_band; /* number of sectors in dir band */
95 secno dir_band_start; /* first sector in dir band */ 95 __le32 dir_band_start; /* first sector in dir band */
96 secno dir_band_end; /* last sector in dir band */ 96 __le32 dir_band_end; /* last sector in dir band */
97 secno dir_band_bitmap; /* free space map, 1 dnode per bit */ 97 __le32 dir_band_bitmap; /* free space map, 1 dnode per bit */
98 u8 volume_name[32]; /* not used */ 98 u8 volume_name[32]; /* not used */
99 secno user_id_table; /* 8 preallocated sectors - user id */ 99 __le32 user_id_table; /* 8 preallocated sectors - user id */
100 u32 zero6[103]; /* 0 */ 100 u32 zero6[103]; /* 0 */
101}; 101};
102 102
@@ -109,8 +109,8 @@ struct hpfs_super_block
109 109
110struct hpfs_spare_block 110struct hpfs_spare_block
111{ 111{
112 u32 magic; /* f991 1849 */ 112 __le32 magic; /* f991 1849 */
113 u32 magic1; /* fa52 29c5, more magic? */ 113 __le32 magic1; /* fa52 29c5, more magic? */
114 114
115#ifdef __LITTLE_ENDIAN 115#ifdef __LITTLE_ENDIAN
116 u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */ 116 u8 dirty: 1; /* 0 clean, 1 "improperly stopped" */
@@ -153,21 +153,21 @@ struct hpfs_spare_block
153 u8 mm_contlgulty; 153 u8 mm_contlgulty;
154 u8 unused; 154 u8 unused;
155 155
156 secno hotfix_map; /* info about remapped bad sectors */ 156 __le32 hotfix_map; /* info about remapped bad sectors */
157 u32 n_spares_used; /* number of hotfixes */ 157 __le32 n_spares_used; /* number of hotfixes */
158 u32 n_spares; /* number of spares in hotfix map */ 158 __le32 n_spares; /* number of spares in hotfix map */
159 u32 n_dnode_spares_free; /* spare dnodes unused */ 159 __le32 n_dnode_spares_free; /* spare dnodes unused */
160 u32 n_dnode_spares; /* length of spare_dnodes[] list, 160 __le32 n_dnode_spares; /* length of spare_dnodes[] list,
161 follows in this block*/ 161 follows in this block*/
162 secno code_page_dir; /* code page directory block */ 162 __le32 code_page_dir; /* code page directory block */
163 u32 n_code_pages; /* number of code pages */ 163 __le32 n_code_pages; /* number of code pages */
164 u32 super_crc; /* on HPFS386 and LAN Server this is 164 __le32 super_crc; /* on HPFS386 and LAN Server this is
165 checksum of superblock, on normal 165 checksum of superblock, on normal
166 OS/2 unused */ 166 OS/2 unused */
167 u32 spare_crc; /* on HPFS386 checksum of spareblock */ 167 __le32 spare_crc; /* on HPFS386 checksum of spareblock */
168 u32 zero1[15]; /* unused */ 168 __le32 zero1[15]; /* unused */
169 dnode_secno spare_dnodes[100]; /* emergency free dnode list */ 169 __le32 spare_dnodes[100]; /* emergency free dnode list */
170 u32 zero2[1]; /* room for more? */ 170 __le32 zero2[1]; /* room for more? */
171}; 171};
172 172
173/* The bad block list is 4 sectors long. The first word must be zero, 173/* The bad block list is 4 sectors long. The first word must be zero,
@@ -202,18 +202,18 @@ struct hpfs_spare_block
202 202
203struct code_page_directory 203struct code_page_directory
204{ 204{
205 u32 magic; /* 4945 21f7 */ 205 __le32 magic; /* 4945 21f7 */
206 u32 n_code_pages; /* number of pointers following */ 206 __le32 n_code_pages; /* number of pointers following */
207 u32 zero1[2]; 207 __le32 zero1[2];
208 struct { 208 struct {
209 u16 ix; /* index */ 209 __le16 ix; /* index */
210 u16 code_page_number; /* code page number */ 210 __le16 code_page_number; /* code page number */
211 u32 bounds; /* matches corresponding word 211 __le32 bounds; /* matches corresponding word
212 in data block */ 212 in data block */
213 secno code_page_data; /* sector number of a code_page_data 213 __le32 code_page_data; /* sector number of a code_page_data
214 containing c.p. array */ 214 containing c.p. array */
215 u16 index; /* index in c.p. array in that sector*/ 215 __le16 index; /* index in c.p. array in that sector*/
216 u16 unknown; /* some unknown value; usually 0; 216 __le16 unknown; /* some unknown value; usually 0;
217 2 in Japanese version */ 217 2 in Japanese version */
218 } array[31]; /* unknown length */ 218 } array[31]; /* unknown length */
219}; 219};
@@ -224,19 +224,19 @@ struct code_page_directory
224 224
225struct code_page_data 225struct code_page_data
226{ 226{
227 u32 magic; /* 8945 21f7 */ 227 __le32 magic; /* 8945 21f7 */
228 u32 n_used; /* # elements used in c_p_data[] */ 228 __le32 n_used; /* # elements used in c_p_data[] */
229 u32 bounds[3]; /* looks a bit like 229 __le32 bounds[3]; /* looks a bit like
230 (beg1,end1), (beg2,end2) 230 (beg1,end1), (beg2,end2)
231 one byte each */ 231 one byte each */
232 u16 offs[3]; /* offsets from start of sector 232 __le16 offs[3]; /* offsets from start of sector
233 to start of c_p_data[ix] */ 233 to start of c_p_data[ix] */
234 struct { 234 struct {
235 u16 ix; /* index */ 235 __le16 ix; /* index */
236 u16 code_page_number; /* code page number */ 236 __le16 code_page_number; /* code page number */
237 u16 unknown; /* the same as in cp directory */ 237 __le16 unknown; /* the same as in cp directory */
238 u8 map[128]; /* upcase table for chars 80..ff */ 238 u8 map[128]; /* upcase table for chars 80..ff */
239 u16 zero2; 239 __le16 zero2;
240 } code_page[3]; 240 } code_page[3];
241 u8 incognita[78]; 241 u8 incognita[78];
242}; 242};
@@ -278,8 +278,8 @@ struct code_page_data
278#define DNODE_MAGIC 0x77e40aae 278#define DNODE_MAGIC 0x77e40aae
279 279
280struct dnode { 280struct dnode {
281 u32 magic; /* 77e4 0aae */ 281 __le32 magic; /* 77e4 0aae */
282 u32 first_free; /* offset from start of dnode to 282 __le32 first_free; /* offset from start of dnode to
283 first free dir entry */ 283 first free dir entry */
284#ifdef __LITTLE_ENDIAN 284#ifdef __LITTLE_ENDIAN
285 u8 root_dnode: 1; /* Is it root dnode? */ 285 u8 root_dnode: 1; /* Is it root dnode? */
@@ -293,14 +293,14 @@ struct dnode {
293 u8 root_dnode: 1; /* Is it root dnode? */ 293 u8 root_dnode: 1; /* Is it root dnode? */
294#endif 294#endif
295 u8 increment_me2[3]; 295 u8 increment_me2[3];
296 secno up; /* (root dnode) directory's fnode 296 __le32 up; /* (root dnode) directory's fnode
297 (nonroot) parent dnode */ 297 (nonroot) parent dnode */
298 dnode_secno self; /* pointer to this dnode */ 298 __le32 self; /* pointer to this dnode */
299 u8 dirent[2028]; /* one or more dirents */ 299 u8 dirent[2028]; /* one or more dirents */
300}; 300};
301 301
302struct hpfs_dirent { 302struct hpfs_dirent {
303 u16 length; /* offset to next dirent */ 303 __le16 length; /* offset to next dirent */
304 304
305#ifdef __LITTLE_ENDIAN 305#ifdef __LITTLE_ENDIAN
306 u8 first: 1; /* set on phony ^A^A (".") entry */ 306 u8 first: 1; /* set on phony ^A^A (".") entry */
@@ -346,12 +346,12 @@ struct hpfs_dirent {
346 u8 read_only: 1; /* dos attrib */ 346 u8 read_only: 1; /* dos attrib */
347#endif 347#endif
348 348
349 fnode_secno fnode; /* fnode giving allocation info */ 349 __le32 fnode; /* fnode giving allocation info */
350 time32_t write_date; /* mtime */ 350 __le32 write_date; /* mtime */
351 u32 file_size; /* file length, bytes */ 351 __le32 file_size; /* file length, bytes */
352 time32_t read_date; /* atime */ 352 __le32 read_date; /* atime */
353 time32_t creation_date; /* ctime */ 353 __le32 creation_date; /* ctime */
354 u32 ea_size; /* total EA length, bytes */ 354 __le32 ea_size; /* total EA length, bytes */
355 u8 no_of_acls; /* number of ACL's (low 3 bits) */ 355 u8 no_of_acls; /* number of ACL's (low 3 bits) */
356 u8 ix; /* code page index (of filename), see 356 u8 ix; /* code page index (of filename), see
357 struct code_page_data */ 357 struct code_page_data */
@@ -375,50 +375,36 @@ struct hpfs_dirent {
375 375
376struct bplus_leaf_node 376struct bplus_leaf_node
377{ 377{
378 u32 file_secno; /* first file sector in extent */ 378 __le32 file_secno; /* first file sector in extent */
379 u32 length; /* length, sectors */ 379 __le32 length; /* length, sectors */
380 secno disk_secno; /* first corresponding disk sector */ 380 __le32 disk_secno; /* first corresponding disk sector */
381}; 381};
382 382
383struct bplus_internal_node 383struct bplus_internal_node
384{ 384{
385 u32 file_secno; /* subtree maps sectors < this */ 385 __le32 file_secno; /* subtree maps sectors < this */
386 anode_secno down; /* pointer to subtree */ 386 __le32 down; /* pointer to subtree */
387}; 387};
388 388
389enum {
390 BP_hbff = 1,
391 BP_fnode_parent = 0x20,
392 BP_binary_search = 0x40,
393 BP_internal = 0x80
394};
389struct bplus_header 395struct bplus_header
390{ 396{
391#ifdef __LITTLE_ENDIAN 397 u8 flags; /* bit 0 - high bit of first free entry offset
392 u8 hbff: 1; /* high bit of first free entry offset */ 398 bit 5 - we're pointed to by an fnode,
393 u8 flag1234: 4;
394 u8 fnode_parent: 1; /* ? we're pointed to by an fnode,
395 the data btree or some ea or the
396 main ea bootage pointer ea_secno */
397 /* also can get set in fnodes, which
398 may be a chkdsk glitch or may mean
399 this bit is irrelevant in fnodes,
400 or this interpretation is all wet */
401 u8 binary_search: 1; /* suggest binary search (unused) */
402 u8 internal: 1; /* 1 -> (internal) tree of anodes
403 0 -> (leaf) list of extents */
404#else
405 u8 internal: 1; /* 1 -> (internal) tree of anodes
406 0 -> (leaf) list of extents */
407 u8 binary_search: 1; /* suggest binary search (unused) */
408 u8 fnode_parent: 1; /* ? we're pointed to by an fnode,
409 the data btree or some ea or the 399 the data btree or some ea or the
410 main ea bootage pointer ea_secno */ 400 main ea bootage pointer ea_secno
411 /* also can get set in fnodes, which 401 bit 6 - suggest binary search (unused)
412 may be a chkdsk glitch or may mean 402 bit 7 - 1 -> (internal) tree of anodes
413 this bit is irrelevant in fnodes, 403 0 -> (leaf) list of extents */
414 or this interpretation is all wet */
415 u8 flag1234: 4;
416 u8 hbff: 1; /* high bit of first free entry offset */
417#endif
418 u8 fill[3]; 404 u8 fill[3];
419 u8 n_free_nodes; /* free nodes in following array */ 405 u8 n_free_nodes; /* free nodes in following array */
420 u8 n_used_nodes; /* used nodes in following array */ 406 u8 n_used_nodes; /* used nodes in following array */
421 u16 first_free; /* offset from start of header to 407 __le16 first_free; /* offset from start of header to
422 first free node in array */ 408 first free node in array */
423 union { 409 union {
424 struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving 410 struct bplus_internal_node internal[0]; /* (internal) 2-word entries giving
@@ -428,6 +414,16 @@ struct bplus_header
428 } u; 414 } u;
429}; 415};
430 416
417static inline bool bp_internal(struct bplus_header *bp)
418{
419 return bp->flags & BP_internal;
420}
421
422static inline bool bp_fnode_parent(struct bplus_header *bp)
423{
424 return bp->flags & BP_fnode_parent;
425}
426
431/* fnode: root of allocation b+ tree, and EA's */ 427/* fnode: root of allocation b+ tree, and EA's */
432 428
433/* Every file and every directory has one fnode, pointed to by the directory 429/* Every file and every directory has one fnode, pointed to by the directory
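struct bplus_header loses its #ifdef __LITTLE_ENDIAN bitfield pair in favour of a single flags byte with the BP_* constants above; callers (see the map.c hunks below) test it through bp_internal() and bp_fnode_parent() rather than relying on bit order. Roughly, the old and new access patterns compare as follows (sketch only):

/* before: endian-dependent bitfield */
if (fnode->btree.internal) { /* ... */ }

/* after: explicit flag byte tested through a helper */
if (bp_internal(&fnode->btree)) { /* ... */ }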
@@ -436,62 +432,56 @@ struct bplus_header
436 432
437#define FNODE_MAGIC 0xf7e40aae 433#define FNODE_MAGIC 0xf7e40aae
438 434
435enum {FNODE_anode = cpu_to_le16(2), FNODE_dir = cpu_to_le16(256)};
439struct fnode 436struct fnode
440{ 437{
441 u32 magic; /* f7e4 0aae */ 438 __le32 magic; /* f7e4 0aae */
442 u32 zero1[2]; /* read history */ 439 __le32 zero1[2]; /* read history */
443 u8 len, name[15]; /* true length, truncated name */ 440 u8 len, name[15]; /* true length, truncated name */
444 fnode_secno up; /* pointer to file's directory fnode */ 441 __le32 up; /* pointer to file's directory fnode */
445 secno acl_size_l; 442 __le32 acl_size_l;
446 secno acl_secno; 443 __le32 acl_secno;
447 u16 acl_size_s; 444 __le16 acl_size_s;
448 u8 acl_anode; 445 u8 acl_anode;
449 u8 zero2; /* history bit count */ 446 u8 zero2; /* history bit count */
450 u32 ea_size_l; /* length of disk-resident ea's */ 447 __le32 ea_size_l; /* length of disk-resident ea's */
451 secno ea_secno; /* first sector of disk-resident ea's*/ 448 __le32 ea_secno; /* first sector of disk-resident ea's*/
452 u16 ea_size_s; /* length of fnode-resident ea's */ 449 __le16 ea_size_s; /* length of fnode-resident ea's */
453
454#ifdef __LITTLE_ENDIAN
455 u8 flag0: 1;
456 u8 ea_anode: 1; /* 1 -> ea_secno is an anode */
457 u8 flag234567: 6;
458#else
459 u8 flag234567: 6;
460 u8 ea_anode: 1; /* 1 -> ea_secno is an anode */
461 u8 flag0: 1;
462#endif
463 450
464#ifdef __LITTLE_ENDIAN 451 __le16 flags; /* bit 1 set -> ea_secno is an anode */
465 u8 dirflag: 1; /* 1 -> directory. first & only extent 452 /* bit 8 set -> directory. first & only extent
466 points to dnode. */
467 u8 flag9012345: 7;
468#else
469 u8 flag9012345: 7;
470 u8 dirflag: 1; /* 1 -> directory. first & only extent
471 points to dnode. */ 453 points to dnode. */
472#endif
473
474 struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */ 454 struct bplus_header btree; /* b+ tree, 8 extents or 12 subtrees */
475 union { 455 union {
476 struct bplus_leaf_node external[8]; 456 struct bplus_leaf_node external[8];
477 struct bplus_internal_node internal[12]; 457 struct bplus_internal_node internal[12];
478 } u; 458 } u;
479 459
480 u32 file_size; /* file length, bytes */ 460 __le32 file_size; /* file length, bytes */
481 u32 n_needea; /* number of EA's with NEEDEA set */ 461 __le32 n_needea; /* number of EA's with NEEDEA set */
482 u8 user_id[16]; /* unused */ 462 u8 user_id[16]; /* unused */
483 u16 ea_offs; /* offset from start of fnode 463 __le16 ea_offs; /* offset from start of fnode
484 to first fnode-resident ea */ 464 to first fnode-resident ea */
485 u8 dasd_limit_treshhold; 465 u8 dasd_limit_treshhold;
486 u8 dasd_limit_delta; 466 u8 dasd_limit_delta;
487 u32 dasd_limit; 467 __le32 dasd_limit;
488 u32 dasd_usage; 468 __le32 dasd_usage;
489 u8 ea[316]; /* zero or more EA's, packed together 469 u8 ea[316]; /* zero or more EA's, packed together
490 with no alignment padding. 470 with no alignment padding.
491 (Do not use this name, get here 471 (Do not use this name, get here
492 via fnode + ea_offs. I think.) */ 472 via fnode + ea_offs. I think.) */
493}; 473};
494 474
475static inline bool fnode_in_anode(struct fnode *p)
476{
477 return (p->flags & FNODE_anode) != 0;
478}
479
480static inline bool fnode_is_dir(struct fnode *p)
481{
482 return (p->flags & FNODE_dir) != 0;
483}
484
495 485
496/* anode: 99.44% pure allocation tree */ 486/* anode: 99.44% pure allocation tree */
497 487
@@ -499,9 +489,9 @@ struct fnode
499 489
500struct anode 490struct anode
501{ 491{
502 u32 magic; /* 37e4 0aae */ 492 __le32 magic; /* 37e4 0aae */
503 anode_secno self; /* pointer to this anode */ 493 __le32 self; /* pointer to this anode */
504 secno up; /* parent anode or fnode */ 494 __le32 up; /* parent anode or fnode */
505 495
506 struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */ 496 struct bplus_header btree; /* b+tree, 40 extents or 60 subtrees */
507 union { 497 union {
@@ -509,7 +499,7 @@ struct anode
509 struct bplus_internal_node internal[60]; 499 struct bplus_internal_node internal[60];
510 } u; 500 } u;
511 501
512 u32 fill[3]; /* unused */ 502 __le32 fill[3]; /* unused */
513}; 503};
514 504
515 505
@@ -528,32 +518,23 @@ struct anode
528 run, or in multiple runs. Flags in the fnode tell whether the EA list 518 run, or in multiple runs. Flags in the fnode tell whether the EA list
529 is immediate, in a single run, or in multiple runs. */ 519 is immediate, in a single run, or in multiple runs. */
530 520
521enum {EA_indirect = 1, EA_anode = 2, EA_needea = 128 };
531struct extended_attribute 522struct extended_attribute
532{ 523{
533#ifdef __LITTLE_ENDIAN 524 u8 flags; /* bit 0 set -> value gives sector number
534 u8 indirect: 1; /* 1 -> value gives sector number
535 where real value starts */ 525 where real value starts */
536 u8 anode: 1; /* 1 -> sector is an anode 526 /* bit 1 set -> sector is an anode
537 that points to fragmented value */ 527 that points to fragmented value */
538 u8 flag23456: 5; 528 /* bit 7 set -> required ea */
539 u8 needea: 1; /* required ea */
540#else
541 u8 needea: 1; /* required ea */
542 u8 flag23456: 5;
543 u8 anode: 1; /* 1 -> sector is an anode
544 that points to fragmented value */
545 u8 indirect: 1; /* 1 -> value gives sector number
546 where real value starts */
547#endif
548 u8 namelen; /* length of name, bytes */ 529 u8 namelen; /* length of name, bytes */
549 u8 valuelen_lo; /* length of value, bytes */ 530 u8 valuelen_lo; /* length of value, bytes */
550 u8 valuelen_hi; /* length of value, bytes */ 531 u8 valuelen_hi; /* length of value, bytes */
551 u8 name[0]; 532 u8 name[];
552 /* 533 /*
553 u8 name[namelen]; ascii attrib name 534 u8 name[namelen]; ascii attrib name
554 u8 nul; terminating '\0', not counted 535 u8 nul; terminating '\0', not counted
555 u8 value[valuelen]; value, arbitrary 536 u8 value[valuelen]; value, arbitrary
556 if this.indirect, valuelen is 8 and the value is 537 if this.flags & 1, valuelen is 8 and the value is
557 u32 length; real length of value, bytes 538 u32 length; real length of value, bytes
558 secno secno; sector address where it starts 539 secno secno; sector address where it starts
559 if this.anode, the above sector number is the root of an anode tree 540 if this.anode, the above sector number is the root of an anode tree
@@ -561,6 +542,16 @@ struct extended_attribute
561 */ 542 */
562}; 543};
563 544
545static inline bool ea_indirect(struct extended_attribute *ea)
546{
547 return ea->flags & EA_indirect;
548}
549
550static inline bool ea_in_anode(struct extended_attribute *ea)
551{
552 return ea->flags & EA_anode;
553}
554
564/* 555/*
565 Local Variables: 556 Local Variables:
566 comment-column: 40 557 comment-column: 40
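struct fnode and struct extended_attribute get the same treatment: the bitfields become flags fields tested through fnode_in_anode(), fnode_is_dir(), ea_indirect() and ea_in_anode(). Note that FNODE_anode and FNODE_dir are declared as cpu_to_le16() constants, so they can be masked directly against the little-endian flags word without a conversion, as the namei.c and ea.c hunks do:

/* Setting and testing fnode flags without byte-swapping the field:
 * the constants are already in little-endian form. */
fnode->flags |= FNODE_dir;                               /* mark as directory */
if (fnode->flags & FNODE_anode) { /* ea_secno points to an anode */ }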
diff --git a/fs/hpfs/hpfs_fn.h b/fs/hpfs/hpfs_fn.h
index de946170ebb1..c07ef1f1ced6 100644
--- a/fs/hpfs/hpfs_fn.h
+++ b/fs/hpfs/hpfs_fn.h
@@ -35,13 +35,6 @@
35 35
36#define CHKCOND(x,y) if (!(x)) printk y 36#define CHKCOND(x,y) if (!(x)) printk y
37 37
38#ifdef DBG
39#define PRINTK(x) printk x
40#else
41#undef PRINTK
42#define PRINTK(x)
43#endif
44
45struct hpfs_inode_info { 38struct hpfs_inode_info {
46 loff_t mmu_private; 39 loff_t mmu_private;
47 ino_t i_parent_dir; /* (directories) gives fnode of parent dir */ 40 ino_t i_parent_dir; /* (directories) gives fnode of parent dir */
@@ -82,7 +75,7 @@ struct hpfs_sb_info {
82 unsigned char *sb_cp_table; /* code page tables: */ 75 unsigned char *sb_cp_table; /* code page tables: */
83 /* 128 bytes uppercasing table & */ 76 /* 128 bytes uppercasing table & */
84 /* 128 bytes lowercasing table */ 77 /* 128 bytes lowercasing table */
85 unsigned *sb_bmp_dir; /* main bitmap directory */ 78 __le32 *sb_bmp_dir; /* main bitmap directory */
86 unsigned sb_c_bitmap; /* current bitmap */ 79 unsigned sb_c_bitmap; /* current bitmap */
87 unsigned sb_max_fwd_alloc; /* max forwad allocation */ 80 unsigned sb_max_fwd_alloc; /* max forwad allocation */
88 int sb_timeshift; 81 int sb_timeshift;
@@ -100,7 +93,7 @@ struct quad_buffer_head {
100static inline dnode_secno de_down_pointer (struct hpfs_dirent *de) 93static inline dnode_secno de_down_pointer (struct hpfs_dirent *de)
101{ 94{
102 CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n")); 95 CHKCOND(de->down,("HPFS: de_down_pointer: !de->down\n"));
103 return le32_to_cpu(*(dnode_secno *) ((void *) de + le16_to_cpu(de->length) - 4)); 96 return le32_to_cpu(*(__le32 *) ((void *) de + le16_to_cpu(de->length) - 4));
104} 97}
105 98
106/* The first dir entry in a dnode */ 99/* The first dir entry in a dnode */
@@ -148,12 +141,12 @@ static inline struct extended_attribute *next_ea(struct extended_attribute *ea)
148 141
149static inline secno ea_sec(struct extended_attribute *ea) 142static inline secno ea_sec(struct extended_attribute *ea)
150{ 143{
151 return le32_to_cpu(get_unaligned((secno *)((char *)ea + 9 + ea->namelen))); 144 return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 9 + ea->namelen)));
152} 145}
153 146
154static inline secno ea_len(struct extended_attribute *ea) 147static inline secno ea_len(struct extended_attribute *ea)
155{ 148{
156 return le32_to_cpu(get_unaligned((secno *)((char *)ea + 5 + ea->namelen))); 149 return le32_to_cpu(get_unaligned((__le32 *)((char *)ea + 5 + ea->namelen)));
157} 150}
158 151
159static inline char *ea_data(struct extended_attribute *ea) 152static inline char *ea_data(struct extended_attribute *ea)
@@ -178,7 +171,7 @@ static inline void copy_de(struct hpfs_dirent *dst, struct hpfs_dirent *src)
178 dst->not_8x3 = n; 171 dst->not_8x3 = n;
179} 172}
180 173
181static inline unsigned tstbits(u32 *bmp, unsigned b, unsigned n) 174static inline unsigned tstbits(__le32 *bmp, unsigned b, unsigned n)
182{ 175{
183 int i; 176 int i;
184 if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n; 177 if ((b >= 0x4000) || (b + n - 1 >= 0x4000)) return n;
@@ -275,10 +268,10 @@ void hpfs_evict_inode(struct inode *);
275 268
276/* map.c */ 269/* map.c */
277 270
278unsigned *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *); 271__le32 *hpfs_map_dnode_bitmap(struct super_block *, struct quad_buffer_head *);
279unsigned *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *); 272__le32 *hpfs_map_bitmap(struct super_block *, unsigned, struct quad_buffer_head *, char *);
280unsigned char *hpfs_load_code_page(struct super_block *, secno); 273unsigned char *hpfs_load_code_page(struct super_block *, secno);
281secno *hpfs_load_bitmap_directory(struct super_block *, secno bmp); 274__le32 *hpfs_load_bitmap_directory(struct super_block *, secno bmp);
282struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **); 275struct fnode *hpfs_map_fnode(struct super_block *s, ino_t, struct buffer_head **);
283struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **); 276struct anode *hpfs_map_anode(struct super_block *s, anode_secno, struct buffer_head **);
284struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *); 277struct dnode *hpfs_map_dnode(struct super_block *s, dnode_secno, struct quad_buffer_head *);
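hpfs_fn.h propagates the typing to in-memory pointers: the bitmap directory and the bitmap/dnode-bitmap map functions now traffic in __le32 *, and tstbits() takes a __le32 * word array. A hedged sketch of how a caller would test one bit in such a bitmap (the helper name is illustrative; the real tstbits() body is not shown in this hunk):

/* Illustrative: test bit b in a little-endian on-disk bitmap. */
static int bmp_test_bit(const __le32 *bmp, unsigned b)
{
        return (le32_to_cpu(bmp[b >> 5]) >> (b & 31)) & 1;
}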
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index b43066cbdc6a..ed671e0ea784 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -110,7 +110,7 @@ void hpfs_read_inode(struct inode *i)
110 } 110 }
111 } 111 }
112 } 112 }
113 if (fnode->dirflag) { 113 if (fnode_is_dir(fnode)) {
114 int n_dnodes, n_subdirs; 114 int n_dnodes, n_subdirs;
115 i->i_mode |= S_IFDIR; 115 i->i_mode |= S_IFDIR;
116 i->i_op = &hpfs_dir_iops; 116 i->i_op = &hpfs_dir_iops;
diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c
index a790821366a7..4acb19d78359 100644
--- a/fs/hpfs/map.c
+++ b/fs/hpfs/map.c
@@ -8,12 +8,12 @@
8 8
9#include "hpfs_fn.h" 9#include "hpfs_fn.h"
10 10
11unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh) 11__le32 *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh)
12{ 12{
13 return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0); 13 return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0);
14} 14}
15 15
16unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, 16__le32 *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block,
17 struct quad_buffer_head *qbh, char *id) 17 struct quad_buffer_head *qbh, char *id)
18{ 18{
19 secno sec; 19 secno sec;
@@ -89,18 +89,18 @@ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps)
89 return cp_table; 89 return cp_table;
90} 90}
91 91
92secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp) 92__le32 *hpfs_load_bitmap_directory(struct super_block *s, secno bmp)
93{ 93{
94 struct buffer_head *bh; 94 struct buffer_head *bh;
95 int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21; 95 int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21;
96 int i; 96 int i;
97 secno *b; 97 __le32 *b;
98 if (!(b = kmalloc(n * 512, GFP_KERNEL))) { 98 if (!(b = kmalloc(n * 512, GFP_KERNEL))) {
99 printk("HPFS: can't allocate memory for bitmap directory\n"); 99 printk("HPFS: can't allocate memory for bitmap directory\n");
100 return NULL; 100 return NULL;
101 } 101 }
102 for (i=0;i<n;i++) { 102 for (i=0;i<n;i++) {
103 secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); 103 __le32 *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1);
104 if (!d) { 104 if (!d) {
105 kfree(b); 105 kfree(b);
106 return NULL; 106 return NULL;
@@ -130,16 +130,16 @@ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_hea
130 (unsigned long)ino); 130 (unsigned long)ino);
131 goto bail; 131 goto bail;
132 } 132 }
133 if (!fnode->dirflag) { 133 if (!fnode_is_dir(fnode)) {
134 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != 134 if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes !=
135 (fnode->btree.internal ? 12 : 8)) { 135 (bp_internal(&fnode->btree) ? 12 : 8)) {
136 hpfs_error(s, 136 hpfs_error(s,
137 "bad number of nodes in fnode %08lx", 137 "bad number of nodes in fnode %08lx",
138 (unsigned long)ino); 138 (unsigned long)ino);
139 goto bail; 139 goto bail;
140 } 140 }
141 if (le16_to_cpu(fnode->btree.first_free) != 141 if (le16_to_cpu(fnode->btree.first_free) !=
142 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 8 : 12)) { 142 8 + fnode->btree.n_used_nodes * (bp_internal(&fnode->btree) ? 8 : 12)) {
143 hpfs_error(s, 143 hpfs_error(s,
144 "bad first_free pointer in fnode %08lx", 144 "bad first_free pointer in fnode %08lx",
145 (unsigned long)ino); 145 (unsigned long)ino);
@@ -187,12 +187,12 @@ struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buff
187 goto bail; 187 goto bail;
188 } 188 }
189 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != 189 if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes !=
190 (anode->btree.internal ? 60 : 40)) { 190 (bp_internal(&anode->btree) ? 60 : 40)) {
191 hpfs_error(s, "bad number of nodes in anode %08x", ano); 191 hpfs_error(s, "bad number of nodes in anode %08x", ano);
192 goto bail; 192 goto bail;
193 } 193 }
194 if (le16_to_cpu(anode->btree.first_free) != 194 if (le16_to_cpu(anode->btree.first_free) !=
195 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 8 : 12)) { 195 8 + anode->btree.n_used_nodes * (bp_internal(&anode->btree) ? 8 : 12)) {
196 hpfs_error(s, "bad first_free pointer in anode %08x", ano); 196 hpfs_error(s, "bad first_free pointer in anode %08x", ano);
197 goto bail; 197 goto bail;
198 } 198 }
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 30dd7b10b507..9083ef8af58c 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -70,7 +70,7 @@ static int hpfs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode)
70 fnode->len = len; 70 fnode->len = len;
71 memcpy(fnode->name, name, len > 15 ? 15 : len); 71 memcpy(fnode->name, name, len > 15 ? 15 : len);
72 fnode->up = cpu_to_le32(dir->i_ino); 72 fnode->up = cpu_to_le32(dir->i_ino);
73 fnode->dirflag = 1; 73 fnode->flags |= FNODE_dir;
74 fnode->btree.n_free_nodes = 7; 74 fnode->btree.n_free_nodes = 7;
75 fnode->btree.n_used_nodes = 1; 75 fnode->btree.n_used_nodes = 1;
76 fnode->btree.first_free = cpu_to_le16(0x14); 76 fnode->btree.first_free = cpu_to_le16(0x14);
diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c
index 54f6eccb79d9..706a12c083ea 100644
--- a/fs/hpfs/super.c
+++ b/fs/hpfs/super.c
@@ -572,7 +572,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
572 mark_buffer_dirty(bh2); 572 mark_buffer_dirty(bh2);
573 } 573 }
574 574
575 if (le32_to_cpu(spareblock->hotfixes_used) || le32_to_cpu(spareblock->n_spares_used)) { 575 if (spareblock->hotfixes_used || spareblock->n_spares_used) {
576 if (errs >= 2) { 576 if (errs >= 2) {
577 printk("HPFS: Hotfixes not supported here, try chkdsk\n"); 577 printk("HPFS: Hotfixes not supported here, try chkdsk\n");
578 mark_dirty(s, 0); 578 mark_dirty(s, 0);
@@ -645,7 +645,7 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent)
645 root->i_mtime.tv_nsec = 0; 645 root->i_mtime.tv_nsec = 0;
646 root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date)); 646 root->i_ctime.tv_sec = local_to_gmt(s, le32_to_cpu(de->creation_date));
647 root->i_ctime.tv_nsec = 0; 647 root->i_ctime.tv_nsec = 0;
648 hpfs_i(root)->i_ea_size = le16_to_cpu(de->ea_size); 648 hpfs_i(root)->i_ea_size = le32_to_cpu(de->ea_size);
649 hpfs_i(root)->i_parent_dir = root->i_ino; 649 hpfs_i(root)->i_parent_dir = root->i_ino;
650 if (root->i_size == -1) 650 if (root->i_size == -1)
651 root->i_size = 2048; 651 root->i_size = 2048;
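Two small correctness fixes fall out of the retyping in super.c: the hotfix test checks the raw fields for non-zero (a zero test needs no conversion), and the root directory's ea_size, a 32-bit on-disk field, is now read with le32_to_cpu() rather than le16_to_cpu(), which mixes 16- and 32-bit conversion and mis-reads the field on big-endian hosts. Sketch of the distinction:

/* Fragment: de->ea_size is a 32-bit little-endian field. */
unsigned ea_size = le32_to_cpu(de->ea_size);   /* correct width and byte order */
/* le16_to_cpu(de->ea_size) converts only 16 bits and gives the wrong value
 * on big-endian machines, which is what the hunk above fixes. */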
diff --git a/fs/inode.c b/fs/inode.c
index 6bc8761cc333..c99163b1b310 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -1487,10 +1487,30 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode,
1487 return 0; 1487 return 0;
1488} 1488}
1489 1489
1490/*
 1491 * This does the actual work of updating an inode's time or version. Must have
 1492 * called mnt_want_write() before calling this.
1493 */
1494static int update_time(struct inode *inode, struct timespec *time, int flags)
1495{
1496 if (inode->i_op->update_time)
1497 return inode->i_op->update_time(inode, time, flags);
1498
1499 if (flags & S_ATIME)
1500 inode->i_atime = *time;
1501 if (flags & S_VERSION)
1502 inode_inc_iversion(inode);
1503 if (flags & S_CTIME)
1504 inode->i_ctime = *time;
1505 if (flags & S_MTIME)
1506 inode->i_mtime = *time;
1507 mark_inode_dirty_sync(inode);
1508 return 0;
1509}
1510
1490/** 1511/**
1491 * touch_atime - update the access time 1512 * touch_atime - update the access time
1492 * @mnt: mount the inode is accessed on 1513 * @path: the &struct path to update
1493 * @dentry: dentry accessed
1494 * 1514 *
1495 * Update the accessed time on an inode and mark it for writeback. 1515 * Update the accessed time on an inode and mark it for writeback.
1496 * This function automatically handles read only file systems and media, 1516 * This function automatically handles read only file systems and media,
@@ -1525,12 +1545,83 @@ void touch_atime(struct path *path)
1525 if (mnt_want_write(mnt)) 1545 if (mnt_want_write(mnt))
1526 return; 1546 return;
1527 1547
1528 inode->i_atime = now; 1548 /*
1529 mark_inode_dirty_sync(inode); 1549 * File systems can error out when updating inodes if they need to
1550 * allocate new space to modify an inode (such is the case for
1551 * Btrfs), but since we touch atime while walking down the path we
1552 * really don't care if we failed to update the atime of the file,
1553 * so just ignore the return value.
1554 */
1555 update_time(inode, &now, S_ATIME);
1530 mnt_drop_write(mnt); 1556 mnt_drop_write(mnt);
1531} 1557}
1532EXPORT_SYMBOL(touch_atime); 1558EXPORT_SYMBOL(touch_atime);
1533 1559
1560/*
1561 * The logic we want is
1562 *
1563 * if suid or (sgid and xgrp)
1564 * remove privs
1565 */
1566int should_remove_suid(struct dentry *dentry)
1567{
1568 umode_t mode = dentry->d_inode->i_mode;
1569 int kill = 0;
1570
1571 /* suid always must be killed */
1572 if (unlikely(mode & S_ISUID))
1573 kill = ATTR_KILL_SUID;
1574
1575 /*
1576 * sgid without any exec bits is just a mandatory locking mark; leave
1577 * it alone. If some exec bits are set, it's a real sgid; kill it.
1578 */
1579 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1580 kill |= ATTR_KILL_SGID;
1581
1582 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1583 return kill;
1584
1585 return 0;
1586}
1587EXPORT_SYMBOL(should_remove_suid);
1588
1589static int __remove_suid(struct dentry *dentry, int kill)
1590{
1591 struct iattr newattrs;
1592
1593 newattrs.ia_valid = ATTR_FORCE | kill;
1594 return notify_change(dentry, &newattrs);
1595}
1596
1597int file_remove_suid(struct file *file)
1598{
1599 struct dentry *dentry = file->f_path.dentry;
1600 struct inode *inode = dentry->d_inode;
1601 int killsuid;
1602 int killpriv;
1603 int error = 0;
1604
1605 /* Fast path for nothing security related */
1606 if (IS_NOSEC(inode))
1607 return 0;
1608
1609 killsuid = should_remove_suid(dentry);
1610 killpriv = security_inode_need_killpriv(dentry);
1611
1612 if (killpriv < 0)
1613 return killpriv;
1614 if (killpriv)
1615 error = security_inode_killpriv(dentry);
1616 if (!error && killsuid)
1617 error = __remove_suid(dentry, killsuid);
1618 if (!error && (inode->i_sb->s_flags & MS_NOSEC))
1619 inode->i_flags |= S_NOSEC;
1620
1621 return error;
1622}
1623EXPORT_SYMBOL(file_remove_suid);
1624
1534/** 1625/**
1535 * file_update_time - update mtime and ctime time 1626 * file_update_time - update mtime and ctime time
1536 * @file: file accessed 1627 * @file: file accessed
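The @@ -1487 and @@ -1525 hunks consolidate inode timestamp updates into a single update_time() helper that a filesystem can override via inode->i_op->update_time(), and move should_remove_suid()/file_remove_suid() into fs/inode.c. touch_atime() deliberately ignores the helper's return value, since failing to bump atime during a path walk is harmless. A hedged sketch of how a filesystem that must allocate space for inode updates might hook the new operation (all names below are illustrative, not from this diff):

/* Illustrative only: a filesystem supplying its own update_time(). */
static int myfs_update_time(struct inode *inode, struct timespec *time, int flags)
{
        int err = myfs_reserve_metadata(inode);   /* hypothetical helper */
        if (err)
                return err;                       /* propagated to file_update_time() */
        if (flags & S_ATIME)
                inode->i_atime = *time;
        if (flags & S_MTIME)
                inode->i_mtime = *time;
        if (flags & S_CTIME)
                inode->i_ctime = *time;
        if (flags & S_VERSION)
                inode_inc_iversion(inode);
        mark_inode_dirty_sync(inode);
        return 0;
}

static const struct inode_operations myfs_inode_ops = {
        .update_time = myfs_update_time,
        /* ... */
};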
@@ -1540,18 +1631,20 @@ EXPORT_SYMBOL(touch_atime);
1540 * usage in the file write path of filesystems, and filesystems may 1631 * usage in the file write path of filesystems, and filesystems may
1541 * choose to explicitly ignore update via this function with the 1632 * choose to explicitly ignore update via this function with the
1542 * S_NOCMTIME inode flag, e.g. for network filesystem where these 1633 * S_NOCMTIME inode flag, e.g. for network filesystem where these
1543 * timestamps are handled by the server. 1634 * timestamps are handled by the server. This can return an error for
 1635 * file systems that need to allocate space in order to update an inode.
1544 */ 1636 */
1545 1637
1546void file_update_time(struct file *file) 1638int file_update_time(struct file *file)
1547{ 1639{
1548 struct inode *inode = file->f_path.dentry->d_inode; 1640 struct inode *inode = file->f_path.dentry->d_inode;
1549 struct timespec now; 1641 struct timespec now;
1550 enum { S_MTIME = 1, S_CTIME = 2, S_VERSION = 4 } sync_it = 0; 1642 int sync_it = 0;
1643 int ret;
1551 1644
1552 /* First try to exhaust all avenues to not sync */ 1645 /* First try to exhaust all avenues to not sync */
1553 if (IS_NOCMTIME(inode)) 1646 if (IS_NOCMTIME(inode))
1554 return; 1647 return 0;
1555 1648
1556 now = current_fs_time(inode->i_sb); 1649 now = current_fs_time(inode->i_sb);
1557 if (!timespec_equal(&inode->i_mtime, &now)) 1650 if (!timespec_equal(&inode->i_mtime, &now))
@@ -1564,21 +1657,16 @@ void file_update_time(struct file *file)
1564 sync_it |= S_VERSION; 1657 sync_it |= S_VERSION;
1565 1658
1566 if (!sync_it) 1659 if (!sync_it)
1567 return; 1660 return 0;
1568 1661
1569 /* Finally allowed to write? Takes lock. */ 1662 /* Finally allowed to write? Takes lock. */
1570 if (mnt_want_write_file(file)) 1663 if (mnt_want_write_file(file))
1571 return; 1664 return 0;
1572 1665
1573 /* Only change inode inside the lock region */ 1666 ret = update_time(inode, &now, sync_it);
1574 if (sync_it & S_VERSION)
1575 inode_inc_iversion(inode);
1576 if (sync_it & S_CTIME)
1577 inode->i_ctime = now;
1578 if (sync_it & S_MTIME)
1579 inode->i_mtime = now;
1580 mark_inode_dirty_sync(inode);
1581 mnt_drop_write_file(file); 1667 mnt_drop_write_file(file);
1668
1669 return ret;
1582} 1670}
1583EXPORT_SYMBOL(file_update_time); 1671EXPORT_SYMBOL(file_update_time);
1584 1672
@@ -1748,3 +1836,50 @@ bool inode_owner_or_capable(const struct inode *inode)
1748 return false; 1836 return false;
1749} 1837}
1750EXPORT_SYMBOL(inode_owner_or_capable); 1838EXPORT_SYMBOL(inode_owner_or_capable);
1839
1840/*
1841 * Direct i/o helper functions
1842 */
1843static void __inode_dio_wait(struct inode *inode)
1844{
1845 wait_queue_head_t *wq = bit_waitqueue(&inode->i_state, __I_DIO_WAKEUP);
1846 DEFINE_WAIT_BIT(q, &inode->i_state, __I_DIO_WAKEUP);
1847
1848 do {
1849 prepare_to_wait(wq, &q.wait, TASK_UNINTERRUPTIBLE);
1850 if (atomic_read(&inode->i_dio_count))
1851 schedule();
1852 } while (atomic_read(&inode->i_dio_count));
1853 finish_wait(wq, &q.wait);
1854}
1855
1856/**
1857 * inode_dio_wait - wait for outstanding DIO requests to finish
1858 * @inode: inode to wait for
1859 *
1860 * Waits for all pending direct I/O requests to finish so that we can
1861 * proceed with a truncate or equivalent operation.
1862 *
1863 * Must be called under a lock that serializes taking new references
1864 * to i_dio_count, usually by inode->i_mutex.
1865 */
1866void inode_dio_wait(struct inode *inode)
1867{
1868 if (atomic_read(&inode->i_dio_count))
1869 __inode_dio_wait(inode);
1870}
1871EXPORT_SYMBOL(inode_dio_wait);
1872
1873/*
1874 * inode_dio_done - signal finish of a direct I/O requests
1875 * @inode: inode the direct I/O happens on
1876 *
1877 * This is called once we've finished processing a direct I/O request,
1878 * and is used to wake up callers waiting for direct I/O to be quiesced.
1879 */
1880void inode_dio_done(struct inode *inode)
1881{
1882 if (atomic_dec_and_test(&inode->i_dio_count))
1883 wake_up_bit(&inode->i_state, __I_DIO_WAKEUP);
1884}
1885EXPORT_SYMBOL(inode_dio_done);
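The final fs/inode.c hunk hoists the direct-I/O quiesce helpers into the VFS: inode_dio_wait() sleeps on __I_DIO_WAKEUP until i_dio_count drops to zero, and inode_dio_done() wakes waiters when the last in-flight request completes. As the comment above states, the caller must hold a lock (typically i_mutex) that prevents new DIO references from being taken. A hedged usage sketch for a truncate path:

/* Illustrative caller: drain in-flight direct I/O before truncating. */
mutex_lock(&inode->i_mutex);        /* serializes new i_dio_count references */
inode_dio_wait(inode);              /* wait for pending direct I/O */
/* ... perform the truncate ... */
mutex_unlock(&inode->i_mutex);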
diff --git a/fs/internal.h b/fs/internal.h
index 9962c59ba280..18bc216ea09d 100644
--- a/fs/internal.h
+++ b/fs/internal.h
@@ -56,7 +56,7 @@ extern int sb_prepare_remount_readonly(struct super_block *);
56 56
57extern void __init mnt_init(void); 57extern void __init mnt_init(void);
58 58
59DECLARE_BRLOCK(vfsmount_lock); 59extern struct lglock vfsmount_lock;
60 60
61 61
62/* 62/*
@@ -100,6 +100,7 @@ extern struct file *do_file_open_root(struct dentry *, struct vfsmount *,
100 100
101extern long do_handle_open(int mountdirfd, 101extern long do_handle_open(int mountdirfd,
102 struct file_handle __user *ufh, int open_flag); 102 struct file_handle __user *ufh, int open_flag);
103extern int open_check_o_direct(struct file *f);
103 104
104/* 105/*
105 * inode.c 106 * inode.c
diff --git a/fs/isofs/export.c b/fs/isofs/export.c
index dd4687ff30d0..aa4356d09eee 100644
--- a/fs/isofs/export.c
+++ b/fs/isofs/export.c
@@ -107,12 +107,11 @@ static struct dentry *isofs_export_get_parent(struct dentry *child)
107} 107}
108 108
109static int 109static int
110isofs_export_encode_fh(struct dentry *dentry, 110isofs_export_encode_fh(struct inode *inode,
111 __u32 *fh32, 111 __u32 *fh32,
112 int *max_len, 112 int *max_len,
113 int connectable) 113 struct inode *parent)
114{ 114{
115 struct inode * inode = dentry->d_inode;
116 struct iso_inode_info * ei = ISOFS_I(inode); 115 struct iso_inode_info * ei = ISOFS_I(inode);
117 int len = *max_len; 116 int len = *max_len;
118 int type = 1; 117 int type = 1;
@@ -124,7 +123,7 @@ isofs_export_encode_fh(struct dentry *dentry,
124 * offset of the inode and the upper 16 bits of fh32[1] to 123 * offset of the inode and the upper 16 bits of fh32[1] to
125 * hold the offset of the parent. 124 * hold the offset of the parent.
126 */ 125 */
127 if (connectable && (len < 5)) { 126 if (parent && (len < 5)) {
128 *max_len = 5; 127 *max_len = 5;
129 return 255; 128 return 255;
130 } else if (len < 3) { 129 } else if (len < 3) {
@@ -136,16 +135,12 @@ isofs_export_encode_fh(struct dentry *dentry,
136 fh32[0] = ei->i_iget5_block; 135 fh32[0] = ei->i_iget5_block;
137 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */ 136 fh16[2] = (__u16)ei->i_iget5_offset; /* fh16 [sic] */
138 fh32[2] = inode->i_generation; 137 fh32[2] = inode->i_generation;
139 if (connectable && !S_ISDIR(inode->i_mode)) { 138 if (parent) {
140 struct inode *parent;
141 struct iso_inode_info *eparent; 139 struct iso_inode_info *eparent;
142 spin_lock(&dentry->d_lock);
143 parent = dentry->d_parent->d_inode;
144 eparent = ISOFS_I(parent); 140 eparent = ISOFS_I(parent);
145 fh32[3] = eparent->i_iget5_block; 141 fh32[3] = eparent->i_iget5_block;
146 fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */ 142 fh16[3] = (__u16)eparent->i_iget5_offset; /* fh16 [sic] */
147 fh32[4] = parent->i_generation; 143 fh32[4] = parent->i_generation;
148 spin_unlock(&dentry->d_lock);
149 len = 5; 144 len = 5;
150 type = 2; 145 type = 2;
151 } 146 }
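The isofs hunk tracks a VFS-wide change to the export encode_fh callback: it now receives the inode and, when a connectable handle is wanted, the parent inode directly, so the dentry argument and the d_lock/d_parent dance disappear. The callback shape the isofs implementation above suggests:

/* New encode_fh signature: parent is non-NULL when a connectable
 * file handle is requested. */
int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
                 struct inode *parent);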
diff --git a/fs/jbd2/Kconfig b/fs/jbd2/Kconfig
index f32f346f4b0a..69a48c2944da 100644
--- a/fs/jbd2/Kconfig
+++ b/fs/jbd2/Kconfig
@@ -1,6 +1,8 @@
1config JBD2 1config JBD2
2 tristate 2 tristate
3 select CRC32 3 select CRC32
4 select CRYPTO
5 select CRYPTO_CRC32C
4 help 6 help
5 This is a generic journaling layer for block devices that support 7 This is a generic journaling layer for block devices that support
6 both 32-bit and 64-bit block numbers. It is currently used by 8 both 32-bit and 64-bit block numbers. It is currently used by
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 840f70f50792..216f4299f65e 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -85,6 +85,24 @@ nope:
85 __brelse(bh); 85 __brelse(bh);
86} 86}
87 87
88static void jbd2_commit_block_csum_set(journal_t *j,
89 struct journal_head *descriptor)
90{
91 struct commit_header *h;
92 __u32 csum;
93
94 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
95 return;
96
97 h = (struct commit_header *)(jh2bh(descriptor)->b_data);
98 h->h_chksum_type = 0;
99 h->h_chksum_size = 0;
100 h->h_chksum[0] = 0;
101 csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
102 j->j_blocksize);
103 h->h_chksum[0] = cpu_to_be32(csum);
104}
105
88/* 106/*
89 * Done it all: now submit the commit record. We should have 107 * Done it all: now submit the commit record. We should have
90 * cleaned up our previous buffers by now, so if we are in abort 108 * cleaned up our previous buffers by now, so if we are in abort
@@ -128,6 +146,7 @@ static int journal_submit_commit_record(journal_t *journal,
128 tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE; 146 tmp->h_chksum_size = JBD2_CRC32_CHKSUM_SIZE;
129 tmp->h_chksum[0] = cpu_to_be32(crc32_sum); 147 tmp->h_chksum[0] = cpu_to_be32(crc32_sum);
130 } 148 }
149 jbd2_commit_block_csum_set(journal, descriptor);
131 150
132 JBUFFER_TRACE(descriptor, "submit commit block"); 151 JBUFFER_TRACE(descriptor, "submit commit block");
133 lock_buffer(bh); 152 lock_buffer(bh);
@@ -301,6 +320,44 @@ static void write_tag_block(int tag_bytes, journal_block_tag_t *tag,
301 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1); 320 tag->t_blocknr_high = cpu_to_be32((block >> 31) >> 1);
302} 321}
303 322
323static void jbd2_descr_block_csum_set(journal_t *j,
324 struct journal_head *descriptor)
325{
326 struct jbd2_journal_block_tail *tail;
327 __u32 csum;
328
329 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
330 return;
331
332 tail = (struct jbd2_journal_block_tail *)
333 (jh2bh(descriptor)->b_data + j->j_blocksize -
334 sizeof(struct jbd2_journal_block_tail));
335 tail->t_checksum = 0;
336 csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
337 j->j_blocksize);
338 tail->t_checksum = cpu_to_be32(csum);
339}
340
341static void jbd2_block_tag_csum_set(journal_t *j, journal_block_tag_t *tag,
342 struct buffer_head *bh, __u32 sequence)
343{
344 struct page *page = bh->b_page;
345 __u8 *addr;
346 __u32 csum;
347
348 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
349 return;
350
351 sequence = cpu_to_be32(sequence);
352 addr = kmap_atomic(page, KM_USER0);
353 csum = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
354 sizeof(sequence));
355 csum = jbd2_chksum(j, csum, addr + offset_in_page(bh->b_data),
356 bh->b_size);
357 kunmap_atomic(addr, KM_USER0);
358
359 tag->t_checksum = cpu_to_be32(csum);
360}
304/* 361/*
305 * jbd2_journal_commit_transaction 362 * jbd2_journal_commit_transaction
306 * 363 *
@@ -334,6 +391,10 @@ void jbd2_journal_commit_transaction(journal_t *journal)
334 unsigned long first_block; 391 unsigned long first_block;
335 tid_t first_tid; 392 tid_t first_tid;
336 int update_tail; 393 int update_tail;
394 int csum_size = 0;
395
396 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
397 csum_size = sizeof(struct jbd2_journal_block_tail);
337 398
338 /* 399 /*
339 * First job: lock down the current transaction and wait for 400 * First job: lock down the current transaction and wait for
@@ -627,7 +688,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
627 688
628 tag = (journal_block_tag_t *) tagp; 689 tag = (journal_block_tag_t *) tagp;
629 write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr); 690 write_tag_block(tag_bytes, tag, jh2bh(jh)->b_blocknr);
630 tag->t_flags = cpu_to_be32(tag_flag); 691 tag->t_flags = cpu_to_be16(tag_flag);
692 jbd2_block_tag_csum_set(journal, tag, jh2bh(new_jh),
693 commit_transaction->t_tid);
631 tagp += tag_bytes; 694 tagp += tag_bytes;
632 space_left -= tag_bytes; 695 space_left -= tag_bytes;
633 696
@@ -643,7 +706,7 @@ void jbd2_journal_commit_transaction(journal_t *journal)
643 706
644 if (bufs == journal->j_wbufsize || 707 if (bufs == journal->j_wbufsize ||
645 commit_transaction->t_buffers == NULL || 708 commit_transaction->t_buffers == NULL ||
646 space_left < tag_bytes + 16) { 709 space_left < tag_bytes + 16 + csum_size) {
647 710
648 jbd_debug(4, "JBD2: Submit %d IOs\n", bufs); 711 jbd_debug(4, "JBD2: Submit %d IOs\n", bufs);
649 712
@@ -651,8 +714,9 @@ void jbd2_journal_commit_transaction(journal_t *journal)
651 submitting the IOs. "tag" still points to 714 submitting the IOs. "tag" still points to
652 the last tag we set up. */ 715 the last tag we set up. */
653 716
654 tag->t_flags |= cpu_to_be32(JBD2_FLAG_LAST_TAG); 717 tag->t_flags |= cpu_to_be16(JBD2_FLAG_LAST_TAG);
655 718
719 jbd2_descr_block_csum_set(journal, descriptor);
656start_journal_io: 720start_journal_io:
657 for (i = 0; i < bufs; i++) { 721 for (i = 0; i < bufs; i++) {
658 struct buffer_head *bh = wbuf[i]; 722 struct buffer_head *bh = wbuf[i];
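The new *_csum_set helpers in commit.c all follow one pattern: zero the checksum field, CRC32C the whole journal block with the per-journal seed, then store the result big-endian in the field that was just zeroed. The userspace sketch below shows that pattern over a generic block; the bitwise Castagnoli CRC stands in for the kernel's crc32c shash, and the block layout and seed handling are simplified assumptions.

/* Userspace sketch of the "zero field, checksum whole block, store
 * big-endian" pattern used by the jbd2 *_csum_set helpers. */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>	/* htonl() as a stand-in for cpu_to_be32() */

uint32_t crc32c(uint32_t crc, const void *data, size_t len)
{
	const uint8_t *p = data;

	while (len--) {
		crc ^= *p++;
		for (int i = 0; i < 8; i++)	/* reflected CRC-32C, poly 0x1EDC6F41 */
			crc = (crc >> 1) ^ (0x82F63B78 & -(crc & 1));
	}
	return crc;
}

struct block_tail {		/* mirrors jbd2_journal_block_tail: one big-endian word */
	uint32_t t_checksum;
};

static void tail_csum_set(uint8_t *block, size_t blocksize, uint32_t seed)
{
	struct block_tail *tail =
		(struct block_tail *)(block + blocksize - sizeof(*tail));

	tail->t_checksum = 0;				/* checksum covers itself as zero */
	tail->t_checksum = htonl(crc32c(seed, block, blocksize));
}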
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 1afb701622b0..e9a3c4c85594 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -97,6 +97,43 @@ EXPORT_SYMBOL(jbd2_inode_cache);
97static void __journal_abort_soft (journal_t *journal, int errno); 97static void __journal_abort_soft (journal_t *journal, int errno);
98static int jbd2_journal_create_slab(size_t slab_size); 98static int jbd2_journal_create_slab(size_t slab_size);
99 99
100/* Checksumming functions */
101int jbd2_verify_csum_type(journal_t *j, journal_superblock_t *sb)
102{
103 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
104 return 1;
105
106 return sb->s_checksum_type == JBD2_CRC32C_CHKSUM;
107}
108
109static __u32 jbd2_superblock_csum(journal_t *j, journal_superblock_t *sb)
110{
111 __u32 csum, old_csum;
112
113 old_csum = sb->s_checksum;
114 sb->s_checksum = 0;
115 csum = jbd2_chksum(j, ~0, (char *)sb, sizeof(journal_superblock_t));
116 sb->s_checksum = old_csum;
117
118 return cpu_to_be32(csum);
119}
120
121int jbd2_superblock_csum_verify(journal_t *j, journal_superblock_t *sb)
122{
123 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
124 return 1;
125
126 return sb->s_checksum == jbd2_superblock_csum(j, sb);
127}
128
129void jbd2_superblock_csum_set(journal_t *j, journal_superblock_t *sb)
130{
131 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
132 return;
133
134 sb->s_checksum = jbd2_superblock_csum(j, sb);
135}
136
100/* 137/*
101 * Helper function used to manage commit timeouts 138 * Helper function used to manage commit timeouts
102 */ 139 */
@@ -1348,6 +1385,7 @@ static void jbd2_journal_update_sb_errno(journal_t *journal)
1348 jbd_debug(1, "JBD2: updating superblock error (errno %d)\n", 1385 jbd_debug(1, "JBD2: updating superblock error (errno %d)\n",
1349 journal->j_errno); 1386 journal->j_errno);
1350 sb->s_errno = cpu_to_be32(journal->j_errno); 1387 sb->s_errno = cpu_to_be32(journal->j_errno);
1388 jbd2_superblock_csum_set(journal, sb);
1351 read_unlock(&journal->j_state_lock); 1389 read_unlock(&journal->j_state_lock);
1352 1390
1353 jbd2_write_superblock(journal, WRITE_SYNC); 1391 jbd2_write_superblock(journal, WRITE_SYNC);
@@ -1376,6 +1414,9 @@ static int journal_get_superblock(journal_t *journal)
1376 } 1414 }
1377 } 1415 }
1378 1416
1417 if (buffer_verified(bh))
1418 return 0;
1419
1379 sb = journal->j_superblock; 1420 sb = journal->j_superblock;
1380 1421
1381 err = -EINVAL; 1422 err = -EINVAL;
@@ -1413,6 +1454,43 @@ static int journal_get_superblock(journal_t *journal)
1413 goto out; 1454 goto out;
1414 } 1455 }
1415 1456
1457 if (JBD2_HAS_COMPAT_FEATURE(journal, JBD2_FEATURE_COMPAT_CHECKSUM) &&
1458 JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
1459 /* Can't have checksum v1 and v2 on at the same time! */
1460 printk(KERN_ERR "JBD: Can't enable checksumming v1 and v2 "
1461 "at the same time!\n");
1462 goto out;
1463 }
1464
1465 if (!jbd2_verify_csum_type(journal, sb)) {
1466 printk(KERN_ERR "JBD: Unknown checksum type\n");
1467 goto out;
1468 }
1469
1470 /* Load the checksum driver */
1471 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
1472 journal->j_chksum_driver = crypto_alloc_shash("crc32c", 0, 0);
1473 if (IS_ERR(journal->j_chksum_driver)) {
1474 printk(KERN_ERR "JBD: Cannot load crc32c driver.\n");
1475 err = PTR_ERR(journal->j_chksum_driver);
1476 journal->j_chksum_driver = NULL;
1477 goto out;
1478 }
1479 }
1480
1481 /* Check superblock checksum */
1482 if (!jbd2_superblock_csum_verify(journal, sb)) {
1483 printk(KERN_ERR "JBD: journal checksum error\n");
1484 goto out;
1485 }
1486
1487 /* Precompute checksum seed for all metadata */
1488 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
1489 journal->j_csum_seed = jbd2_chksum(journal, ~0, sb->s_uuid,
1490 sizeof(sb->s_uuid));
1491
1492 set_buffer_verified(bh);
1493
1416 return 0; 1494 return 0;
1417 1495
1418out: 1496out:
@@ -1564,6 +1642,8 @@ int jbd2_journal_destroy(journal_t *journal)
1564 iput(journal->j_inode); 1642 iput(journal->j_inode);
1565 if (journal->j_revoke) 1643 if (journal->j_revoke)
1566 jbd2_journal_destroy_revoke(journal); 1644 jbd2_journal_destroy_revoke(journal);
1645 if (journal->j_chksum_driver)
1646 crypto_free_shash(journal->j_chksum_driver);
1567 kfree(journal->j_wbuf); 1647 kfree(journal->j_wbuf);
1568 kfree(journal); 1648 kfree(journal);
1569 1649
@@ -1653,6 +1733,10 @@ int jbd2_journal_check_available_features (journal_t *journal, unsigned long com
1653int jbd2_journal_set_features (journal_t *journal, unsigned long compat, 1733int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1654 unsigned long ro, unsigned long incompat) 1734 unsigned long ro, unsigned long incompat)
1655{ 1735{
1736#define INCOMPAT_FEATURE_ON(f) \
1737 ((incompat & (f)) && !(sb->s_feature_incompat & cpu_to_be32(f)))
1738#define COMPAT_FEATURE_ON(f) \
1739 ((compat & (f)) && !(sb->s_feature_compat & cpu_to_be32(f)))
1656 journal_superblock_t *sb; 1740 journal_superblock_t *sb;
1657 1741
1658 if (jbd2_journal_check_used_features(journal, compat, ro, incompat)) 1742 if (jbd2_journal_check_used_features(journal, compat, ro, incompat))
@@ -1661,16 +1745,54 @@ int jbd2_journal_set_features (journal_t *journal, unsigned long compat,
1661 if (!jbd2_journal_check_available_features(journal, compat, ro, incompat)) 1745 if (!jbd2_journal_check_available_features(journal, compat, ro, incompat))
1662 return 0; 1746 return 0;
1663 1747
1748 /* Asking for checksumming v2 and v1? Only give them v2. */
1749 if (incompat & JBD2_FEATURE_INCOMPAT_CSUM_V2 &&
1750 compat & JBD2_FEATURE_COMPAT_CHECKSUM)
1751 compat &= ~JBD2_FEATURE_COMPAT_CHECKSUM;
1752
1664 jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n", 1753 jbd_debug(1, "Setting new features 0x%lx/0x%lx/0x%lx\n",
1665 compat, ro, incompat); 1754 compat, ro, incompat);
1666 1755
1667 sb = journal->j_superblock; 1756 sb = journal->j_superblock;
1668 1757
1758 /* If enabling v2 checksums, update superblock */
1759 if (INCOMPAT_FEATURE_ON(JBD2_FEATURE_INCOMPAT_CSUM_V2)) {
1760 sb->s_checksum_type = JBD2_CRC32C_CHKSUM;
1761 sb->s_feature_compat &=
1762 ~cpu_to_be32(JBD2_FEATURE_COMPAT_CHECKSUM);
1763
1764 /* Load the checksum driver */
1765 if (journal->j_chksum_driver == NULL) {
1766 journal->j_chksum_driver = crypto_alloc_shash("crc32c",
1767 0, 0);
1768 if (IS_ERR(journal->j_chksum_driver)) {
1769 printk(KERN_ERR "JBD: Cannot load crc32c "
1770 "driver.\n");
1771 journal->j_chksum_driver = NULL;
1772 return 0;
1773 }
1774 }
1775
1776 /* Precompute checksum seed for all metadata */
1777 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
1778 JBD2_FEATURE_INCOMPAT_CSUM_V2))
1779 journal->j_csum_seed = jbd2_chksum(journal, ~0,
1780 sb->s_uuid,
1781 sizeof(sb->s_uuid));
1782 }
1783
1784 /* If enabling v1 checksums, downgrade superblock */
1785 if (COMPAT_FEATURE_ON(JBD2_FEATURE_COMPAT_CHECKSUM))
1786 sb->s_feature_incompat &=
1787 ~cpu_to_be32(JBD2_FEATURE_INCOMPAT_CSUM_V2);
1788
1669 sb->s_feature_compat |= cpu_to_be32(compat); 1789 sb->s_feature_compat |= cpu_to_be32(compat);
1670 sb->s_feature_ro_compat |= cpu_to_be32(ro); 1790 sb->s_feature_ro_compat |= cpu_to_be32(ro);
1671 sb->s_feature_incompat |= cpu_to_be32(incompat); 1791 sb->s_feature_incompat |= cpu_to_be32(incompat);
1672 1792
1673 return 1; 1793 return 1;
1794#undef COMPAT_FEATURE_ON
1795#undef INCOMPAT_FEATURE_ON
1674} 1796}
1675 1797
1676/* 1798/*
@@ -1975,10 +2097,16 @@ int jbd2_journal_blocks_per_page(struct inode *inode)
1975 */ 2097 */
1976size_t journal_tag_bytes(journal_t *journal) 2098size_t journal_tag_bytes(journal_t *journal)
1977{ 2099{
2100 journal_block_tag_t tag;
2101 size_t x = 0;
2102
2103 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
2104 x += sizeof(tag.t_checksum);
2105
1978 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) 2106 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
1979 return JBD2_TAG_SIZE64; 2107 return x + JBD2_TAG_SIZE64;
1980 else 2108 else
1981 return JBD2_TAG_SIZE32; 2109 return x + JBD2_TAG_SIZE32;
1982} 2110}
1983 2111
1984/* 2112/*
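journal_get_superblock() now allocates a "crc32c" synchronous hash transform once and caches a UUID-derived seed; the jbd2_chksum() helper used throughout these hunks lives in the jbd2 headers and is not part of this diff. The sketch below is a hedged reconstruction of what such a wrapper looks like, using only crypto_shash calls that exist in mainline; the 4-byte context size is an assumption about the crc32c driver.

/*
 * Hedged sketch of a crc32c-over-shash helper in the style of jbd2_chksum().
 * It seeds the running CRC directly in the shash context and returns the
 * partial CRC without a final inversion.
 */
#include <crypto/hash.h>
#include <linux/bug.h>

static u32 example_chksum(struct crypto_shash *tfm, u32 crc,
			  const void *address, unsigned int length)
{
	struct {
		struct shash_desc shash;
		char ctx[4];		/* crc32c keeps a 4-byte running CRC */
	} desc;
	int err;

	desc.shash.tfm = tfm;
	*(u32 *)desc.ctx = crc;		/* seed the running CRC */

	err = crypto_shash_update(&desc.shash, address, length);
	BUG_ON(err);

	return *(u32 *)desc.ctx;
}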
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index c1a03354a22f..0131e4362534 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -174,6 +174,25 @@ static int jread(struct buffer_head **bhp, journal_t *journal,
174 return 0; 174 return 0;
175} 175}
176 176
177static int jbd2_descr_block_csum_verify(journal_t *j,
178 void *buf)
179{
180 struct jbd2_journal_block_tail *tail;
181 __u32 provided, calculated;
182
183 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
184 return 1;
185
186 tail = (struct jbd2_journal_block_tail *)(buf + j->j_blocksize -
187 sizeof(struct jbd2_journal_block_tail));
188 provided = tail->t_checksum;
189 tail->t_checksum = 0;
190 calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
191 tail->t_checksum = provided;
192
193 provided = be32_to_cpu(provided);
194 return provided == calculated;
195}
177 196
178/* 197/*
179 * Count the number of in-use tags in a journal descriptor block. 198 * Count the number of in-use tags in a journal descriptor block.
@@ -186,6 +205,9 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
186 int nr = 0, size = journal->j_blocksize; 205 int nr = 0, size = journal->j_blocksize;
187 int tag_bytes = journal_tag_bytes(journal); 206 int tag_bytes = journal_tag_bytes(journal);
188 207
208 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
209 size -= sizeof(struct jbd2_journal_block_tail);
210
189 tagp = &bh->b_data[sizeof(journal_header_t)]; 211 tagp = &bh->b_data[sizeof(journal_header_t)];
190 212
191 while ((tagp - bh->b_data + tag_bytes) <= size) { 213 while ((tagp - bh->b_data + tag_bytes) <= size) {
@@ -193,10 +215,10 @@ static int count_tags(journal_t *journal, struct buffer_head *bh)
193 215
194 nr++; 216 nr++;
195 tagp += tag_bytes; 217 tagp += tag_bytes;
196 if (!(tag->t_flags & cpu_to_be32(JBD2_FLAG_SAME_UUID))) 218 if (!(tag->t_flags & cpu_to_be16(JBD2_FLAG_SAME_UUID)))
197 tagp += 16; 219 tagp += 16;
198 220
199 if (tag->t_flags & cpu_to_be32(JBD2_FLAG_LAST_TAG)) 221 if (tag->t_flags & cpu_to_be16(JBD2_FLAG_LAST_TAG))
200 break; 222 break;
201 } 223 }
202 224
@@ -353,6 +375,41 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh,
353 return 0; 375 return 0;
354} 376}
355 377
378static int jbd2_commit_block_csum_verify(journal_t *j, void *buf)
379{
380 struct commit_header *h;
381 __u32 provided, calculated;
382
383 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
384 return 1;
385
386 h = buf;
387 provided = h->h_chksum[0];
388 h->h_chksum[0] = 0;
389 calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
390 h->h_chksum[0] = provided;
391
392 provided = be32_to_cpu(provided);
393 return provided == calculated;
394}
395
396static int jbd2_block_tag_csum_verify(journal_t *j, journal_block_tag_t *tag,
397 void *buf, __u32 sequence)
398{
399 __u32 provided, calculated;
400
401 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
402 return 1;
403
404 sequence = cpu_to_be32(sequence);
405 calculated = jbd2_chksum(j, j->j_csum_seed, (__u8 *)&sequence,
406 sizeof(sequence));
407 calculated = jbd2_chksum(j, calculated, buf, j->j_blocksize);
408 provided = be32_to_cpu(tag->t_checksum);
409
410 return provided == cpu_to_be32(calculated);
411}
412
356static int do_one_pass(journal_t *journal, 413static int do_one_pass(journal_t *journal,
357 struct recovery_info *info, enum passtype pass) 414 struct recovery_info *info, enum passtype pass)
358{ 415{
@@ -366,6 +423,7 @@ static int do_one_pass(journal_t *journal,
366 int blocktype; 423 int blocktype;
367 int tag_bytes = journal_tag_bytes(journal); 424 int tag_bytes = journal_tag_bytes(journal);
368 __u32 crc32_sum = ~0; /* Transactional Checksums */ 425 __u32 crc32_sum = ~0; /* Transactional Checksums */
426 int descr_csum_size = 0;
369 427
370 /* 428 /*
371 * First thing is to establish what we expect to find in the log 429 * First thing is to establish what we expect to find in the log
@@ -451,6 +509,18 @@ static int do_one_pass(journal_t *journal,
451 509
452 switch(blocktype) { 510 switch(blocktype) {
453 case JBD2_DESCRIPTOR_BLOCK: 511 case JBD2_DESCRIPTOR_BLOCK:
512 /* Verify checksum first */
513 if (JBD2_HAS_INCOMPAT_FEATURE(journal,
514 JBD2_FEATURE_INCOMPAT_CSUM_V2))
515 descr_csum_size =
516 sizeof(struct jbd2_journal_block_tail);
517 if (descr_csum_size > 0 &&
518 !jbd2_descr_block_csum_verify(journal,
519 bh->b_data)) {
520 err = -EIO;
521 goto failed;
522 }
523
454 /* If it is a valid descriptor block, replay it 524 /* If it is a valid descriptor block, replay it
455 * in pass REPLAY; if journal_checksums enabled, then 525 * in pass REPLAY; if journal_checksums enabled, then
456 * calculate checksums in PASS_SCAN, otherwise, 526 * calculate checksums in PASS_SCAN, otherwise,
@@ -481,11 +551,11 @@ static int do_one_pass(journal_t *journal,
481 551
482 tagp = &bh->b_data[sizeof(journal_header_t)]; 552 tagp = &bh->b_data[sizeof(journal_header_t)];
483 while ((tagp - bh->b_data + tag_bytes) 553 while ((tagp - bh->b_data + tag_bytes)
484 <= journal->j_blocksize) { 554 <= journal->j_blocksize - descr_csum_size) {
485 unsigned long io_block; 555 unsigned long io_block;
486 556
487 tag = (journal_block_tag_t *) tagp; 557 tag = (journal_block_tag_t *) tagp;
488 flags = be32_to_cpu(tag->t_flags); 558 flags = be16_to_cpu(tag->t_flags);
489 559
490 io_block = next_log_block++; 560 io_block = next_log_block++;
491 wrap(journal, next_log_block); 561 wrap(journal, next_log_block);
@@ -516,6 +586,19 @@ static int do_one_pass(journal_t *journal,
516 goto skip_write; 586 goto skip_write;
517 } 587 }
518 588
589 /* Look for block corruption */
590 if (!jbd2_block_tag_csum_verify(
591 journal, tag, obh->b_data,
592 be32_to_cpu(tmp->h_sequence))) {
593 brelse(obh);
594 success = -EIO;
595 printk(KERN_ERR "JBD: Invalid "
596 "checksum recovering "
597 "block %llu in log\n",
598 blocknr);
599 continue;
600 }
601
519 /* Find a buffer for the new 602 /* Find a buffer for the new
520 * data being restored */ 603 * data being restored */
521 nbh = __getblk(journal->j_fs_dev, 604 nbh = __getblk(journal->j_fs_dev,
@@ -650,6 +733,19 @@ static int do_one_pass(journal_t *journal,
650 } 733 }
651 crc32_sum = ~0; 734 crc32_sum = ~0;
652 } 735 }
736 if (pass == PASS_SCAN &&
737 !jbd2_commit_block_csum_verify(journal,
738 bh->b_data)) {
739 info->end_transaction = next_commit_ID;
740
741 if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
742 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)) {
743 journal->j_failed_commit =
744 next_commit_ID;
745 brelse(bh);
746 break;
747 }
748 }
653 brelse(bh); 749 brelse(bh);
654 next_commit_ID++; 750 next_commit_ID++;
655 continue; 751 continue;
@@ -706,6 +802,25 @@ static int do_one_pass(journal_t *journal,
706 return err; 802 return err;
707} 803}
708 804
805static int jbd2_revoke_block_csum_verify(journal_t *j,
806 void *buf)
807{
808 struct jbd2_journal_revoke_tail *tail;
809 __u32 provided, calculated;
810
811 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
812 return 1;
813
814 tail = (struct jbd2_journal_revoke_tail *)(buf + j->j_blocksize -
815 sizeof(struct jbd2_journal_revoke_tail));
816 provided = tail->r_checksum;
817 tail->r_checksum = 0;
818 calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
819 tail->r_checksum = provided;
820
821 provided = be32_to_cpu(provided);
822 return provided == calculated;
823}
709 824
710/* Scan a revoke record, marking all blocks mentioned as revoked. */ 825/* Scan a revoke record, marking all blocks mentioned as revoked. */
711 826
@@ -720,6 +835,9 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
720 offset = sizeof(jbd2_journal_revoke_header_t); 835 offset = sizeof(jbd2_journal_revoke_header_t);
721 max = be32_to_cpu(header->r_count); 836 max = be32_to_cpu(header->r_count);
722 837
838 if (!jbd2_revoke_block_csum_verify(journal, header))
839 return -EINVAL;
840
723 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) 841 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
724 record_len = 8; 842 record_len = 8;
725 843
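During PASS_SCAN/REPLAY, each data block's tag checksum chains two CRC32C passes: first over the big-endian transaction sequence number, then over the block contents, starting from the journal's UUID-derived seed. The sketch below isolates that chaining in userspace; it reuses the crc32c() helper from the earlier sketch (declared extern here), and the 32-bit comparison is a simplification of the 16-bit on-disk tag field.

/* Userspace sketch of block-tag checksum chaining: CRC32C over the
 * big-endian sequence number, continued over the block contents. */
#include <stdint.h>
#include <stddef.h>
#include <arpa/inet.h>

extern uint32_t crc32c(uint32_t crc, const void *data, size_t len);

static int tag_csum_ok(uint32_t seed, uint32_t sequence,
		       const void *block, size_t blocksize,
		       uint32_t provided_be)
{
	uint32_t seq_be = htonl(sequence);	/* checksum covers the BE form */
	uint32_t csum;

	csum = crc32c(seed, &seq_be, sizeof(seq_be));
	csum = crc32c(csum, block, blocksize);

	return ntohl(provided_be) == csum;
}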
diff --git a/fs/jbd2/revoke.c b/fs/jbd2/revoke.c
index 6973705d6a3d..f30b80b4ce8b 100644
--- a/fs/jbd2/revoke.c
+++ b/fs/jbd2/revoke.c
@@ -578,6 +578,7 @@ static void write_one_revoke_record(journal_t *journal,
578 struct jbd2_revoke_record_s *record, 578 struct jbd2_revoke_record_s *record,
579 int write_op) 579 int write_op)
580{ 580{
581 int csum_size = 0;
581 struct journal_head *descriptor; 582 struct journal_head *descriptor;
582 int offset; 583 int offset;
583 journal_header_t *header; 584 journal_header_t *header;
@@ -592,9 +593,13 @@ static void write_one_revoke_record(journal_t *journal,
592 descriptor = *descriptorp; 593 descriptor = *descriptorp;
593 offset = *offsetp; 594 offset = *offsetp;
594 595
596 /* Do we need to leave space at the end for a checksum? */
597 if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_CSUM_V2))
598 csum_size = sizeof(struct jbd2_journal_revoke_tail);
599
595 /* Make sure we have a descriptor with space left for the record */ 600 /* Make sure we have a descriptor with space left for the record */
596 if (descriptor) { 601 if (descriptor) {
597 if (offset == journal->j_blocksize) { 602 if (offset >= journal->j_blocksize - csum_size) {
598 flush_descriptor(journal, descriptor, offset, write_op); 603 flush_descriptor(journal, descriptor, offset, write_op);
599 descriptor = NULL; 604 descriptor = NULL;
600 } 605 }
@@ -631,6 +636,24 @@ static void write_one_revoke_record(journal_t *journal,
631 *offsetp = offset; 636 *offsetp = offset;
632} 637}
633 638
639static void jbd2_revoke_csum_set(journal_t *j,
640 struct journal_head *descriptor)
641{
642 struct jbd2_journal_revoke_tail *tail;
643 __u32 csum;
644
645 if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
646 return;
647
648 tail = (struct jbd2_journal_revoke_tail *)
649 (jh2bh(descriptor)->b_data + j->j_blocksize -
650 sizeof(struct jbd2_journal_revoke_tail));
651 tail->r_checksum = 0;
652 csum = jbd2_chksum(j, j->j_csum_seed, jh2bh(descriptor)->b_data,
653 j->j_blocksize);
654 tail->r_checksum = cpu_to_be32(csum);
655}
656
634/* 657/*
635 * Flush a revoke descriptor out to the journal. If we are aborting, 658 * Flush a revoke descriptor out to the journal. If we are aborting,
636 * this is a noop; otherwise we are generating a buffer which needs to 659 * this is a noop; otherwise we are generating a buffer which needs to
@@ -652,6 +675,8 @@ static void flush_descriptor(journal_t *journal,
652 675
653 header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data; 676 header = (jbd2_journal_revoke_header_t *) jh2bh(descriptor)->b_data;
654 header->r_count = cpu_to_be32(offset); 677 header->r_count = cpu_to_be32(offset);
678 jbd2_revoke_csum_set(journal, descriptor);
679
655 set_buffer_jwrite(bh); 680 set_buffer_jwrite(bh);
656 BUFFER_TRACE(bh, "write"); 681 BUFFER_TRACE(bh, "write");
657 set_buffer_dirty(bh); 682 set_buffer_dirty(bh);
diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c
index ddcd3549c6c2..fb1ab9533b67 100644
--- a/fs/jbd2/transaction.c
+++ b/fs/jbd2/transaction.c
@@ -162,8 +162,8 @@ static int start_this_handle(journal_t *journal, handle_t *handle,
162 162
163alloc_transaction: 163alloc_transaction:
164 if (!journal->j_running_transaction) { 164 if (!journal->j_running_transaction) {
165 new_transaction = kmem_cache_alloc(transaction_cache, 165 new_transaction = kmem_cache_zalloc(transaction_cache,
166 gfp_mask | __GFP_ZERO); 166 gfp_mask);
167 if (!new_transaction) { 167 if (!new_transaction) {
168 /* 168 /*
169 * If __GFP_FS is not present, then we may be 169 * If __GFP_FS is not present, then we may be
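The transaction.c hunk is a straight substitution: kmem_cache_zalloc(cache, flags) is defined as kmem_cache_alloc(cache, flags | __GFP_ZERO), so spelling out __GFP_ZERO by hand was redundant. The two-line fragment below shows the equivalence for reference; it is not standalone code.

/* The two calls below are equivalent; kmem_cache_zalloc() simply ORs in
 * __GFP_ZERO before calling kmem_cache_alloc(). */
new_transaction = kmem_cache_alloc(transaction_cache, gfp_mask | __GFP_ZERO);
new_transaction = kmem_cache_zalloc(transaction_cache, gfp_mask);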
diff --git a/fs/jffs2/jffs2_fs_sb.h b/fs/jffs2/jffs2_fs_sb.h
index 55a0c1dceadf..413ef89c2d1b 100644
--- a/fs/jffs2/jffs2_fs_sb.h
+++ b/fs/jffs2/jffs2_fs_sb.h
@@ -32,6 +32,13 @@ struct jffs2_inodirty;
32struct jffs2_mount_opts { 32struct jffs2_mount_opts {
33 bool override_compr; 33 bool override_compr;
34 unsigned int compr; 34 unsigned int compr;
35
 36 /* The size of the reserved pool. The reserved pool is the JFFS2 flash
 37 * space which may only be used by root and cannot be used by the other
 38 * users. This is implemented simply by means of not allowing the
 39 * latter users to write to the file system if the amount of the
 40 * available space is less than 'rp_size'. */
41 unsigned int rp_size;
35}; 42};
36 43
37/* A struct for the overall file system control. Pointers to 44/* A struct for the overall file system control. Pointers to
@@ -126,6 +133,10 @@ struct jffs2_sb_info {
126 struct jffs2_inodirty *wbuf_inodes; 133 struct jffs2_inodirty *wbuf_inodes;
127 struct rw_semaphore wbuf_sem; /* Protects the write buffer */ 134 struct rw_semaphore wbuf_sem; /* Protects the write buffer */
128 135
136 struct delayed_work wbuf_dwork; /* write-buffer write-out work */
 137 int wbuf_queued; /* non-zero if delayed work is queued */
 138 spinlock_t wbuf_dwork_lock; /* protects wbuf_dwork and wbuf_queued */
139
129 unsigned char *oobbuf; 140 unsigned char *oobbuf;
130 int oobavail; /* How many bytes are available for JFFS2 in OOB */ 141 int oobavail; /* How many bytes are available for JFFS2 in OOB */
131#endif 142#endif
diff --git a/fs/jffs2/nodemgmt.c b/fs/jffs2/nodemgmt.c
index 6784d1e7a7eb..0c96eb52c797 100644
--- a/fs/jffs2/nodemgmt.c
+++ b/fs/jffs2/nodemgmt.c
@@ -18,6 +18,37 @@
18#include "nodelist.h" 18#include "nodelist.h"
19#include "debug.h" 19#include "debug.h"
20 20
21/*
22 * Check whether the user is allowed to write.
23 */
24static int jffs2_rp_can_write(struct jffs2_sb_info *c)
25{
26 uint32_t avail;
27 struct jffs2_mount_opts *opts = &c->mount_opts;
28
29 avail = c->dirty_size + c->free_size + c->unchecked_size +
30 c->erasing_size - c->resv_blocks_write * c->sector_size
31 - c->nospc_dirty_size;
32
33 if (avail < 2 * opts->rp_size)
34 jffs2_dbg(1, "rpsize %u, dirty_size %u, free_size %u, "
35 "erasing_size %u, unchecked_size %u, "
36 "nr_erasing_blocks %u, avail %u, resrv %u\n",
37 opts->rp_size, c->dirty_size, c->free_size,
38 c->erasing_size, c->unchecked_size,
39 c->nr_erasing_blocks, avail, c->nospc_dirty_size);
40
41 if (avail > opts->rp_size)
42 return 1;
43
44 /* Always allow root */
45 if (capable(CAP_SYS_RESOURCE))
46 return 1;
47
48 jffs2_dbg(1, "forbid writing\n");
49 return 0;
50}
51
21/** 52/**
22 * jffs2_reserve_space - request physical space to write nodes to flash 53 * jffs2_reserve_space - request physical space to write nodes to flash
23 * @c: superblock info 54 * @c: superblock info
@@ -55,6 +86,15 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
55 86
56 spin_lock(&c->erase_completion_lock); 87 spin_lock(&c->erase_completion_lock);
57 88
89 /*
 90 * Check if the free space is greater than the size of the reserved pool.
91 * If not, only allow root to proceed with writing.
92 */
93 if (prio != ALLOC_DELETION && !jffs2_rp_can_write(c)) {
94 ret = -ENOSPC;
95 goto out;
96 }
97
58 /* this needs a little more thought (true <tglx> :)) */ 98 /* this needs a little more thought (true <tglx> :)) */
59 while(ret == -EAGAIN) { 99 while(ret == -EAGAIN) {
60 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) { 100 while(c->nr_free_blocks + c->nr_erasing_blocks < blocksneeded) {
@@ -158,6 +198,8 @@ int jffs2_reserve_space(struct jffs2_sb_info *c, uint32_t minsize,
158 jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret); 198 jffs2_dbg(1, "%s(): ret is %d\n", __func__, ret);
159 } 199 }
160 } 200 }
201
202out:
161 spin_unlock(&c->erase_completion_lock); 203 spin_unlock(&c->erase_completion_lock);
162 if (!ret) 204 if (!ret)
163 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1); 205 ret = jffs2_prealloc_raw_node_refs(c, c->nextblock, 1);
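jffs2_rp_can_write() counts everything still reclaimable (dirty, erasing, unchecked and free space, minus the blocks reserved for writes and the dirty space that cannot satisfy non-GC writers) as "available", and only refuses ordinary writers once that figure falls to the reserved-pool size; a CAP_SYS_RESOURCE holder is always allowed through. The userspace sketch below repeats the same arithmetic in isolation; the field names and byte units are assumptions mirroring the jffs2_sb_info counters.

/* Userspace sketch of the reserved-pool admission check. */
#include <stdint.h>
#include <stdbool.h>

struct fs_counters {
	uint32_t dirty_size, free_size, unchecked_size, erasing_size;
	uint32_t resv_blocks_write, sector_size, nospc_dirty_size;
};

static bool rp_can_write(const struct fs_counters *c, uint32_t rp_size,
			 bool has_cap_sys_resource)
{
	uint32_t avail = c->dirty_size + c->free_size + c->unchecked_size +
			 c->erasing_size -
			 c->resv_blocks_write * c->sector_size -
			 c->nospc_dirty_size;

	if (avail > rp_size)		/* plenty of room: everyone may write */
		return true;

	return has_cap_sys_resource;	/* below the pool, only privileged writers */
}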
diff --git a/fs/jffs2/os-linux.h b/fs/jffs2/os-linux.h
index 1cd3aec9d9ae..bcd983d7e7f9 100644
--- a/fs/jffs2/os-linux.h
+++ b/fs/jffs2/os-linux.h
@@ -95,6 +95,7 @@ static inline void jffs2_init_inode_info(struct jffs2_inode_info *f)
95#define jffs2_ubivol(c) (0) 95#define jffs2_ubivol(c) (0)
96#define jffs2_ubivol_setup(c) (0) 96#define jffs2_ubivol_setup(c) (0)
97#define jffs2_ubivol_cleanup(c) do {} while (0) 97#define jffs2_ubivol_cleanup(c) do {} while (0)
98#define jffs2_dirty_trigger(c) do {} while (0)
98 99
99#else /* NAND and/or ECC'd NOR support present */ 100#else /* NAND and/or ECC'd NOR support present */
100 101
@@ -135,14 +136,10 @@ void jffs2_ubivol_cleanup(struct jffs2_sb_info *c);
135#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE)) 136#define jffs2_nor_wbuf_flash(c) (c->mtd->type == MTD_NORFLASH && ! (c->mtd->flags & MTD_BIT_WRITEABLE))
136int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c); 137int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c);
137void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c); 138void jffs2_nor_wbuf_flash_cleanup(struct jffs2_sb_info *c);
139void jffs2_dirty_trigger(struct jffs2_sb_info *c);
138 140
139#endif /* WRITEBUFFER */ 141#endif /* WRITEBUFFER */
140 142
141static inline void jffs2_dirty_trigger(struct jffs2_sb_info *c)
142{
143 OFNI_BS_2SFFJ(c)->s_dirt = 1;
144}
145
146/* background.c */ 143/* background.c */
147int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c); 144int jffs2_start_garbage_collect_thread(struct jffs2_sb_info *c);
148void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c); 145void jffs2_stop_garbage_collect_thread(struct jffs2_sb_info *c);
diff --git a/fs/jffs2/readinode.c b/fs/jffs2/readinode.c
index dc0437e84763..1ea349fff68b 100644
--- a/fs/jffs2/readinode.c
+++ b/fs/jffs2/readinode.c
@@ -1266,19 +1266,25 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
1266 /* Symlink's inode data is the target path. Read it and 1266 /* Symlink's inode data is the target path. Read it and
1267 * keep in RAM to facilitate quick follow symlink 1267 * keep in RAM to facilitate quick follow symlink
1268 * operation. */ 1268 * operation. */
1269 f->target = kmalloc(je32_to_cpu(latest_node->csize) + 1, GFP_KERNEL); 1269 uint32_t csize = je32_to_cpu(latest_node->csize);
1270 if (csize > JFFS2_MAX_NAME_LEN) {
1271 mutex_unlock(&f->sem);
1272 jffs2_do_clear_inode(c, f);
1273 return -ENAMETOOLONG;
1274 }
1275 f->target = kmalloc(csize + 1, GFP_KERNEL);
1270 if (!f->target) { 1276 if (!f->target) {
1271 JFFS2_ERROR("can't allocate %d bytes of memory for the symlink target path cache\n", je32_to_cpu(latest_node->csize)); 1277 JFFS2_ERROR("can't allocate %u bytes of memory for the symlink target path cache\n", csize);
1272 mutex_unlock(&f->sem); 1278 mutex_unlock(&f->sem);
1273 jffs2_do_clear_inode(c, f); 1279 jffs2_do_clear_inode(c, f);
1274 return -ENOMEM; 1280 return -ENOMEM;
1275 } 1281 }
1276 1282
1277 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node), 1283 ret = jffs2_flash_read(c, ref_offset(rii.latest_ref) + sizeof(*latest_node),
1278 je32_to_cpu(latest_node->csize), &retlen, (char *)f->target); 1284 csize, &retlen, (char *)f->target);
1279 1285
1280 if (ret || retlen != je32_to_cpu(latest_node->csize)) { 1286 if (ret || retlen != csize) {
1281 if (retlen != je32_to_cpu(latest_node->csize)) 1287 if (retlen != csize)
1282 ret = -EIO; 1288 ret = -EIO;
1283 kfree(f->target); 1289 kfree(f->target);
1284 f->target = NULL; 1290 f->target = NULL;
@@ -1287,7 +1293,7 @@ static int jffs2_do_read_inode_internal(struct jffs2_sb_info *c,
1287 return ret; 1293 return ret;
1288 } 1294 }
1289 1295
1290 f->target[je32_to_cpu(latest_node->csize)] = '\0'; 1296 f->target[csize] = '\0';
1291 dbg_readinode("symlink's target '%s' cached\n", f->target); 1297 dbg_readinode("symlink's target '%s' cached\n", f->target);
1292 } 1298 }
1293 1299
@@ -1415,6 +1421,7 @@ int jffs2_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *i
1415 mutex_unlock(&f->sem); 1421 mutex_unlock(&f->sem);
1416 jffs2_do_clear_inode(c, f); 1422 jffs2_do_clear_inode(c, f);
1417 } 1423 }
1424 jffs2_xattr_do_crccheck_inode(c, ic);
1418 kfree (f); 1425 kfree (f);
1419 return ret; 1426 return ret;
1420} 1427}
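The readinode.c hunk is a hardening fix: the symlink target length comes from the on-flash csize field, so it is now bounded against JFFS2_MAX_NAME_LEN before being used as a kmalloc size, and the cached value is reused for the read, the error paths and the terminating NUL. The small userspace sketch below shows the general "validate an untrusted length before allocating" pattern; MAX_TARGET_LEN and the copy step are stand-ins, not the JFFS2 constants or flash-read path.

/* Sketch of bounding an untrusted on-media length before allocation. */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>

#define MAX_TARGET_LEN 254	/* stand-in for JFFS2_MAX_NAME_LEN */

static char *read_target(const char *raw, uint32_t csize, int *err)
{
	char *target;

	if (csize > MAX_TARGET_LEN) {		/* reject absurd lengths first */
		*err = -ENAMETOOLONG;
		return NULL;
	}

	target = malloc(csize + 1);
	if (!target) {
		*err = -ENOMEM;
		return NULL;
	}

	memcpy(target, raw, csize);		/* stands in for jffs2_flash_read() */
	target[csize] = '\0';
	*err = 0;
	return target;
}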
diff --git a/fs/jffs2/super.c b/fs/jffs2/super.c
index f9916f312bd8..61ea41389f90 100644
--- a/fs/jffs2/super.c
+++ b/fs/jffs2/super.c
@@ -63,21 +63,6 @@ static void jffs2_i_init_once(void *foo)
63 inode_init_once(&f->vfs_inode); 63 inode_init_once(&f->vfs_inode);
64} 64}
65 65
66static void jffs2_write_super(struct super_block *sb)
67{
68 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
69
70 lock_super(sb);
71 sb->s_dirt = 0;
72
73 if (!(sb->s_flags & MS_RDONLY)) {
74 jffs2_dbg(1, "%s()\n", __func__);
75 jffs2_flush_wbuf_gc(c, 0);
76 }
77
78 unlock_super(sb);
79}
80
81static const char *jffs2_compr_name(unsigned int compr) 66static const char *jffs2_compr_name(unsigned int compr)
82{ 67{
83 switch (compr) { 68 switch (compr) {
@@ -105,6 +90,8 @@ static int jffs2_show_options(struct seq_file *s, struct dentry *root)
105 90
106 if (opts->override_compr) 91 if (opts->override_compr)
107 seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr)); 92 seq_printf(s, ",compr=%s", jffs2_compr_name(opts->compr));
93 if (opts->rp_size)
94 seq_printf(s, ",rp_size=%u", opts->rp_size / 1024);
108 95
109 return 0; 96 return 0;
110} 97}
@@ -113,8 +100,6 @@ static int jffs2_sync_fs(struct super_block *sb, int wait)
113{ 100{
114 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb); 101 struct jffs2_sb_info *c = JFFS2_SB_INFO(sb);
115 102
116 jffs2_write_super(sb);
117
118 mutex_lock(&c->alloc_sem); 103 mutex_lock(&c->alloc_sem);
119 jffs2_flush_wbuf_pad(c); 104 jffs2_flush_wbuf_pad(c);
120 mutex_unlock(&c->alloc_sem); 105 mutex_unlock(&c->alloc_sem);
@@ -171,15 +156,18 @@ static const struct export_operations jffs2_export_ops = {
171 * JFFS2 mount options. 156 * JFFS2 mount options.
172 * 157 *
173 * Opt_override_compr: override default compressor 158 * Opt_override_compr: override default compressor
159 * Opt_rp_size: size of reserved pool in KiB
174 * Opt_err: just end of array marker 160 * Opt_err: just end of array marker
175 */ 161 */
176enum { 162enum {
177 Opt_override_compr, 163 Opt_override_compr,
164 Opt_rp_size,
178 Opt_err, 165 Opt_err,
179}; 166};
180 167
181static const match_table_t tokens = { 168static const match_table_t tokens = {
182 {Opt_override_compr, "compr=%s"}, 169 {Opt_override_compr, "compr=%s"},
170 {Opt_rp_size, "rp_size=%u"},
183 {Opt_err, NULL}, 171 {Opt_err, NULL},
184}; 172};
185 173
@@ -187,6 +175,7 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
187{ 175{
188 substring_t args[MAX_OPT_ARGS]; 176 substring_t args[MAX_OPT_ARGS];
189 char *p, *name; 177 char *p, *name;
178 unsigned int opt;
190 179
191 if (!data) 180 if (!data)
192 return 0; 181 return 0;
@@ -224,6 +213,17 @@ static int jffs2_parse_options(struct jffs2_sb_info *c, char *data)
224 kfree(name); 213 kfree(name);
225 c->mount_opts.override_compr = true; 214 c->mount_opts.override_compr = true;
226 break; 215 break;
216 case Opt_rp_size:
217 if (match_int(&args[0], &opt))
218 return -EINVAL;
219 opt *= 1024;
220 if (opt > c->mtd->size) {
221 pr_warn("Too large reserve pool specified, max "
222 "is %llu KB\n", c->mtd->size / 1024);
223 return -EINVAL;
224 }
225 c->mount_opts.rp_size = opt;
226 break;
227 default: 227 default:
228 pr_err("Error: unrecognized mount option '%s' or missing value\n", 228 pr_err("Error: unrecognized mount option '%s' or missing value\n",
229 p); 229 p);
@@ -251,7 +251,6 @@ static const struct super_operations jffs2_super_operations =
251 .alloc_inode = jffs2_alloc_inode, 251 .alloc_inode = jffs2_alloc_inode,
252 .destroy_inode =jffs2_destroy_inode, 252 .destroy_inode =jffs2_destroy_inode,
253 .put_super = jffs2_put_super, 253 .put_super = jffs2_put_super,
254 .write_super = jffs2_write_super,
255 .statfs = jffs2_statfs, 254 .statfs = jffs2_statfs,
256 .remount_fs = jffs2_remount_fs, 255 .remount_fs = jffs2_remount_fs,
257 .evict_inode = jffs2_evict_inode, 256 .evict_inode = jffs2_evict_inode,
@@ -319,9 +318,6 @@ static void jffs2_put_super (struct super_block *sb)
319 318
320 jffs2_dbg(2, "%s()\n", __func__); 319 jffs2_dbg(2, "%s()\n", __func__);
321 320
322 if (sb->s_dirt)
323 jffs2_write_super(sb);
324
325 mutex_lock(&c->alloc_sem); 321 mutex_lock(&c->alloc_sem);
326 jffs2_flush_wbuf_pad(c); 322 jffs2_flush_wbuf_pad(c);
327 mutex_unlock(&c->alloc_sem); 323 mutex_unlock(&c->alloc_sem);
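The new rp_size mount option is accepted in KiB, converted to bytes, and rejected if it exceeds the size of the underlying MTD device; show_options prints it back in KiB. The userspace sketch below walks that parse/validate/print round trip with a hypothetical option value; strtoul and the hard-coded device size stand in for match_int() and c->mtd->size.

/* Userspace sketch of the rp_size=<KiB> round trip: parse, convert to
 * bytes, bound against the device size, print back in KiB. */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

static int parse_rp_size(const char *arg, uint64_t device_size,
			 uint32_t *rp_size_bytes)
{
	unsigned long kib = strtoul(arg, NULL, 10);
	uint64_t bytes = (uint64_t)kib * 1024;

	if (bytes > device_size) {
		fprintf(stderr, "Too large reserve pool, max is %llu KB\n",
			(unsigned long long)(device_size / 1024));
		return -1;
	}
	*rp_size_bytes = (uint32_t)bytes;
	return 0;
}

int main(void)
{
	uint32_t rp;

	/* 4 MiB pool on a hypothetical 64 MiB device */
	if (parse_rp_size("4096", 64ULL << 20, &rp) == 0)
		printf(",rp_size=%u\n", rp / 1024);	/* printed back in KiB */
	return 0;
}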
diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
index 74d9be19df3f..6f4529d3697f 100644
--- a/fs/jffs2/wbuf.c
+++ b/fs/jffs2/wbuf.c
@@ -20,6 +20,7 @@
20#include <linux/mtd/nand.h> 20#include <linux/mtd/nand.h>
21#include <linux/jiffies.h> 21#include <linux/jiffies.h>
22#include <linux/sched.h> 22#include <linux/sched.h>
23#include <linux/writeback.h>
23 24
24#include "nodelist.h" 25#include "nodelist.h"
25 26
@@ -85,7 +86,7 @@ static void jffs2_wbuf_dirties_inode(struct jffs2_sb_info *c, uint32_t ino)
85{ 86{
86 struct jffs2_inodirty *new; 87 struct jffs2_inodirty *new;
87 88
88 /* Mark the superblock dirty so that kupdated will flush... */ 89 /* Schedule delayed write-buffer write-out */
89 jffs2_dirty_trigger(c); 90 jffs2_dirty_trigger(c);
90 91
91 if (jffs2_wbuf_pending_for_ino(c, ino)) 92 if (jffs2_wbuf_pending_for_ino(c, ino))
@@ -1148,6 +1149,47 @@ int jffs2_write_nand_badblock(struct jffs2_sb_info *c, struct jffs2_eraseblock *
1148 return 1; 1149 return 1;
1149} 1150}
1150 1151
1152static struct jffs2_sb_info *work_to_sb(struct work_struct *work)
1153{
1154 struct delayed_work *dwork;
1155
1156 dwork = container_of(work, struct delayed_work, work);
1157 return container_of(dwork, struct jffs2_sb_info, wbuf_dwork);
1158}
1159
1160static void delayed_wbuf_sync(struct work_struct *work)
1161{
1162 struct jffs2_sb_info *c = work_to_sb(work);
1163 struct super_block *sb = OFNI_BS_2SFFJ(c);
1164
1165 spin_lock(&c->wbuf_dwork_lock);
1166 c->wbuf_queued = 0;
1167 spin_unlock(&c->wbuf_dwork_lock);
1168
1169 if (!(sb->s_flags & MS_RDONLY)) {
1170 jffs2_dbg(1, "%s()\n", __func__);
1171 jffs2_flush_wbuf_gc(c, 0);
1172 }
1173}
1174
1175void jffs2_dirty_trigger(struct jffs2_sb_info *c)
1176{
1177 struct super_block *sb = OFNI_BS_2SFFJ(c);
1178 unsigned long delay;
1179
1180 if (sb->s_flags & MS_RDONLY)
1181 return;
1182
1183 spin_lock(&c->wbuf_dwork_lock);
1184 if (!c->wbuf_queued) {
1185 jffs2_dbg(1, "%s()\n", __func__);
1186 delay = msecs_to_jiffies(dirty_writeback_interval * 10);
1187 queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);
1188 c->wbuf_queued = 1;
1189 }
1190 spin_unlock(&c->wbuf_dwork_lock);
1191}
1192
1151int jffs2_nand_flash_setup(struct jffs2_sb_info *c) 1193int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1152{ 1194{
1153 struct nand_ecclayout *oinfo = c->mtd->ecclayout; 1195 struct nand_ecclayout *oinfo = c->mtd->ecclayout;
@@ -1169,6 +1211,8 @@ int jffs2_nand_flash_setup(struct jffs2_sb_info *c)
1169 1211
1170 /* Initialise write buffer */ 1212 /* Initialise write buffer */
1171 init_rwsem(&c->wbuf_sem); 1213 init_rwsem(&c->wbuf_sem);
1214 spin_lock_init(&c->wbuf_dwork_lock);
1215 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1172 c->wbuf_pagesize = c->mtd->writesize; 1216 c->wbuf_pagesize = c->mtd->writesize;
1173 c->wbuf_ofs = 0xFFFFFFFF; 1217 c->wbuf_ofs = 0xFFFFFFFF;
1174 1218
@@ -1207,8 +1251,8 @@ int jffs2_dataflash_setup(struct jffs2_sb_info *c) {
1207 1251
1208 /* Initialize write buffer */ 1252 /* Initialize write buffer */
1209 init_rwsem(&c->wbuf_sem); 1253 init_rwsem(&c->wbuf_sem);
1210 1254 spin_lock_init(&c->wbuf_dwork_lock);
1211 1255 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1212 c->wbuf_pagesize = c->mtd->erasesize; 1256 c->wbuf_pagesize = c->mtd->erasesize;
1213 1257
1214 /* Find a suitable c->sector_size 1258 /* Find a suitable c->sector_size
@@ -1267,6 +1311,9 @@ int jffs2_nor_wbuf_flash_setup(struct jffs2_sb_info *c) {
1267 1311
1268 /* Initialize write buffer */ 1312 /* Initialize write buffer */
1269 init_rwsem(&c->wbuf_sem); 1313 init_rwsem(&c->wbuf_sem);
1314 spin_lock_init(&c->wbuf_dwork_lock);
1315 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1316
1270 c->wbuf_pagesize = c->mtd->writesize; 1317 c->wbuf_pagesize = c->mtd->writesize;
1271 c->wbuf_ofs = 0xFFFFFFFF; 1318 c->wbuf_ofs = 0xFFFFFFFF;
1272 1319
@@ -1299,6 +1346,8 @@ int jffs2_ubivol_setup(struct jffs2_sb_info *c) {
1299 return 0; 1346 return 0;
1300 1347
1301 init_rwsem(&c->wbuf_sem); 1348 init_rwsem(&c->wbuf_sem);
1349 spin_lock_init(&c->wbuf_dwork_lock);
1350 INIT_DELAYED_WORK(&c->wbuf_dwork, delayed_wbuf_sync);
1302 1351
1303 c->wbuf_pagesize = c->mtd->writesize; 1352 c->wbuf_pagesize = c->mtd->writesize;
1304 c->wbuf_ofs = 0xFFFFFFFF; 1353 c->wbuf_ofs = 0xFFFFFFFF;
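With .write_super gone, the write buffer is flushed by a delayed work item instead of the old superblock-dirty path: jffs2_dirty_trigger() queues wbuf_dwork at most once (guarded by wbuf_dwork_lock and wbuf_queued), with a delay derived from dirty_writeback_interval. That sysctl is expressed in centiseconds, so the *10 below is purely a unit conversion to milliseconds; with the usual default of 500 centiseconds this is a 5-second delay. The fragment mirrors the hunk and is not standalone code; the default value is an assumption about typical configuration.

/* dirty_writeback_interval is a VM sysctl in centiseconds; multiplying by
 * 10 yields milliseconds for msecs_to_jiffies().  Default 500 => ~5 s. */
unsigned long delay = msecs_to_jiffies(dirty_writeback_interval * 10);
queue_delayed_work(system_long_wq, &c->wbuf_dwork, delay);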
diff --git a/fs/jffs2/xattr.c b/fs/jffs2/xattr.c
index b55b803eddcb..3034e970eb9a 100644
--- a/fs/jffs2/xattr.c
+++ b/fs/jffs2/xattr.c
@@ -11,6 +11,8 @@
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13 13
14#define JFFS2_XATTR_IS_CORRUPTED 1
15
14#include <linux/kernel.h> 16#include <linux/kernel.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
16#include <linux/fs.h> 18#include <linux/fs.h>
@@ -153,7 +155,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
153 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 155 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
154 offset, je32_to_cpu(rx.hdr_crc), crc); 156 offset, je32_to_cpu(rx.hdr_crc), crc);
155 xd->flags |= JFFS2_XFLAGS_INVALID; 157 xd->flags |= JFFS2_XFLAGS_INVALID;
156 return -EIO; 158 return JFFS2_XATTR_IS_CORRUPTED;
157 } 159 }
158 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len)); 160 totlen = PAD(sizeof(rx) + rx.name_len + 1 + je16_to_cpu(rx.value_len));
159 if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK 161 if (je16_to_cpu(rx.magic) != JFFS2_MAGIC_BITMASK
@@ -169,7 +171,7 @@ static int do_verify_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_dat
169 je32_to_cpu(rx.xid), xd->xid, 171 je32_to_cpu(rx.xid), xd->xid,
170 je32_to_cpu(rx.version), xd->version); 172 je32_to_cpu(rx.version), xd->version);
171 xd->flags |= JFFS2_XFLAGS_INVALID; 173 xd->flags |= JFFS2_XFLAGS_INVALID;
172 return -EIO; 174 return JFFS2_XATTR_IS_CORRUPTED;
173 } 175 }
174 xd->xprefix = rx.xprefix; 176 xd->xprefix = rx.xprefix;
175 xd->name_len = rx.name_len; 177 xd->name_len = rx.name_len;
@@ -227,12 +229,12 @@ static int do_load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum
227 data[xd->name_len] = '\0'; 229 data[xd->name_len] = '\0';
228 crc = crc32(0, data, length); 230 crc = crc32(0, data, length);
229 if (crc != xd->data_crc) { 231 if (crc != xd->data_crc) {
230 JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XREF)" 232 JFFS2_WARNING("node CRC failed (JFFS2_NODETYPE_XATTR)"
231 " at %#08x, read: 0x%08x calculated: 0x%08x\n", 233 " at %#08x, read: 0x%08x calculated: 0x%08x\n",
232 ref_offset(xd->node), xd->data_crc, crc); 234 ref_offset(xd->node), xd->data_crc, crc);
233 kfree(data); 235 kfree(data);
234 xd->flags |= JFFS2_XFLAGS_INVALID; 236 xd->flags |= JFFS2_XFLAGS_INVALID;
235 return -EIO; 237 return JFFS2_XATTR_IS_CORRUPTED;
236 } 238 }
237 239
238 xd->flags |= JFFS2_XFLAGS_HOT; 240 xd->flags |= JFFS2_XFLAGS_HOT;
@@ -270,7 +272,7 @@ static int load_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datum *x
270 if (xd->xname) 272 if (xd->xname)
271 return 0; 273 return 0;
272 if (xd->flags & JFFS2_XFLAGS_INVALID) 274 if (xd->flags & JFFS2_XFLAGS_INVALID)
273 return -EIO; 275 return JFFS2_XATTR_IS_CORRUPTED;
274 if (unlikely(is_xattr_datum_unchecked(c, xd))) 276 if (unlikely(is_xattr_datum_unchecked(c, xd)))
275 rc = do_verify_xattr_datum(c, xd); 277 rc = do_verify_xattr_datum(c, xd);
276 if (!rc) 278 if (!rc)
@@ -435,6 +437,8 @@ static void unrefer_xattr_datum(struct jffs2_sb_info *c, struct jffs2_xattr_datu
435 * is called to release xattr related objects when unmounting. 437 * is called to release xattr related objects when unmounting.
436 * check_xattr_ref_inode(c, ic) 438 * check_xattr_ref_inode(c, ic)
437 * is used to confirm inode does not have duplicate xattr name/value pair. 439 * is used to confirm inode does not have duplicate xattr name/value pair.
440 * jffs2_xattr_do_crccheck_inode(c, ic)
 441 * is used to force an xattr data integrity check during the initial gc scan.
438 * -------------------------------------------------- */ 442 * -------------------------------------------------- */
439static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref) 443static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref)
440{ 444{
@@ -462,7 +466,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
462 if (crc != je32_to_cpu(rr.node_crc)) { 466 if (crc != je32_to_cpu(rr.node_crc)) {
463 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n", 467 JFFS2_ERROR("node CRC failed at %#08x, read=%#08x, calc=%#08x\n",
464 offset, je32_to_cpu(rr.node_crc), crc); 468 offset, je32_to_cpu(rr.node_crc), crc);
465 return -EIO; 469 return JFFS2_XATTR_IS_CORRUPTED;
466 } 470 }
467 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK 471 if (je16_to_cpu(rr.magic) != JFFS2_MAGIC_BITMASK
468 || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF 472 || je16_to_cpu(rr.nodetype) != JFFS2_NODETYPE_XREF
@@ -472,7 +476,7 @@ static int verify_xattr_ref(struct jffs2_sb_info *c, struct jffs2_xattr_ref *ref
472 offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK, 476 offset, je16_to_cpu(rr.magic), JFFS2_MAGIC_BITMASK,
473 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF, 477 je16_to_cpu(rr.nodetype), JFFS2_NODETYPE_XREF,
474 je32_to_cpu(rr.totlen), PAD(sizeof(rr))); 478 je32_to_cpu(rr.totlen), PAD(sizeof(rr)));
475 return -EIO; 479 return JFFS2_XATTR_IS_CORRUPTED;
476 } 480 }
477 ref->ino = je32_to_cpu(rr.ino); 481 ref->ino = je32_to_cpu(rr.ino);
478 ref->xid = je32_to_cpu(rr.xid); 482 ref->xid = je32_to_cpu(rr.xid);
@@ -682,6 +686,11 @@ static int check_xattr_ref_inode(struct jffs2_sb_info *c, struct jffs2_inode_cac
682 return rc; 686 return rc;
683} 687}
684 688
689void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic)
690{
691 check_xattr_ref_inode(c, ic);
692}
693
685/* -------- xattr subsystem functions --------------- 694/* -------- xattr subsystem functions ---------------
686 * jffs2_init_xattr_subsystem(c) 695 * jffs2_init_xattr_subsystem(c)
687 * is used to initialize semaphore and list_head, and some variables. 696 * is used to initialize semaphore and list_head, and some variables.
diff --git a/fs/jffs2/xattr.h b/fs/jffs2/xattr.h
index 7be4beb306f3..467ff376ee26 100644
--- a/fs/jffs2/xattr.h
+++ b/fs/jffs2/xattr.h
@@ -77,6 +77,7 @@ extern void jffs2_clear_xattr_subsystem(struct jffs2_sb_info *c);
77extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c, 77extern struct jffs2_xattr_datum *jffs2_setup_xattr_datum(struct jffs2_sb_info *c,
78 uint32_t xid, uint32_t version); 78 uint32_t xid, uint32_t version);
79 79
80extern void jffs2_xattr_do_crccheck_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
80extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); 81extern void jffs2_xattr_delete_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
81extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic); 82extern void jffs2_xattr_free_inode(struct jffs2_sb_info *c, struct jffs2_inode_cache *ic);
82 83
@@ -108,6 +109,7 @@ extern ssize_t jffs2_listxattr(struct dentry *, char *, size_t);
108#define jffs2_build_xattr_subsystem(c) 109#define jffs2_build_xattr_subsystem(c)
109#define jffs2_clear_xattr_subsystem(c) 110#define jffs2_clear_xattr_subsystem(c)
110 111
112#define jffs2_xattr_do_crccheck_inode(c, ic)
111#define jffs2_xattr_delete_inode(c, ic) 113#define jffs2_xattr_delete_inode(c, ic)
112#define jffs2_xattr_free_inode(c, ic) 114#define jffs2_xattr_free_inode(c, ic)
113#define jffs2_verify_xattr(c) (1) 115#define jffs2_verify_xattr(c) (1)
diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c
index ba1dc2eebd1e..ca0a08001449 100644
--- a/fs/lockd/clntlock.c
+++ b/fs/lockd/clntlock.c
@@ -56,7 +56,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
56 u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4; 56 u32 nlm_version = (nlm_init->nfs_version == 2) ? 1 : 4;
57 int status; 57 int status;
58 58
59 status = lockd_up(); 59 status = lockd_up(nlm_init->net);
60 if (status < 0) 60 if (status < 0)
61 return ERR_PTR(status); 61 return ERR_PTR(status);
62 62
@@ -65,7 +65,7 @@ struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init)
65 nlm_init->hostname, nlm_init->noresvport, 65 nlm_init->hostname, nlm_init->noresvport,
66 nlm_init->net); 66 nlm_init->net);
67 if (host == NULL) { 67 if (host == NULL) {
68 lockd_down(); 68 lockd_down(nlm_init->net);
69 return ERR_PTR(-ENOLCK); 69 return ERR_PTR(-ENOLCK);
70 } 70 }
71 71
@@ -80,8 +80,10 @@ EXPORT_SYMBOL_GPL(nlmclnt_init);
80 */ 80 */
81void nlmclnt_done(struct nlm_host *host) 81void nlmclnt_done(struct nlm_host *host)
82{ 82{
83 struct net *net = host->net;
84
83 nlmclnt_release_host(host); 85 nlmclnt_release_host(host);
84 lockd_down(); 86 lockd_down(net);
85} 87}
86EXPORT_SYMBOL_GPL(nlmclnt_done); 88EXPORT_SYMBOL_GPL(nlmclnt_done);
87 89
@@ -220,11 +222,12 @@ reclaimer(void *ptr)
220 struct nlm_wait *block; 222 struct nlm_wait *block;
221 struct file_lock *fl, *next; 223 struct file_lock *fl, *next;
222 u32 nsmstate; 224 u32 nsmstate;
225 struct net *net = host->net;
223 226
224 allow_signal(SIGKILL); 227 allow_signal(SIGKILL);
225 228
226 down_write(&host->h_rwsem); 229 down_write(&host->h_rwsem);
227 lockd_up(); /* note: this cannot fail as lockd is already running */ 230 lockd_up(net); /* note: this cannot fail as lockd is already running */
228 231
229 dprintk("lockd: reclaiming locks for host %s\n", host->h_name); 232 dprintk("lockd: reclaiming locks for host %s\n", host->h_name);
230 233
@@ -275,6 +278,6 @@ restart:
275 278
276 /* Release host handle after use */ 279 /* Release host handle after use */
277 nlmclnt_release_host(host); 280 nlmclnt_release_host(host);
278 lockd_down(); 281 lockd_down(net);
279 return 0; 282 return 0;
280} 283}
diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
index f49b9afc4436..80938fda67e0 100644
--- a/fs/lockd/svc.c
+++ b/fs/lockd/svc.c
@@ -251,39 +251,40 @@ out_err:
251 return err; 251 return err;
252} 252}
253 253
254static int lockd_up_net(struct net *net) 254static int lockd_up_net(struct svc_serv *serv, struct net *net)
255{ 255{
256 struct lockd_net *ln = net_generic(net, lockd_net_id); 256 struct lockd_net *ln = net_generic(net, lockd_net_id);
257 struct svc_serv *serv = nlmsvc_rqst->rq_server;
258 int error; 257 int error;
259 258
260 if (ln->nlmsvc_users) 259 if (ln->nlmsvc_users++)
261 return 0; 260 return 0;
262 261
263 error = svc_rpcb_setup(serv, net); 262 error = svc_bind(serv, net);
264 if (error) 263 if (error)
265 goto err_rpcb; 264 goto err_bind;
266 265
267 error = make_socks(serv, net); 266 error = make_socks(serv, net);
268 if (error < 0) 267 if (error < 0)
269 goto err_socks; 268 goto err_socks;
269 dprintk("lockd_up_net: per-net data created; net=%p\n", net);
270 return 0; 270 return 0;
271 271
272err_socks: 272err_socks:
273 svc_rpcb_cleanup(serv, net); 273 svc_rpcb_cleanup(serv, net);
274err_rpcb: 274err_bind:
275 ln->nlmsvc_users--;
275 return error; 276 return error;
276} 277}
277 278
278static void lockd_down_net(struct net *net) 279static void lockd_down_net(struct svc_serv *serv, struct net *net)
279{ 280{
280 struct lockd_net *ln = net_generic(net, lockd_net_id); 281 struct lockd_net *ln = net_generic(net, lockd_net_id);
281 struct svc_serv *serv = nlmsvc_rqst->rq_server;
282 282
283 if (ln->nlmsvc_users) { 283 if (ln->nlmsvc_users) {
284 if (--ln->nlmsvc_users == 0) { 284 if (--ln->nlmsvc_users == 0) {
285 nlm_shutdown_hosts_net(net); 285 nlm_shutdown_hosts_net(net);
286 svc_shutdown_net(serv, net); 286 svc_shutdown_net(serv, net);
287 dprintk("lockd_down_net: per-net data destroyed; net=%p\n", net);
287 } 288 }
288 } else { 289 } else {
289 printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n", 290 printk(KERN_ERR "lockd_down_net: no users! task=%p, net=%p\n",
@@ -292,22 +293,60 @@ static void lockd_down_net(struct net *net)
292 } 293 }
293} 294}
294 295
295/* 296static int lockd_start_svc(struct svc_serv *serv)
296 * Bring up the lockd process if it's not already up. 297{
297 */ 298 int error;
298int lockd_up(void) 299
300 if (nlmsvc_rqst)
301 return 0;
302
303 /*
304 * Create the kernel thread and wait for it to start.
305 */
306 nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE);
307 if (IS_ERR(nlmsvc_rqst)) {
308 error = PTR_ERR(nlmsvc_rqst);
309 printk(KERN_WARNING
310 "lockd_up: svc_rqst allocation failed, error=%d\n",
311 error);
312 goto out_rqst;
313 }
314
315 svc_sock_update_bufs(serv);
316 serv->sv_maxconn = nlm_max_connections;
317
318 nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
319 if (IS_ERR(nlmsvc_task)) {
320 error = PTR_ERR(nlmsvc_task);
321 printk(KERN_WARNING
322 "lockd_up: kthread_run failed, error=%d\n", error);
323 goto out_task;
324 }
325 dprintk("lockd_up: service started\n");
326 return 0;
327
328out_task:
329 svc_exit_thread(nlmsvc_rqst);
330 nlmsvc_task = NULL;
331out_rqst:
332 nlmsvc_rqst = NULL;
333 return error;
334}
335
336static struct svc_serv *lockd_create_svc(void)
299{ 337{
300 struct svc_serv *serv; 338 struct svc_serv *serv;
301 int error = 0;
302 struct net *net = current->nsproxy->net_ns;
303 339
304 mutex_lock(&nlmsvc_mutex);
305 /* 340 /*
306 * Check whether we're already up and running. 341 * Check whether we're already up and running.
307 */ 342 */
308 if (nlmsvc_rqst) { 343 if (nlmsvc_rqst) {
309 error = lockd_up_net(net); 344 /*
310 goto out; 345 * Note: increase service usage, because later in case of error
346 * svc_destroy() will be called.
347 */
348 svc_get(nlmsvc_rqst->rq_server);
349 return nlmsvc_rqst->rq_server;
311 } 350 }
312 351
313 /* 352 /*
@@ -318,59 +357,53 @@ int lockd_up(void)
318 printk(KERN_WARNING 357 printk(KERN_WARNING
319 "lockd_up: no pid, %d users??\n", nlmsvc_users); 358 "lockd_up: no pid, %d users??\n", nlmsvc_users);
320 359
321 error = -ENOMEM;
322 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL); 360 serv = svc_create(&nlmsvc_program, LOCKD_BUFSIZE, NULL);
323 if (!serv) { 361 if (!serv) {
324 printk(KERN_WARNING "lockd_up: create service failed\n"); 362 printk(KERN_WARNING "lockd_up: create service failed\n");
325 goto out; 363 return ERR_PTR(-ENOMEM);
326 } 364 }
365 dprintk("lockd_up: service created\n");
366 return serv;
367}
327 368
328 error = make_socks(serv, net); 369/*
329 if (error < 0) 370 * Bring up the lockd process if it's not already up.
330 goto destroy_and_out; 371 */
372int lockd_up(struct net *net)
373{
374 struct svc_serv *serv;
375 int error;
331 376
332 /* 377 mutex_lock(&nlmsvc_mutex);
333 * Create the kernel thread and wait for it to start. 378
334 */ 379 serv = lockd_create_svc();
335 nlmsvc_rqst = svc_prepare_thread(serv, &serv->sv_pools[0], NUMA_NO_NODE); 380 if (IS_ERR(serv)) {
336 if (IS_ERR(nlmsvc_rqst)) { 381 error = PTR_ERR(serv);
337 error = PTR_ERR(nlmsvc_rqst); 382 goto err_create;
338 nlmsvc_rqst = NULL;
339 printk(KERN_WARNING
340 "lockd_up: svc_rqst allocation failed, error=%d\n",
341 error);
342 goto destroy_and_out;
343 } 383 }
344 384
345 svc_sock_update_bufs(serv); 385 error = lockd_up_net(serv, net);
346 serv->sv_maxconn = nlm_max_connections; 386 if (error < 0)
387 goto err_net;
347 388
348 nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name); 389 error = lockd_start_svc(serv);
349 if (IS_ERR(nlmsvc_task)) { 390 if (error < 0)
350 error = PTR_ERR(nlmsvc_task); 391 goto err_start;
351 svc_exit_thread(nlmsvc_rqst);
352 nlmsvc_task = NULL;
353 nlmsvc_rqst = NULL;
354 printk(KERN_WARNING
355 "lockd_up: kthread_run failed, error=%d\n", error);
356 goto destroy_and_out;
357 }
358 392
393 nlmsvc_users++;
359 /* 394 /*
360 * Note: svc_serv structures have an initial use count of 1, 395 * Note: svc_serv structures have an initial use count of 1,
361 * so we exit through here on both success and failure. 396 * so we exit through here on both success and failure.
362 */ 397 */
363destroy_and_out: 398err_net:
364 svc_destroy(serv); 399 svc_destroy(serv);
365out: 400err_create:
366 if (!error) {
367 struct lockd_net *ln = net_generic(net, lockd_net_id);
368
369 ln->nlmsvc_users++;
370 nlmsvc_users++;
371 }
372 mutex_unlock(&nlmsvc_mutex); 401 mutex_unlock(&nlmsvc_mutex);
373 return error; 402 return error;
403
404err_start:
405 lockd_down_net(serv, net);
406 goto err_net;
374} 407}
375EXPORT_SYMBOL_GPL(lockd_up); 408EXPORT_SYMBOL_GPL(lockd_up);
376 409
@@ -378,14 +411,13 @@ EXPORT_SYMBOL_GPL(lockd_up);
378 * Decrement the user count and bring down lockd if we're the last. 411 * Decrement the user count and bring down lockd if we're the last.
379 */ 412 */
380void 413void
381lockd_down(void) 414lockd_down(struct net *net)
382{ 415{
383 mutex_lock(&nlmsvc_mutex); 416 mutex_lock(&nlmsvc_mutex);
417 lockd_down_net(nlmsvc_rqst->rq_server, net);
384 if (nlmsvc_users) { 418 if (nlmsvc_users) {
385 if (--nlmsvc_users) { 419 if (--nlmsvc_users)
386 lockd_down_net(current->nsproxy->net_ns);
387 goto out; 420 goto out;
388 }
389 } else { 421 } else {
390 printk(KERN_ERR "lockd_down: no users! task=%p\n", 422 printk(KERN_ERR "lockd_down: no users! task=%p\n",
391 nlmsvc_task); 423 nlmsvc_task);
@@ -397,7 +429,9 @@ lockd_down(void)
397 BUG(); 429 BUG();
398 } 430 }
399 kthread_stop(nlmsvc_task); 431 kthread_stop(nlmsvc_task);
432 dprintk("lockd_down: service stopped\n");
400 svc_exit_thread(nlmsvc_rqst); 433 svc_exit_thread(nlmsvc_rqst);
434 dprintk("lockd_down: service destroyed\n");
401 nlmsvc_task = NULL; 435 nlmsvc_task = NULL;
402 nlmsvc_rqst = NULL; 436 nlmsvc_rqst = NULL;
403out: 437out:
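
Note on the fs/lockd/svc.c hunks above: lockd_up() is split into lockd_create_svc(), lockd_up_net() and lockd_start_svc(), and the single destroy_and_out label is replaced by err_start/err_net/err_create labels that undo only what has already succeeded, in reverse order. The success path still drops the svc_serv reference, because a svc_serv starts with a use count of 1 (the comment kept in the diff). Below is a minimal, self-contained userspace sketch of the same unwind ordering; the setup_*/undo_* names are hypothetical stand-ins, not kernel functions.

    #include <stdio.h>

    /* Hypothetical stand-ins for "create service", "per-net setup" and
     * "start thread"; each returns 0 on success, negative on failure. */
    static int setup_svc(void)    { puts("svc created");  return 0; }
    static int setup_net(void)    { puts("net set up");   return 0; }
    static int start_thread(void) { puts("thread start"); return -1; } /* fails */
    static void undo_net(void)    { puts("net torn down"); }
    static void undo_svc(void)    { puts("svc destroyed"); }

    static int bring_up(void)
    {
        int error;

        error = setup_svc();
        if (error < 0)
            goto err_create;
        error = setup_net();
        if (error < 0)
            goto err_net;
        error = start_thread();
        if (error < 0)
            goto err_start;
        return 0;

    err_start:              /* unwind in reverse order of setup */
        undo_net();
    err_net:
        undo_svc();
    err_create:
        return error;
    }

    int main(void)
    {
        return bring_up() ? 1 : 0;
    }
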
diff --git a/fs/locks.c b/fs/locks.c
index 4f441e46cef4..814c51d0de47 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -1636,12 +1636,13 @@ EXPORT_SYMBOL(flock_lock_file_wait);
1636SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd) 1636SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1637{ 1637{
1638 struct file *filp; 1638 struct file *filp;
1639 int fput_needed;
1639 struct file_lock *lock; 1640 struct file_lock *lock;
1640 int can_sleep, unlock; 1641 int can_sleep, unlock;
1641 int error; 1642 int error;
1642 1643
1643 error = -EBADF; 1644 error = -EBADF;
1644 filp = fget(fd); 1645 filp = fget_light(fd, &fput_needed);
1645 if (!filp) 1646 if (!filp)
1646 goto out; 1647 goto out;
1647 1648
@@ -1674,7 +1675,7 @@ SYSCALL_DEFINE2(flock, unsigned int, fd, unsigned int, cmd)
1674 locks_free_lock(lock); 1675 locks_free_lock(lock);
1675 1676
1676 out_putf: 1677 out_putf:
1677 fput(filp); 1678 fput_light(filp, fput_needed);
1678 out: 1679 out:
1679 return error; 1680 return error;
1680} 1681}
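
The fs/locks.c change above switches sys_flock() from fget()/fput() to fget_light()/fput_light(). The light variants avoid the atomic reference-count round trip when the file table is not shared with another thread; fput_needed records whether a real reference was taken so the matching put knows what to drop. The calling pattern, exactly as used in the hunk (kernel-internal API of this tree, not buildable stand-alone), is:

    int fput_needed;
    struct file *filp;

    filp = fget_light(fd, &fput_needed);   /* may skip the refcount bump */
    if (!filp)
        return -EBADF;
    /* ... use filp ... */
    fput_light(filp, fput_needed);         /* drops a ref only if one was taken */
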
diff --git a/fs/namei.c b/fs/namei.c
index c651f02c9fec..7d694194024a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -449,7 +449,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
449 mntget(nd->path.mnt); 449 mntget(nd->path.mnt);
450 450
451 rcu_read_unlock(); 451 rcu_read_unlock();
452 br_read_unlock(vfsmount_lock); 452 br_read_unlock(&vfsmount_lock);
453 nd->flags &= ~LOOKUP_RCU; 453 nd->flags &= ~LOOKUP_RCU;
454 return 0; 454 return 0;
455 455
@@ -507,14 +507,14 @@ static int complete_walk(struct nameidata *nd)
507 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) { 507 if (unlikely(!__d_rcu_to_refcount(dentry, nd->seq))) {
508 spin_unlock(&dentry->d_lock); 508 spin_unlock(&dentry->d_lock);
509 rcu_read_unlock(); 509 rcu_read_unlock();
510 br_read_unlock(vfsmount_lock); 510 br_read_unlock(&vfsmount_lock);
511 return -ECHILD; 511 return -ECHILD;
512 } 512 }
513 BUG_ON(nd->inode != dentry->d_inode); 513 BUG_ON(nd->inode != dentry->d_inode);
514 spin_unlock(&dentry->d_lock); 514 spin_unlock(&dentry->d_lock);
515 mntget(nd->path.mnt); 515 mntget(nd->path.mnt);
516 rcu_read_unlock(); 516 rcu_read_unlock();
517 br_read_unlock(vfsmount_lock); 517 br_read_unlock(&vfsmount_lock);
518 } 518 }
519 519
520 if (likely(!(nd->flags & LOOKUP_JUMPED))) 520 if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -681,15 +681,15 @@ int follow_up(struct path *path)
681 struct mount *parent; 681 struct mount *parent;
682 struct dentry *mountpoint; 682 struct dentry *mountpoint;
683 683
684 br_read_lock(vfsmount_lock); 684 br_read_lock(&vfsmount_lock);
685 parent = mnt->mnt_parent; 685 parent = mnt->mnt_parent;
686 if (&parent->mnt == path->mnt) { 686 if (&parent->mnt == path->mnt) {
687 br_read_unlock(vfsmount_lock); 687 br_read_unlock(&vfsmount_lock);
688 return 0; 688 return 0;
689 } 689 }
690 mntget(&parent->mnt); 690 mntget(&parent->mnt);
691 mountpoint = dget(mnt->mnt_mountpoint); 691 mountpoint = dget(mnt->mnt_mountpoint);
692 br_read_unlock(vfsmount_lock); 692 br_read_unlock(&vfsmount_lock);
693 dput(path->dentry); 693 dput(path->dentry);
694 path->dentry = mountpoint; 694 path->dentry = mountpoint;
695 mntput(path->mnt); 695 mntput(path->mnt);
@@ -947,7 +947,7 @@ failed:
947 if (!(nd->flags & LOOKUP_ROOT)) 947 if (!(nd->flags & LOOKUP_ROOT))
948 nd->root.mnt = NULL; 948 nd->root.mnt = NULL;
949 rcu_read_unlock(); 949 rcu_read_unlock();
950 br_read_unlock(vfsmount_lock); 950 br_read_unlock(&vfsmount_lock);
951 return -ECHILD; 951 return -ECHILD;
952} 952}
953 953
@@ -1125,8 +1125,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
1125 * small and for now I'd prefer to have fast path as straight as possible. 1125 * small and for now I'd prefer to have fast path as straight as possible.
1126 * It _is_ time-critical. 1126 * It _is_ time-critical.
1127 */ 1127 */
1128static int do_lookup(struct nameidata *nd, struct qstr *name, 1128static int lookup_fast(struct nameidata *nd, struct qstr *name,
1129 struct path *path, struct inode **inode) 1129 struct path *path, struct inode **inode)
1130{ 1130{
1131 struct vfsmount *mnt = nd->path.mnt; 1131 struct vfsmount *mnt = nd->path.mnt;
1132 struct dentry *dentry, *parent = nd->path.dentry; 1132 struct dentry *dentry, *parent = nd->path.dentry;
@@ -1208,7 +1208,7 @@ unlazy:
1208 goto need_lookup; 1208 goto need_lookup;
1209 } 1209 }
1210 } 1210 }
1211done: 1211
1212 path->mnt = mnt; 1212 path->mnt = mnt;
1213 path->dentry = dentry; 1213 path->dentry = dentry;
1214 err = follow_managed(path, nd->flags); 1214 err = follow_managed(path, nd->flags);
@@ -1222,6 +1222,17 @@ done:
1222 return 0; 1222 return 0;
1223 1223
1224need_lookup: 1224need_lookup:
1225 return 1;
1226}
1227
1228/* Fast lookup failed, do it the slow way */
1229static int lookup_slow(struct nameidata *nd, struct qstr *name,
1230 struct path *path)
1231{
1232 struct dentry *dentry, *parent;
1233 int err;
1234
1235 parent = nd->path.dentry;
1225 BUG_ON(nd->inode != parent->d_inode); 1236 BUG_ON(nd->inode != parent->d_inode);
1226 1237
1227 mutex_lock(&parent->d_inode->i_mutex); 1238 mutex_lock(&parent->d_inode->i_mutex);
@@ -1229,7 +1240,16 @@ need_lookup:
1229 mutex_unlock(&parent->d_inode->i_mutex); 1240 mutex_unlock(&parent->d_inode->i_mutex);
1230 if (IS_ERR(dentry)) 1241 if (IS_ERR(dentry))
1231 return PTR_ERR(dentry); 1242 return PTR_ERR(dentry);
1232 goto done; 1243 path->mnt = nd->path.mnt;
1244 path->dentry = dentry;
1245 err = follow_managed(path, nd->flags);
1246 if (unlikely(err < 0)) {
1247 path_put_conditional(path, nd);
1248 return err;
1249 }
1250 if (err)
1251 nd->flags |= LOOKUP_JUMPED;
1252 return 0;
1233} 1253}
1234 1254
1235static inline int may_lookup(struct nameidata *nd) 1255static inline int may_lookup(struct nameidata *nd)
@@ -1265,7 +1285,7 @@ static void terminate_walk(struct nameidata *nd)
1265 if (!(nd->flags & LOOKUP_ROOT)) 1285 if (!(nd->flags & LOOKUP_ROOT))
1266 nd->root.mnt = NULL; 1286 nd->root.mnt = NULL;
1267 rcu_read_unlock(); 1287 rcu_read_unlock();
1268 br_read_unlock(vfsmount_lock); 1288 br_read_unlock(&vfsmount_lock);
1269 } 1289 }
1270} 1290}
1271 1291
@@ -1301,21 +1321,26 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
1301 */ 1321 */
1302 if (unlikely(type != LAST_NORM)) 1322 if (unlikely(type != LAST_NORM))
1303 return handle_dots(nd, type); 1323 return handle_dots(nd, type);
1304 err = do_lookup(nd, name, path, &inode); 1324 err = lookup_fast(nd, name, path, &inode);
1305 if (unlikely(err)) { 1325 if (unlikely(err)) {
1306 terminate_walk(nd); 1326 if (err < 0)
1307 return err; 1327 goto out_err;
1308 } 1328
1309 if (!inode) { 1329 err = lookup_slow(nd, name, path);
1310 path_to_nameidata(path, nd); 1330 if (err < 0)
1311 terminate_walk(nd); 1331 goto out_err;
1312 return -ENOENT; 1332
1333 inode = path->dentry->d_inode;
1313 } 1334 }
1335 err = -ENOENT;
1336 if (!inode)
1337 goto out_path_put;
1338
1314 if (should_follow_link(inode, follow)) { 1339 if (should_follow_link(inode, follow)) {
1315 if (nd->flags & LOOKUP_RCU) { 1340 if (nd->flags & LOOKUP_RCU) {
1316 if (unlikely(unlazy_walk(nd, path->dentry))) { 1341 if (unlikely(unlazy_walk(nd, path->dentry))) {
1317 terminate_walk(nd); 1342 err = -ECHILD;
1318 return -ECHILD; 1343 goto out_err;
1319 } 1344 }
1320 } 1345 }
1321 BUG_ON(inode != path->dentry->d_inode); 1346 BUG_ON(inode != path->dentry->d_inode);
@@ -1324,6 +1349,12 @@ static inline int walk_component(struct nameidata *nd, struct path *path,
1324 path_to_nameidata(path, nd); 1349 path_to_nameidata(path, nd);
1325 nd->inode = inode; 1350 nd->inode = inode;
1326 return 0; 1351 return 0;
1352
1353out_path_put:
1354 path_to_nameidata(path, nd);
1355out_err:
1356 terminate_walk(nd);
1357 return err;
1327} 1358}
1328 1359
1329/* 1360/*
@@ -1620,7 +1651,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
1620 nd->path = nd->root; 1651 nd->path = nd->root;
1621 nd->inode = inode; 1652 nd->inode = inode;
1622 if (flags & LOOKUP_RCU) { 1653 if (flags & LOOKUP_RCU) {
1623 br_read_lock(vfsmount_lock); 1654 br_read_lock(&vfsmount_lock);
1624 rcu_read_lock(); 1655 rcu_read_lock();
1625 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1656 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1626 } else { 1657 } else {
@@ -1633,7 +1664,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
1633 1664
1634 if (*name=='/') { 1665 if (*name=='/') {
1635 if (flags & LOOKUP_RCU) { 1666 if (flags & LOOKUP_RCU) {
1636 br_read_lock(vfsmount_lock); 1667 br_read_lock(&vfsmount_lock);
1637 rcu_read_lock(); 1668 rcu_read_lock();
1638 set_root_rcu(nd); 1669 set_root_rcu(nd);
1639 } else { 1670 } else {
@@ -1646,7 +1677,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
1646 struct fs_struct *fs = current->fs; 1677 struct fs_struct *fs = current->fs;
1647 unsigned seq; 1678 unsigned seq;
1648 1679
1649 br_read_lock(vfsmount_lock); 1680 br_read_lock(&vfsmount_lock);
1650 rcu_read_lock(); 1681 rcu_read_lock();
1651 1682
1652 do { 1683 do {
@@ -1682,7 +1713,7 @@ static int path_init(int dfd, const char *name, unsigned int flags,
1682 if (fput_needed) 1713 if (fput_needed)
1683 *fp = file; 1714 *fp = file;
1684 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq); 1715 nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
1685 br_read_lock(vfsmount_lock); 1716 br_read_lock(&vfsmount_lock);
1686 rcu_read_lock(); 1717 rcu_read_lock();
1687 } else { 1718 } else {
1688 path_get(&file->f_path); 1719 path_get(&file->f_path);
@@ -2169,6 +2200,10 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2169 int want_write = 0; 2200 int want_write = 0;
2170 int acc_mode = op->acc_mode; 2201 int acc_mode = op->acc_mode;
2171 struct file *filp; 2202 struct file *filp;
2203 struct inode *inode;
2204 int symlink_ok = 0;
2205 struct path save_parent = { .dentry = NULL, .mnt = NULL };
2206 bool retried = false;
2172 int error; 2207 int error;
2173 2208
2174 nd->flags &= ~LOOKUP_PARENT; 2209 nd->flags &= ~LOOKUP_PARENT;
@@ -2200,30 +2235,23 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2200 } 2235 }
2201 2236
2202 if (!(open_flag & O_CREAT)) { 2237 if (!(open_flag & O_CREAT)) {
2203 int symlink_ok = 0;
2204 if (nd->last.name[nd->last.len]) 2238 if (nd->last.name[nd->last.len])
2205 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY; 2239 nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
2206 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW)) 2240 if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
2207 symlink_ok = 1; 2241 symlink_ok = 1;
2208 /* we _can_ be in RCU mode here */ 2242 /* we _can_ be in RCU mode here */
2209 error = walk_component(nd, path, &nd->last, LAST_NORM, 2243 error = lookup_fast(nd, &nd->last, path, &inode);
2210 !symlink_ok); 2244 if (unlikely(error)) {
2211 if (error < 0) 2245 if (error < 0)
2212 return ERR_PTR(error); 2246 goto exit;
2213 if (error) /* symlink */
2214 return NULL;
2215 /* sayonara */
2216 error = complete_walk(nd);
2217 if (error)
2218 return ERR_PTR(error);
2219 2247
2220 error = -ENOTDIR; 2248 error = lookup_slow(nd, &nd->last, path);
2221 if (nd->flags & LOOKUP_DIRECTORY) { 2249 if (error < 0)
2222 if (!nd->inode->i_op->lookup)
2223 goto exit; 2250 goto exit;
2251
2252 inode = path->dentry->d_inode;
2224 } 2253 }
2225 audit_inode(pathname, nd->path.dentry); 2254 goto finish_lookup;
2226 goto ok;
2227 } 2255 }
2228 2256
2229 /* create side of things */ 2257 /* create side of things */
@@ -2241,6 +2269,7 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2241 if (nd->last.name[nd->last.len]) 2269 if (nd->last.name[nd->last.len])
2242 goto exit; 2270 goto exit;
2243 2271
2272retry_lookup:
2244 mutex_lock(&dir->d_inode->i_mutex); 2273 mutex_lock(&dir->d_inode->i_mutex);
2245 2274
2246 dentry = lookup_hash(nd); 2275 dentry = lookup_hash(nd);
@@ -2302,22 +2331,49 @@ static struct file *do_last(struct nameidata *nd, struct path *path,
2302 if (error) 2331 if (error)
2303 nd->flags |= LOOKUP_JUMPED; 2332 nd->flags |= LOOKUP_JUMPED;
2304 2333
2334 BUG_ON(nd->flags & LOOKUP_RCU);
2335 inode = path->dentry->d_inode;
2336finish_lookup:
2337 /* we _can_ be in RCU mode here */
2305 error = -ENOENT; 2338 error = -ENOENT;
2306 if (!path->dentry->d_inode) 2339 if (!inode) {
2307 goto exit_dput; 2340 path_to_nameidata(path, nd);
2341 goto exit;
2342 }
2308 2343
2309 if (path->dentry->d_inode->i_op->follow_link) 2344 if (should_follow_link(inode, !symlink_ok)) {
2345 if (nd->flags & LOOKUP_RCU) {
2346 if (unlikely(unlazy_walk(nd, path->dentry))) {
2347 error = -ECHILD;
2348 goto exit;
2349 }
2350 }
2351 BUG_ON(inode != path->dentry->d_inode);
2310 return NULL; 2352 return NULL;
2353 }
2311 2354
2312 path_to_nameidata(path, nd); 2355 if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
2313 nd->inode = path->dentry->d_inode; 2356 path_to_nameidata(path, nd);
2357 } else {
2358 save_parent.dentry = nd->path.dentry;
2359 save_parent.mnt = mntget(path->mnt);
2360 nd->path.dentry = path->dentry;
2361
2362 }
2363 nd->inode = inode;
2314 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */ 2364 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
2315 error = complete_walk(nd); 2365 error = complete_walk(nd);
2316 if (error) 2366 if (error) {
2367 path_put(&save_parent);
2317 return ERR_PTR(error); 2368 return ERR_PTR(error);
2369 }
2318 error = -EISDIR; 2370 error = -EISDIR;
2319 if (S_ISDIR(nd->inode->i_mode)) 2371 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
2372 goto exit;
2373 error = -ENOTDIR;
2374 if ((nd->flags & LOOKUP_DIRECTORY) && !nd->inode->i_op->lookup)
2320 goto exit; 2375 goto exit;
2376 audit_inode(pathname, nd->path.dentry);
2321ok: 2377ok:
2322 if (!S_ISREG(nd->inode->i_mode)) 2378 if (!S_ISREG(nd->inode->i_mode))
2323 will_truncate = 0; 2379 will_truncate = 0;
@@ -2333,6 +2389,20 @@ common:
2333 if (error) 2389 if (error)
2334 goto exit; 2390 goto exit;
2335 filp = nameidata_to_filp(nd); 2391 filp = nameidata_to_filp(nd);
2392 if (filp == ERR_PTR(-EOPENSTALE) && save_parent.dentry && !retried) {
2393 BUG_ON(save_parent.dentry != dir);
2394 path_put(&nd->path);
2395 nd->path = save_parent;
2396 nd->inode = dir->d_inode;
2397 save_parent.mnt = NULL;
2398 save_parent.dentry = NULL;
2399 if (want_write) {
2400 mnt_drop_write(nd->path.mnt);
2401 want_write = 0;
2402 }
2403 retried = true;
2404 goto retry_lookup;
2405 }
2336 if (!IS_ERR(filp)) { 2406 if (!IS_ERR(filp)) {
2337 error = ima_file_check(filp, op->acc_mode); 2407 error = ima_file_check(filp, op->acc_mode);
2338 if (error) { 2408 if (error) {
@@ -2352,7 +2422,8 @@ common:
2352out: 2422out:
2353 if (want_write) 2423 if (want_write)
2354 mnt_drop_write(nd->path.mnt); 2424 mnt_drop_write(nd->path.mnt);
2355 path_put(&nd->path); 2425 path_put(&save_parent);
2426 terminate_walk(nd);
2356 return filp; 2427 return filp;
2357 2428
2358exit_mutex_unlock: 2429exit_mutex_unlock:
@@ -2415,6 +2486,12 @@ out:
2415 if (base) 2486 if (base)
2416 fput(base); 2487 fput(base);
2417 release_open_intent(nd); 2488 release_open_intent(nd);
2489 if (filp == ERR_PTR(-EOPENSTALE)) {
2490 if (flags & LOOKUP_RCU)
2491 filp = ERR_PTR(-ECHILD);
2492 else
2493 filp = ERR_PTR(-ESTALE);
2494 }
2418 return filp; 2495 return filp;
2419 2496
2420out_filp: 2497out_filp:
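
In the fs/namei.c hunks above, do_lookup() is split into lookup_fast() and lookup_slow(). lookup_fast() keeps a three-way return convention: a negative value is a hard error, 0 means the dentry was found (possibly still in RCU-walk mode), and 1 means the caller must fall back to lookup_slow(), which takes i_mutex and performs the real directory lookup. Both walk_component() and the O_CREAT-less branch of do_last() now follow that convention. A minimal sketch of the same fast/slow calling pattern, with hypothetical find_fast/find_slow helpers:

    #include <stdio.h>

    /* Hypothetical helpers: <0 = error, 0 = found, 1 = fall back to slow path. */
    static int find_fast(int key, int *out)
    {
        if (key < 0)
            return -22;        /* hard error, e.g. -EINVAL */
        if (key % 2)
            return 1;          /* cache miss: caller must try the slow path */
        *out = key * 10;
        return 0;
    }

    static int find_slow(int key, int *out)
    {
        *out = key * 10;       /* pretend we did the expensive lookup */
        return 0;
    }

    static int lookup(int key, int *out)
    {
        int err = find_fast(key, out);
        if (err <= 0)
            return err;        /* hard error or fast-path hit */
        return find_slow(key, out);
    }

    int main(void)
    {
        int v;
        return lookup(3, &v) == 0 && v == 30 ? 0 : 1;
    }
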
diff --git a/fs/namespace.c b/fs/namespace.c
index e6081996c9a2..1e4a5fe3d7b7 100644
--- a/fs/namespace.c
+++ b/fs/namespace.c
@@ -397,7 +397,7 @@ static int mnt_make_readonly(struct mount *mnt)
397{ 397{
398 int ret = 0; 398 int ret = 0;
399 399
400 br_write_lock(vfsmount_lock); 400 br_write_lock(&vfsmount_lock);
401 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; 401 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
402 /* 402 /*
403 * After storing MNT_WRITE_HOLD, we'll read the counters. This store 403 * After storing MNT_WRITE_HOLD, we'll read the counters. This store
@@ -431,15 +431,15 @@ static int mnt_make_readonly(struct mount *mnt)
431 */ 431 */
432 smp_wmb(); 432 smp_wmb();
433 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 433 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
434 br_write_unlock(vfsmount_lock); 434 br_write_unlock(&vfsmount_lock);
435 return ret; 435 return ret;
436} 436}
437 437
438static void __mnt_unmake_readonly(struct mount *mnt) 438static void __mnt_unmake_readonly(struct mount *mnt)
439{ 439{
440 br_write_lock(vfsmount_lock); 440 br_write_lock(&vfsmount_lock);
441 mnt->mnt.mnt_flags &= ~MNT_READONLY; 441 mnt->mnt.mnt_flags &= ~MNT_READONLY;
442 br_write_unlock(vfsmount_lock); 442 br_write_unlock(&vfsmount_lock);
443} 443}
444 444
445int sb_prepare_remount_readonly(struct super_block *sb) 445int sb_prepare_remount_readonly(struct super_block *sb)
@@ -451,7 +451,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
451 if (atomic_long_read(&sb->s_remove_count)) 451 if (atomic_long_read(&sb->s_remove_count))
452 return -EBUSY; 452 return -EBUSY;
453 453
454 br_write_lock(vfsmount_lock); 454 br_write_lock(&vfsmount_lock);
455 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) { 455 list_for_each_entry(mnt, &sb->s_mounts, mnt_instance) {
456 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) { 456 if (!(mnt->mnt.mnt_flags & MNT_READONLY)) {
457 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD; 457 mnt->mnt.mnt_flags |= MNT_WRITE_HOLD;
@@ -473,7 +473,7 @@ int sb_prepare_remount_readonly(struct super_block *sb)
473 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD) 473 if (mnt->mnt.mnt_flags & MNT_WRITE_HOLD)
474 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD; 474 mnt->mnt.mnt_flags &= ~MNT_WRITE_HOLD;
475 } 475 }
476 br_write_unlock(vfsmount_lock); 476 br_write_unlock(&vfsmount_lock);
477 477
478 return err; 478 return err;
479} 479}
@@ -522,14 +522,14 @@ struct vfsmount *lookup_mnt(struct path *path)
522{ 522{
523 struct mount *child_mnt; 523 struct mount *child_mnt;
524 524
525 br_read_lock(vfsmount_lock); 525 br_read_lock(&vfsmount_lock);
526 child_mnt = __lookup_mnt(path->mnt, path->dentry, 1); 526 child_mnt = __lookup_mnt(path->mnt, path->dentry, 1);
527 if (child_mnt) { 527 if (child_mnt) {
528 mnt_add_count(child_mnt, 1); 528 mnt_add_count(child_mnt, 1);
529 br_read_unlock(vfsmount_lock); 529 br_read_unlock(&vfsmount_lock);
530 return &child_mnt->mnt; 530 return &child_mnt->mnt;
531 } else { 531 } else {
532 br_read_unlock(vfsmount_lock); 532 br_read_unlock(&vfsmount_lock);
533 return NULL; 533 return NULL;
534 } 534 }
535} 535}
@@ -714,9 +714,9 @@ vfs_kern_mount(struct file_system_type *type, int flags, const char *name, void
714 mnt->mnt.mnt_sb = root->d_sb; 714 mnt->mnt.mnt_sb = root->d_sb;
715 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 715 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
716 mnt->mnt_parent = mnt; 716 mnt->mnt_parent = mnt;
717 br_write_lock(vfsmount_lock); 717 br_write_lock(&vfsmount_lock);
718 list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts); 718 list_add_tail(&mnt->mnt_instance, &root->d_sb->s_mounts);
719 br_write_unlock(vfsmount_lock); 719 br_write_unlock(&vfsmount_lock);
720 return &mnt->mnt; 720 return &mnt->mnt;
721} 721}
722EXPORT_SYMBOL_GPL(vfs_kern_mount); 722EXPORT_SYMBOL_GPL(vfs_kern_mount);
@@ -745,9 +745,9 @@ static struct mount *clone_mnt(struct mount *old, struct dentry *root,
745 mnt->mnt.mnt_root = dget(root); 745 mnt->mnt.mnt_root = dget(root);
746 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 746 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
747 mnt->mnt_parent = mnt; 747 mnt->mnt_parent = mnt;
748 br_write_lock(vfsmount_lock); 748 br_write_lock(&vfsmount_lock);
749 list_add_tail(&mnt->mnt_instance, &sb->s_mounts); 749 list_add_tail(&mnt->mnt_instance, &sb->s_mounts);
750 br_write_unlock(vfsmount_lock); 750 br_write_unlock(&vfsmount_lock);
751 751
752 if (flag & CL_SLAVE) { 752 if (flag & CL_SLAVE) {
753 list_add(&mnt->mnt_slave, &old->mnt_slave_list); 753 list_add(&mnt->mnt_slave, &old->mnt_slave_list);
@@ -803,35 +803,36 @@ static void mntput_no_expire(struct mount *mnt)
803{ 803{
804put_again: 804put_again:
805#ifdef CONFIG_SMP 805#ifdef CONFIG_SMP
806 br_read_lock(vfsmount_lock); 806 br_read_lock(&vfsmount_lock);
807 if (likely(atomic_read(&mnt->mnt_longterm))) { 807 if (likely(atomic_read(&mnt->mnt_longterm))) {
808 mnt_add_count(mnt, -1); 808 mnt_add_count(mnt, -1);
809 br_read_unlock(vfsmount_lock); 809 br_read_unlock(&vfsmount_lock);
810 return; 810 return;
811 } 811 }
812 br_read_unlock(vfsmount_lock); 812 br_read_unlock(&vfsmount_lock);
813 813
814 br_write_lock(vfsmount_lock); 814 br_write_lock(&vfsmount_lock);
815 mnt_add_count(mnt, -1); 815 mnt_add_count(mnt, -1);
816 if (mnt_get_count(mnt)) { 816 if (mnt_get_count(mnt)) {
817 br_write_unlock(vfsmount_lock); 817 br_write_unlock(&vfsmount_lock);
818 return; 818 return;
819 } 819 }
820#else 820#else
821 mnt_add_count(mnt, -1); 821 mnt_add_count(mnt, -1);
822 if (likely(mnt_get_count(mnt))) 822 if (likely(mnt_get_count(mnt)))
823 return; 823 return;
824 br_write_lock(vfsmount_lock); 824 br_write_lock(&vfsmount_lock);
825#endif 825#endif
826 if (unlikely(mnt->mnt_pinned)) { 826 if (unlikely(mnt->mnt_pinned)) {
827 mnt_add_count(mnt, mnt->mnt_pinned + 1); 827 mnt_add_count(mnt, mnt->mnt_pinned + 1);
828 mnt->mnt_pinned = 0; 828 mnt->mnt_pinned = 0;
829 br_write_unlock(vfsmount_lock); 829 br_write_unlock(&vfsmount_lock);
830 acct_auto_close_mnt(&mnt->mnt); 830 acct_auto_close_mnt(&mnt->mnt);
831 goto put_again; 831 goto put_again;
832 } 832 }
833
833 list_del(&mnt->mnt_instance); 834 list_del(&mnt->mnt_instance);
834 br_write_unlock(vfsmount_lock); 835 br_write_unlock(&vfsmount_lock);
835 mntfree(mnt); 836 mntfree(mnt);
836} 837}
837 838
@@ -857,21 +858,21 @@ EXPORT_SYMBOL(mntget);
857 858
858void mnt_pin(struct vfsmount *mnt) 859void mnt_pin(struct vfsmount *mnt)
859{ 860{
860 br_write_lock(vfsmount_lock); 861 br_write_lock(&vfsmount_lock);
861 real_mount(mnt)->mnt_pinned++; 862 real_mount(mnt)->mnt_pinned++;
862 br_write_unlock(vfsmount_lock); 863 br_write_unlock(&vfsmount_lock);
863} 864}
864EXPORT_SYMBOL(mnt_pin); 865EXPORT_SYMBOL(mnt_pin);
865 866
866void mnt_unpin(struct vfsmount *m) 867void mnt_unpin(struct vfsmount *m)
867{ 868{
868 struct mount *mnt = real_mount(m); 869 struct mount *mnt = real_mount(m);
869 br_write_lock(vfsmount_lock); 870 br_write_lock(&vfsmount_lock);
870 if (mnt->mnt_pinned) { 871 if (mnt->mnt_pinned) {
871 mnt_add_count(mnt, 1); 872 mnt_add_count(mnt, 1);
872 mnt->mnt_pinned--; 873 mnt->mnt_pinned--;
873 } 874 }
874 br_write_unlock(vfsmount_lock); 875 br_write_unlock(&vfsmount_lock);
875} 876}
876EXPORT_SYMBOL(mnt_unpin); 877EXPORT_SYMBOL(mnt_unpin);
877 878
@@ -988,12 +989,12 @@ int may_umount_tree(struct vfsmount *m)
988 BUG_ON(!m); 989 BUG_ON(!m);
989 990
990 /* write lock needed for mnt_get_count */ 991 /* write lock needed for mnt_get_count */
991 br_write_lock(vfsmount_lock); 992 br_write_lock(&vfsmount_lock);
992 for (p = mnt; p; p = next_mnt(p, mnt)) { 993 for (p = mnt; p; p = next_mnt(p, mnt)) {
993 actual_refs += mnt_get_count(p); 994 actual_refs += mnt_get_count(p);
994 minimum_refs += 2; 995 minimum_refs += 2;
995 } 996 }
996 br_write_unlock(vfsmount_lock); 997 br_write_unlock(&vfsmount_lock);
997 998
998 if (actual_refs > minimum_refs) 999 if (actual_refs > minimum_refs)
999 return 0; 1000 return 0;
@@ -1020,10 +1021,10 @@ int may_umount(struct vfsmount *mnt)
1020{ 1021{
1021 int ret = 1; 1022 int ret = 1;
1022 down_read(&namespace_sem); 1023 down_read(&namespace_sem);
1023 br_write_lock(vfsmount_lock); 1024 br_write_lock(&vfsmount_lock);
1024 if (propagate_mount_busy(real_mount(mnt), 2)) 1025 if (propagate_mount_busy(real_mount(mnt), 2))
1025 ret = 0; 1026 ret = 0;
1026 br_write_unlock(vfsmount_lock); 1027 br_write_unlock(&vfsmount_lock);
1027 up_read(&namespace_sem); 1028 up_read(&namespace_sem);
1028 return ret; 1029 return ret;
1029} 1030}
@@ -1040,13 +1041,13 @@ void release_mounts(struct list_head *head)
1040 struct dentry *dentry; 1041 struct dentry *dentry;
1041 struct mount *m; 1042 struct mount *m;
1042 1043
1043 br_write_lock(vfsmount_lock); 1044 br_write_lock(&vfsmount_lock);
1044 dentry = mnt->mnt_mountpoint; 1045 dentry = mnt->mnt_mountpoint;
1045 m = mnt->mnt_parent; 1046 m = mnt->mnt_parent;
1046 mnt->mnt_mountpoint = mnt->mnt.mnt_root; 1047 mnt->mnt_mountpoint = mnt->mnt.mnt_root;
1047 mnt->mnt_parent = mnt; 1048 mnt->mnt_parent = mnt;
1048 m->mnt_ghosts--; 1049 m->mnt_ghosts--;
1049 br_write_unlock(vfsmount_lock); 1050 br_write_unlock(&vfsmount_lock);
1050 dput(dentry); 1051 dput(dentry);
1051 mntput(&m->mnt); 1052 mntput(&m->mnt);
1052 } 1053 }
@@ -1073,8 +1074,9 @@ void umount_tree(struct mount *mnt, int propagate, struct list_head *kill)
1073 list_del_init(&p->mnt_expire); 1074 list_del_init(&p->mnt_expire);
1074 list_del_init(&p->mnt_list); 1075 list_del_init(&p->mnt_list);
1075 __touch_mnt_namespace(p->mnt_ns); 1076 __touch_mnt_namespace(p->mnt_ns);
1077 if (p->mnt_ns)
1078 __mnt_make_shortterm(p);
1076 p->mnt_ns = NULL; 1079 p->mnt_ns = NULL;
1077 __mnt_make_shortterm(p);
1078 list_del_init(&p->mnt_child); 1080 list_del_init(&p->mnt_child);
1079 if (mnt_has_parent(p)) { 1081 if (mnt_has_parent(p)) {
1080 p->mnt_parent->mnt_ghosts++; 1082 p->mnt_parent->mnt_ghosts++;
@@ -1112,12 +1114,12 @@ static int do_umount(struct mount *mnt, int flags)
1112 * probably don't strictly need the lock here if we examined 1114 * probably don't strictly need the lock here if we examined
1113 * all race cases, but it's a slowpath. 1115 * all race cases, but it's a slowpath.
1114 */ 1116 */
1115 br_write_lock(vfsmount_lock); 1117 br_write_lock(&vfsmount_lock);
1116 if (mnt_get_count(mnt) != 2) { 1118 if (mnt_get_count(mnt) != 2) {
1117 br_write_unlock(vfsmount_lock); 1119 br_write_unlock(&vfsmount_lock);
1118 return -EBUSY; 1120 return -EBUSY;
1119 } 1121 }
1120 br_write_unlock(vfsmount_lock); 1122 br_write_unlock(&vfsmount_lock);
1121 1123
1122 if (!xchg(&mnt->mnt_expiry_mark, 1)) 1124 if (!xchg(&mnt->mnt_expiry_mark, 1))
1123 return -EAGAIN; 1125 return -EAGAIN;
@@ -1159,7 +1161,7 @@ static int do_umount(struct mount *mnt, int flags)
1159 } 1161 }
1160 1162
1161 down_write(&namespace_sem); 1163 down_write(&namespace_sem);
1162 br_write_lock(vfsmount_lock); 1164 br_write_lock(&vfsmount_lock);
1163 event++; 1165 event++;
1164 1166
1165 if (!(flags & MNT_DETACH)) 1167 if (!(flags & MNT_DETACH))
@@ -1171,7 +1173,7 @@ static int do_umount(struct mount *mnt, int flags)
1171 umount_tree(mnt, 1, &umount_list); 1173 umount_tree(mnt, 1, &umount_list);
1172 retval = 0; 1174 retval = 0;
1173 } 1175 }
1174 br_write_unlock(vfsmount_lock); 1176 br_write_unlock(&vfsmount_lock);
1175 up_write(&namespace_sem); 1177 up_write(&namespace_sem);
1176 release_mounts(&umount_list); 1178 release_mounts(&umount_list);
1177 return retval; 1179 return retval;
@@ -1286,19 +1288,19 @@ struct mount *copy_tree(struct mount *mnt, struct dentry *dentry,
1286 q = clone_mnt(p, p->mnt.mnt_root, flag); 1288 q = clone_mnt(p, p->mnt.mnt_root, flag);
1287 if (!q) 1289 if (!q)
1288 goto Enomem; 1290 goto Enomem;
1289 br_write_lock(vfsmount_lock); 1291 br_write_lock(&vfsmount_lock);
1290 list_add_tail(&q->mnt_list, &res->mnt_list); 1292 list_add_tail(&q->mnt_list, &res->mnt_list);
1291 attach_mnt(q, &path); 1293 attach_mnt(q, &path);
1292 br_write_unlock(vfsmount_lock); 1294 br_write_unlock(&vfsmount_lock);
1293 } 1295 }
1294 } 1296 }
1295 return res; 1297 return res;
1296Enomem: 1298Enomem:
1297 if (res) { 1299 if (res) {
1298 LIST_HEAD(umount_list); 1300 LIST_HEAD(umount_list);
1299 br_write_lock(vfsmount_lock); 1301 br_write_lock(&vfsmount_lock);
1300 umount_tree(res, 0, &umount_list); 1302 umount_tree(res, 0, &umount_list);
1301 br_write_unlock(vfsmount_lock); 1303 br_write_unlock(&vfsmount_lock);
1302 release_mounts(&umount_list); 1304 release_mounts(&umount_list);
1303 } 1305 }
1304 return NULL; 1306 return NULL;
@@ -1318,9 +1320,9 @@ void drop_collected_mounts(struct vfsmount *mnt)
1318{ 1320{
1319 LIST_HEAD(umount_list); 1321 LIST_HEAD(umount_list);
1320 down_write(&namespace_sem); 1322 down_write(&namespace_sem);
1321 br_write_lock(vfsmount_lock); 1323 br_write_lock(&vfsmount_lock);
1322 umount_tree(real_mount(mnt), 0, &umount_list); 1324 umount_tree(real_mount(mnt), 0, &umount_list);
1323 br_write_unlock(vfsmount_lock); 1325 br_write_unlock(&vfsmount_lock);
1324 up_write(&namespace_sem); 1326 up_write(&namespace_sem);
1325 release_mounts(&umount_list); 1327 release_mounts(&umount_list);
1326} 1328}
@@ -1448,7 +1450,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1448 if (err) 1450 if (err)
1449 goto out_cleanup_ids; 1451 goto out_cleanup_ids;
1450 1452
1451 br_write_lock(vfsmount_lock); 1453 br_write_lock(&vfsmount_lock);
1452 1454
1453 if (IS_MNT_SHARED(dest_mnt)) { 1455 if (IS_MNT_SHARED(dest_mnt)) {
1454 for (p = source_mnt; p; p = next_mnt(p, source_mnt)) 1456 for (p = source_mnt; p; p = next_mnt(p, source_mnt))
@@ -1467,7 +1469,7 @@ static int attach_recursive_mnt(struct mount *source_mnt,
1467 list_del_init(&child->mnt_hash); 1469 list_del_init(&child->mnt_hash);
1468 commit_tree(child); 1470 commit_tree(child);
1469 } 1471 }
1470 br_write_unlock(vfsmount_lock); 1472 br_write_unlock(&vfsmount_lock);
1471 1473
1472 return 0; 1474 return 0;
1473 1475
@@ -1565,10 +1567,10 @@ static int do_change_type(struct path *path, int flag)
1565 goto out_unlock; 1567 goto out_unlock;
1566 } 1568 }
1567 1569
1568 br_write_lock(vfsmount_lock); 1570 br_write_lock(&vfsmount_lock);
1569 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL)) 1571 for (m = mnt; m; m = (recurse ? next_mnt(m, mnt) : NULL))
1570 change_mnt_propagation(m, type); 1572 change_mnt_propagation(m, type);
1571 br_write_unlock(vfsmount_lock); 1573 br_write_unlock(&vfsmount_lock);
1572 1574
1573 out_unlock: 1575 out_unlock:
1574 up_write(&namespace_sem); 1576 up_write(&namespace_sem);
@@ -1617,9 +1619,9 @@ static int do_loopback(struct path *path, char *old_name,
1617 1619
1618 err = graft_tree(mnt, path); 1620 err = graft_tree(mnt, path);
1619 if (err) { 1621 if (err) {
1620 br_write_lock(vfsmount_lock); 1622 br_write_lock(&vfsmount_lock);
1621 umount_tree(mnt, 0, &umount_list); 1623 umount_tree(mnt, 0, &umount_list);
1622 br_write_unlock(vfsmount_lock); 1624 br_write_unlock(&vfsmount_lock);
1623 } 1625 }
1624out2: 1626out2:
1625 unlock_mount(path); 1627 unlock_mount(path);
@@ -1677,16 +1679,16 @@ static int do_remount(struct path *path, int flags, int mnt_flags,
1677 else 1679 else
1678 err = do_remount_sb(sb, flags, data, 0); 1680 err = do_remount_sb(sb, flags, data, 0);
1679 if (!err) { 1681 if (!err) {
1680 br_write_lock(vfsmount_lock); 1682 br_write_lock(&vfsmount_lock);
1681 mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK; 1683 mnt_flags |= mnt->mnt.mnt_flags & MNT_PROPAGATION_MASK;
1682 mnt->mnt.mnt_flags = mnt_flags; 1684 mnt->mnt.mnt_flags = mnt_flags;
1683 br_write_unlock(vfsmount_lock); 1685 br_write_unlock(&vfsmount_lock);
1684 } 1686 }
1685 up_write(&sb->s_umount); 1687 up_write(&sb->s_umount);
1686 if (!err) { 1688 if (!err) {
1687 br_write_lock(vfsmount_lock); 1689 br_write_lock(&vfsmount_lock);
1688 touch_mnt_namespace(mnt->mnt_ns); 1690 touch_mnt_namespace(mnt->mnt_ns);
1689 br_write_unlock(vfsmount_lock); 1691 br_write_unlock(&vfsmount_lock);
1690 } 1692 }
1691 return err; 1693 return err;
1692} 1694}
@@ -1893,9 +1895,9 @@ fail:
1893 /* remove m from any expiration list it may be on */ 1895 /* remove m from any expiration list it may be on */
1894 if (!list_empty(&mnt->mnt_expire)) { 1896 if (!list_empty(&mnt->mnt_expire)) {
1895 down_write(&namespace_sem); 1897 down_write(&namespace_sem);
1896 br_write_lock(vfsmount_lock); 1898 br_write_lock(&vfsmount_lock);
1897 list_del_init(&mnt->mnt_expire); 1899 list_del_init(&mnt->mnt_expire);
1898 br_write_unlock(vfsmount_lock); 1900 br_write_unlock(&vfsmount_lock);
1899 up_write(&namespace_sem); 1901 up_write(&namespace_sem);
1900 } 1902 }
1901 mntput(m); 1903 mntput(m);
@@ -1911,11 +1913,11 @@ fail:
1911void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list) 1913void mnt_set_expiry(struct vfsmount *mnt, struct list_head *expiry_list)
1912{ 1914{
1913 down_write(&namespace_sem); 1915 down_write(&namespace_sem);
1914 br_write_lock(vfsmount_lock); 1916 br_write_lock(&vfsmount_lock);
1915 1917
1916 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list); 1918 list_add_tail(&real_mount(mnt)->mnt_expire, expiry_list);
1917 1919
1918 br_write_unlock(vfsmount_lock); 1920 br_write_unlock(&vfsmount_lock);
1919 up_write(&namespace_sem); 1921 up_write(&namespace_sem);
1920} 1922}
1921EXPORT_SYMBOL(mnt_set_expiry); 1923EXPORT_SYMBOL(mnt_set_expiry);
@@ -1935,7 +1937,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
1935 return; 1937 return;
1936 1938
1937 down_write(&namespace_sem); 1939 down_write(&namespace_sem);
1938 br_write_lock(vfsmount_lock); 1940 br_write_lock(&vfsmount_lock);
1939 1941
1940 /* extract from the expiration list every vfsmount that matches the 1942 /* extract from the expiration list every vfsmount that matches the
1941 * following criteria: 1943 * following criteria:
@@ -1954,7 +1956,7 @@ void mark_mounts_for_expiry(struct list_head *mounts)
1954 touch_mnt_namespace(mnt->mnt_ns); 1956 touch_mnt_namespace(mnt->mnt_ns);
1955 umount_tree(mnt, 1, &umounts); 1957 umount_tree(mnt, 1, &umounts);
1956 } 1958 }
1957 br_write_unlock(vfsmount_lock); 1959 br_write_unlock(&vfsmount_lock);
1958 up_write(&namespace_sem); 1960 up_write(&namespace_sem);
1959 1961
1960 release_mounts(&umounts); 1962 release_mounts(&umounts);
@@ -2218,9 +2220,9 @@ void mnt_make_shortterm(struct vfsmount *m)
2218 struct mount *mnt = real_mount(m); 2220 struct mount *mnt = real_mount(m);
2219 if (atomic_add_unless(&mnt->mnt_longterm, -1, 1)) 2221 if (atomic_add_unless(&mnt->mnt_longterm, -1, 1))
2220 return; 2222 return;
2221 br_write_lock(vfsmount_lock); 2223 br_write_lock(&vfsmount_lock);
2222 atomic_dec(&mnt->mnt_longterm); 2224 atomic_dec(&mnt->mnt_longterm);
2223 br_write_unlock(vfsmount_lock); 2225 br_write_unlock(&vfsmount_lock);
2224#endif 2226#endif
2225} 2227}
2226 2228
@@ -2250,9 +2252,9 @@ static struct mnt_namespace *dup_mnt_ns(struct mnt_namespace *mnt_ns,
2250 return ERR_PTR(-ENOMEM); 2252 return ERR_PTR(-ENOMEM);
2251 } 2253 }
2252 new_ns->root = new; 2254 new_ns->root = new;
2253 br_write_lock(vfsmount_lock); 2255 br_write_lock(&vfsmount_lock);
2254 list_add_tail(&new_ns->list, &new->mnt_list); 2256 list_add_tail(&new_ns->list, &new->mnt_list);
2255 br_write_unlock(vfsmount_lock); 2257 br_write_unlock(&vfsmount_lock);
2256 2258
2257 /* 2259 /*
2258 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts 2260 * Second pass: switch the tsk->fs->* elements and mark new vfsmounts
@@ -2416,9 +2418,9 @@ bool is_path_reachable(struct mount *mnt, struct dentry *dentry,
2416int path_is_under(struct path *path1, struct path *path2) 2418int path_is_under(struct path *path1, struct path *path2)
2417{ 2419{
2418 int res; 2420 int res;
2419 br_read_lock(vfsmount_lock); 2421 br_read_lock(&vfsmount_lock);
2420 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2); 2422 res = is_path_reachable(real_mount(path1->mnt), path1->dentry, path2);
2421 br_read_unlock(vfsmount_lock); 2423 br_read_unlock(&vfsmount_lock);
2422 return res; 2424 return res;
2423} 2425}
2424EXPORT_SYMBOL(path_is_under); 2426EXPORT_SYMBOL(path_is_under);
@@ -2505,7 +2507,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2505 /* make sure we can reach put_old from new_root */ 2507 /* make sure we can reach put_old from new_root */
2506 if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new)) 2508 if (!is_path_reachable(real_mount(old.mnt), old.dentry, &new))
2507 goto out4; 2509 goto out4;
2508 br_write_lock(vfsmount_lock); 2510 br_write_lock(&vfsmount_lock);
2509 detach_mnt(new_mnt, &parent_path); 2511 detach_mnt(new_mnt, &parent_path);
2510 detach_mnt(root_mnt, &root_parent); 2512 detach_mnt(root_mnt, &root_parent);
2511 /* mount old root on put_old */ 2513 /* mount old root on put_old */
@@ -2513,7 +2515,7 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
2513 /* mount new_root on / */ 2515 /* mount new_root on / */
2514 attach_mnt(new_mnt, &root_parent); 2516 attach_mnt(new_mnt, &root_parent);
2515 touch_mnt_namespace(current->nsproxy->mnt_ns); 2517 touch_mnt_namespace(current->nsproxy->mnt_ns);
2516 br_write_unlock(vfsmount_lock); 2518 br_write_unlock(&vfsmount_lock);
2517 chroot_fs_refs(&root, &new); 2519 chroot_fs_refs(&root, &new);
2518 error = 0; 2520 error = 0;
2519out4: 2521out4:
@@ -2576,7 +2578,7 @@ void __init mnt_init(void)
2576 for (u = 0; u < HASH_SIZE; u++) 2578 for (u = 0; u < HASH_SIZE; u++)
2577 INIT_LIST_HEAD(&mount_hashtable[u]); 2579 INIT_LIST_HEAD(&mount_hashtable[u]);
2578 2580
2579 br_lock_init(vfsmount_lock); 2581 br_lock_init(&vfsmount_lock);
2580 2582
2581 err = sysfs_init(); 2583 err = sysfs_init();
2582 if (err) 2584 if (err)
@@ -2596,9 +2598,9 @@ void put_mnt_ns(struct mnt_namespace *ns)
2596 if (!atomic_dec_and_test(&ns->count)) 2598 if (!atomic_dec_and_test(&ns->count))
2597 return; 2599 return;
2598 down_write(&namespace_sem); 2600 down_write(&namespace_sem);
2599 br_write_lock(vfsmount_lock); 2601 br_write_lock(&vfsmount_lock);
2600 umount_tree(ns->root, 0, &umount_list); 2602 umount_tree(ns->root, 0, &umount_list);
2601 br_write_unlock(vfsmount_lock); 2603 br_write_unlock(&vfsmount_lock);
2602 up_write(&namespace_sem); 2604 up_write(&namespace_sem);
2603 release_mounts(&umount_list); 2605 release_mounts(&umount_list);
2604 kfree(ns); 2606 kfree(ns);
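
Almost every hunk in fs/namespace.c above is the same mechanical change: br_read_lock(vfsmount_lock) becomes br_read_lock(&vfsmount_lock), and likewise for the write-lock, unlock and init calls. This reflects the lglock ("big reader lock") rework in this merge window, which turned the br_*lock helpers into functions that take a pointer to a struct lglock instead of macros keyed on the lock's name, so vfsmount_lock is now passed around as an ordinary object.
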
diff --git a/fs/ncpfs/file.c b/fs/ncpfs/file.c
index 3ff5fcc1528f..122e260247f5 100644
--- a/fs/ncpfs/file.c
+++ b/fs/ncpfs/file.c
@@ -221,6 +221,10 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
221 221
222 already_written = 0; 222 already_written = 0;
223 223
224 errno = file_update_time(file);
225 if (errno)
226 goto outrel;
227
224 bouncebuffer = vmalloc(bufsize); 228 bouncebuffer = vmalloc(bufsize);
225 if (!bouncebuffer) { 229 if (!bouncebuffer) {
226 errno = -EIO; /* -ENOMEM */ 230 errno = -EIO; /* -ENOMEM */
@@ -252,8 +256,6 @@ ncp_file_write(struct file *file, const char __user *buf, size_t count, loff_t *
252 } 256 }
253 vfree(bouncebuffer); 257 vfree(bouncebuffer);
254 258
255 file_update_time(file);
256
257 *ppos = pos; 259 *ppos = pos;
258 260
259 if (pos > i_size_read(inode)) { 261 if (pos > i_size_read(inode)) {
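
The fs/ncpfs/file.c hunk moves file_update_time() to before the write loop and, now that it sits on the main path, checks its return value: a failure to update the timestamps aborts the write early instead of being silently ignored after the data has already been sent.
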
diff --git a/fs/ncpfs/ncp_fs_sb.h b/fs/ncpfs/ncp_fs_sb.h
index 4af803f13516..54cc0cdb3dcb 100644
--- a/fs/ncpfs/ncp_fs_sb.h
+++ b/fs/ncpfs/ncp_fs_sb.h
@@ -23,17 +23,17 @@ struct ncp_mount_data_kernel {
23 unsigned long flags; /* NCP_MOUNT_* flags */ 23 unsigned long flags; /* NCP_MOUNT_* flags */
24 unsigned int int_flags; /* internal flags */ 24 unsigned int int_flags; /* internal flags */
25#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001 25#define NCP_IMOUNT_LOGGEDIN_POSSIBLE 0x0001
26 __kernel_uid32_t mounted_uid; /* Who may umount() this filesystem? */ 26 uid_t mounted_uid; /* Who may umount() this filesystem? */
27 struct pid *wdog_pid; /* Who cares for our watchdog packets? */ 27 struct pid *wdog_pid; /* Who cares for our watchdog packets? */
28 unsigned int ncp_fd; /* The socket to the ncp port */ 28 unsigned int ncp_fd; /* The socket to the ncp port */
29 unsigned int time_out; /* How long should I wait after 29 unsigned int time_out; /* How long should I wait after
30 sending a NCP request? */ 30 sending a NCP request? */
31 unsigned int retry_count; /* And how often should I retry? */ 31 unsigned int retry_count; /* And how often should I retry? */
32 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1]; 32 unsigned char mounted_vol[NCP_VOLNAME_LEN + 1];
33 __kernel_uid32_t uid; 33 uid_t uid;
34 __kernel_gid32_t gid; 34 gid_t gid;
35 __kernel_mode_t file_mode; 35 umode_t file_mode;
36 __kernel_mode_t dir_mode; 36 umode_t dir_mode;
37 int info_fd; 37 int info_fd;
38}; 38};
39 39
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index eb95f5091c1a..23ff18fe080a 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -106,7 +106,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
106{ 106{
107 int ret; 107 int ret;
108 108
109 ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET, 109 ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET,
110 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); 110 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
111 if (ret <= 0) 111 if (ret <= 0)
112 goto out_err; 112 goto out_err;
@@ -114,7 +114,7 @@ nfs4_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
114 dprintk("NFS: Callback listener port = %u (af %u)\n", 114 dprintk("NFS: Callback listener port = %u (af %u)\n",
115 nfs_callback_tcpport, PF_INET); 115 nfs_callback_tcpport, PF_INET);
116 116
117 ret = svc_create_xprt(serv, "tcp", xprt->xprt_net, PF_INET6, 117 ret = svc_create_xprt(serv, "tcp", &init_net, PF_INET6,
118 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS); 118 nfs_callback_set_tcpport, SVC_SOCK_ANONYMOUS);
119 if (ret > 0) { 119 if (ret > 0) {
120 nfs_callback_tcpport6 = ret; 120 nfs_callback_tcpport6 = ret;
@@ -183,7 +183,7 @@ nfs41_callback_up(struct svc_serv *serv, struct rpc_xprt *xprt)
183 * fore channel connection. 183 * fore channel connection.
184 * Returns the input port (0) and sets the svc_serv bc_xprt on success 184 * Returns the input port (0) and sets the svc_serv bc_xprt on success
185 */ 185 */
186 ret = svc_create_xprt(serv, "tcp-bc", xprt->xprt_net, PF_INET, 0, 186 ret = svc_create_xprt(serv, "tcp-bc", &init_net, PF_INET, 0,
187 SVC_SOCK_ANONYMOUS); 187 SVC_SOCK_ANONYMOUS);
188 if (ret < 0) { 188 if (ret < 0) {
189 rqstp = ERR_PTR(ret); 189 rqstp = ERR_PTR(ret);
@@ -253,6 +253,7 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
253 char svc_name[12]; 253 char svc_name[12];
254 int ret = 0; 254 int ret = 0;
255 int minorversion_setup; 255 int minorversion_setup;
256 struct net *net = &init_net;
256 257
257 mutex_lock(&nfs_callback_mutex); 258 mutex_lock(&nfs_callback_mutex);
258 if (cb_info->users++ || cb_info->task != NULL) { 259 if (cb_info->users++ || cb_info->task != NULL) {
@@ -265,6 +266,12 @@ int nfs_callback_up(u32 minorversion, struct rpc_xprt *xprt)
265 goto out_err; 266 goto out_err;
266 } 267 }
267 268
269 ret = svc_bind(serv, net);
270 if (ret < 0) {
271 printk(KERN_WARNING "NFS: bind callback service failed\n");
272 goto out_err;
273 }
274
268 minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion, 275 minorversion_setup = nfs_minorversion_callback_svc_setup(minorversion,
269 serv, xprt, &rqstp, &callback_svc); 276 serv, xprt, &rqstp, &callback_svc);
270 if (!minorversion_setup) { 277 if (!minorversion_setup) {
@@ -306,6 +313,8 @@ out_err:
306 dprintk("NFS: Couldn't create callback socket or server thread; " 313 dprintk("NFS: Couldn't create callback socket or server thread; "
307 "err = %d\n", ret); 314 "err = %d\n", ret);
308 cb_info->users--; 315 cb_info->users--;
316 if (serv)
317 svc_shutdown_net(serv, net);
309 goto out; 318 goto out;
310} 319}
311 320
@@ -320,6 +329,7 @@ void nfs_callback_down(int minorversion)
320 cb_info->users--; 329 cb_info->users--;
321 if (cb_info->users == 0 && cb_info->task != NULL) { 330 if (cb_info->users == 0 && cb_info->task != NULL) {
322 kthread_stop(cb_info->task); 331 kthread_stop(cb_info->task);
332 svc_shutdown_net(cb_info->serv, &init_net);
323 svc_exit_thread(cb_info->rqst); 333 svc_exit_thread(cb_info->rqst);
324 cb_info->serv = NULL; 334 cb_info->serv = NULL;
325 cb_info->rqst = NULL; 335 cb_info->rqst = NULL;
@@ -332,7 +342,7 @@ void nfs_callback_down(int minorversion)
332int 342int
333check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp) 343check_gss_callback_principal(struct nfs_client *clp, struct svc_rqst *rqstp)
334{ 344{
335 char *p = svc_gss_principal(rqstp); 345 char *p = rqstp->rq_cred.cr_principal;
336 346
337 if (rqstp->rq_authop->flavour != RPC_AUTH_GSS) 347 if (rqstp->rq_authop->flavour != RPC_AUTH_GSS)
338 return 1; 348 return 1;
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 95bfc243992c..e64b01d2a338 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -455,9 +455,9 @@ static __be32 decode_cb_sequence_args(struct svc_rqst *rqstp,
455 args->csa_nrclists = ntohl(*p++); 455 args->csa_nrclists = ntohl(*p++);
456 args->csa_rclists = NULL; 456 args->csa_rclists = NULL;
457 if (args->csa_nrclists) { 457 if (args->csa_nrclists) {
458 args->csa_rclists = kmalloc(args->csa_nrclists * 458 args->csa_rclists = kmalloc_array(args->csa_nrclists,
459 sizeof(*args->csa_rclists), 459 sizeof(*args->csa_rclists),
460 GFP_KERNEL); 460 GFP_KERNEL);
461 if (unlikely(args->csa_rclists == NULL)) 461 if (unlikely(args->csa_rclists == NULL))
462 goto out; 462 goto out;
463 463
@@ -696,7 +696,7 @@ static __be32 encode_cb_sequence_res(struct svc_rqst *rqstp,
696 const struct cb_sequenceres *res) 696 const struct cb_sequenceres *res)
697{ 697{
698 __be32 *p; 698 __be32 *p;
699 unsigned status = res->csr_status; 699 __be32 status = res->csr_status;
700 700
701 if (unlikely(status != 0)) 701 if (unlikely(status != 0))
702 goto out; 702 goto out;
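
In the fs/nfs/callback_xdr.c hunk, the open-coded kmalloc(n * size) becomes kmalloc_array(n, size, GFP_KERNEL), which fails the allocation if the multiplication would overflow instead of silently wrapping and returning a buffer smaller than the csa_nrclists-sized array the caller expects. A self-contained userspace sketch of the same overflow check (the alloc_array name is invented for the example):

    #include <stdint.h>
    #include <stdlib.h>
    #include <stdio.h>

    /* Userspace analogue of kmalloc_array(): refuse allocations whose
     * element count times element size would overflow size_t. */
    static void *alloc_array(size_t n, size_t size)
    {
        if (size != 0 && n > SIZE_MAX / size)
            return NULL;                 /* would wrap: report failure */
        return malloc(n * size);
    }

    int main(void)
    {
        int *ok = alloc_array(4, sizeof(int));         /* fine */
        void *bad = alloc_array(SIZE_MAX / 2, 16);     /* rejected */

        printf("ok=%p bad=%p\n", (void *)ok, bad);
        free(ok);
        return bad == NULL ? 0 : 1;
    }
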
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 7d108753af81..f005b5bebdc7 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -207,7 +207,6 @@ error_0:
207static void nfs4_shutdown_session(struct nfs_client *clp) 207static void nfs4_shutdown_session(struct nfs_client *clp)
208{ 208{
209 if (nfs4_has_session(clp)) { 209 if (nfs4_has_session(clp)) {
210 nfs4_deviceid_purge_client(clp);
211 nfs4_destroy_session(clp->cl_session); 210 nfs4_destroy_session(clp->cl_session);
212 nfs4_destroy_clientid(clp); 211 nfs4_destroy_clientid(clp);
213 } 212 }
@@ -544,8 +543,6 @@ nfs_found_client(const struct nfs_client_initdata *cl_init,
544 543
545 smp_rmb(); 544 smp_rmb();
546 545
547 BUG_ON(clp->cl_cons_state != NFS_CS_READY);
548
549 dprintk("<-- %s found nfs_client %p for %s\n", 546 dprintk("<-- %s found nfs_client %p for %s\n",
550 __func__, clp, cl_init->hostname ?: ""); 547 __func__, clp, cl_init->hostname ?: "");
551 return clp; 548 return clp;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index 0989a2099688..f430057ff3b3 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -1354,10 +1354,10 @@ out:
1354} 1354}
1355 1355
1356#ifdef CONFIG_NFS_V4 1356#ifdef CONFIG_NFS_V4
1357static int nfs_open_revalidate(struct dentry *, struct nameidata *); 1357static int nfs4_lookup_revalidate(struct dentry *, struct nameidata *);
1358 1358
1359const struct dentry_operations nfs4_dentry_operations = { 1359const struct dentry_operations nfs4_dentry_operations = {
1360 .d_revalidate = nfs_open_revalidate, 1360 .d_revalidate = nfs4_lookup_revalidate,
1361 .d_delete = nfs_dentry_delete, 1361 .d_delete = nfs_dentry_delete,
1362 .d_iput = nfs_dentry_iput, 1362 .d_iput = nfs_dentry_iput,
1363 .d_automount = nfs_d_automount, 1363 .d_automount = nfs_d_automount,
@@ -1519,13 +1519,11 @@ no_open:
1519 return nfs_lookup(dir, dentry, nd); 1519 return nfs_lookup(dir, dentry, nd);
1520} 1520}
1521 1521
1522static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd) 1522static int nfs4_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
1523{ 1523{
1524 struct dentry *parent = NULL; 1524 struct dentry *parent = NULL;
1525 struct inode *inode; 1525 struct inode *inode;
1526 struct inode *dir; 1526 struct inode *dir;
1527 struct nfs_open_context *ctx;
1528 struct iattr attr;
1529 int openflags, ret = 0; 1527 int openflags, ret = 0;
1530 1528
1531 if (nd->flags & LOOKUP_RCU) 1529 if (nd->flags & LOOKUP_RCU)
@@ -1554,57 +1552,13 @@ static int nfs_open_revalidate(struct dentry *dentry, struct nameidata *nd)
1554 /* We cannot do exclusive creation on a positive dentry */ 1552 /* We cannot do exclusive creation on a positive dentry */
1555 if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL)) 1553 if ((openflags & (O_CREAT|O_EXCL)) == (O_CREAT|O_EXCL))
1556 goto no_open_dput; 1554 goto no_open_dput;
1557 /* We can't create new files here */
1558 openflags &= ~(O_CREAT|O_EXCL);
1559
1560 ctx = create_nfs_open_context(dentry, openflags);
1561 ret = PTR_ERR(ctx);
1562 if (IS_ERR(ctx))
1563 goto out;
1564 1555
1565 attr.ia_valid = ATTR_OPEN; 1556 /* Let f_op->open() actually open (and revalidate) the file */
1566 if (openflags & O_TRUNC) { 1557 ret = 1;
1567 attr.ia_valid |= ATTR_SIZE;
1568 attr.ia_size = 0;
1569 nfs_wb_all(inode);
1570 }
1571
1572 /*
1573 * Note: we're not holding inode->i_mutex and so may be racing with
1574 * operations that change the directory. We therefore save the
1575 * change attribute *before* we do the RPC call.
1576 */
1577 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
1578 if (IS_ERR(inode)) {
1579 ret = PTR_ERR(inode);
1580 switch (ret) {
1581 case -EPERM:
1582 case -EACCES:
1583 case -EDQUOT:
1584 case -ENOSPC:
1585 case -EROFS:
1586 goto out_put_ctx;
1587 default:
1588 goto out_drop;
1589 }
1590 }
1591 iput(inode);
1592 if (inode != dentry->d_inode)
1593 goto out_drop;
1594 1558
1595 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1596 ret = nfs_intent_set_file(nd, ctx);
1597 if (ret >= 0)
1598 ret = 1;
1599out: 1559out:
1600 dput(parent); 1560 dput(parent);
1601 return ret; 1561 return ret;
1602out_drop:
1603 d_drop(dentry);
1604 ret = 0;
1605out_put_ctx:
1606 put_nfs_open_context(ctx);
1607 goto out;
1608 1562
1609no_open_dput: 1563no_open_dput:
1610 dput(parent); 1564 dput(parent);
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 23d170bc44f4..9a4cbfc85d81 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -454,6 +454,12 @@ out:
454 return result; 454 return result;
455} 455}
456 456
457static void nfs_inode_dio_write_done(struct inode *inode)
458{
459 nfs_zap_mapping(inode, inode->i_mapping);
460 inode_dio_done(inode);
461}
462
457#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 463#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
458static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 464static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
459{ 465{
@@ -484,6 +490,7 @@ static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
484 dreq->error = -EIO; 490 dreq->error = -EIO;
485 spin_unlock(cinfo.lock); 491 spin_unlock(cinfo.lock);
486 } 492 }
493 nfs_release_request(req);
487 } 494 }
488 nfs_pageio_complete(&desc); 495 nfs_pageio_complete(&desc);
489 496
@@ -517,9 +524,9 @@ static void nfs_direct_commit_complete(struct nfs_commit_data *data)
517 nfs_list_remove_request(req); 524 nfs_list_remove_request(req);
518 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) { 525 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
519 /* Note the rewrite will go through mds */ 526 /* Note the rewrite will go through mds */
520 kref_get(&req->wb_kref);
521 nfs_mark_request_commit(req, NULL, &cinfo); 527 nfs_mark_request_commit(req, NULL, &cinfo);
522 } 528 } else
529 nfs_release_request(req);
523 nfs_unlock_and_release_request(req); 530 nfs_unlock_and_release_request(req);
524 } 531 }
525 532
@@ -564,7 +571,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
564 nfs_direct_write_reschedule(dreq); 571 nfs_direct_write_reschedule(dreq);
565 break; 572 break;
566 default: 573 default:
567 nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping); 574 nfs_inode_dio_write_done(dreq->inode);
568 nfs_direct_complete(dreq); 575 nfs_direct_complete(dreq);
569 } 576 }
570} 577}
@@ -581,7 +588,7 @@ static void nfs_direct_write_schedule_work(struct work_struct *work)
581 588
582static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) 589static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
583{ 590{
584 nfs_zap_mapping(inode, inode->i_mapping); 591 nfs_inode_dio_write_done(inode);
585 nfs_direct_complete(dreq); 592 nfs_direct_complete(dreq);
586} 593}
587#endif 594#endif
@@ -710,12 +717,12 @@ static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
710 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) 717 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
711 bit = NFS_IOHDR_NEED_RESCHED; 718 bit = NFS_IOHDR_NEED_RESCHED;
712 else if (dreq->flags == 0) { 719 else if (dreq->flags == 0) {
713 memcpy(&dreq->verf, &req->wb_verf, 720 memcpy(&dreq->verf, hdr->verf,
714 sizeof(dreq->verf)); 721 sizeof(dreq->verf));
715 bit = NFS_IOHDR_NEED_COMMIT; 722 bit = NFS_IOHDR_NEED_COMMIT;
716 dreq->flags = NFS_ODIRECT_DO_COMMIT; 723 dreq->flags = NFS_ODIRECT_DO_COMMIT;
717 } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) { 724 } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
718 if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) { 725 if (memcmp(&dreq->verf, hdr->verf, sizeof(dreq->verf))) {
719 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 726 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
720 bit = NFS_IOHDR_NEED_RESCHED; 727 bit = NFS_IOHDR_NEED_RESCHED;
721 } else 728 } else
@@ -766,14 +773,16 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
766 loff_t pos) 773 loff_t pos)
767{ 774{
768 struct nfs_pageio_descriptor desc; 775 struct nfs_pageio_descriptor desc;
776 struct inode *inode = dreq->inode;
769 ssize_t result = 0; 777 ssize_t result = 0;
770 size_t requested_bytes = 0; 778 size_t requested_bytes = 0;
771 unsigned long seg; 779 unsigned long seg;
772 780
773 nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE, 781 nfs_pageio_init_write(&desc, inode, FLUSH_COND_STABLE,
774 &nfs_direct_write_completion_ops); 782 &nfs_direct_write_completion_ops);
775 desc.pg_dreq = dreq; 783 desc.pg_dreq = dreq;
776 get_dreq(dreq); 784 get_dreq(dreq);
785 atomic_inc(&inode->i_dio_count);
777 786
778 for (seg = 0; seg < nr_segs; seg++) { 787 for (seg = 0; seg < nr_segs; seg++) {
779 const struct iovec *vec = &iov[seg]; 788 const struct iovec *vec = &iov[seg];
@@ -793,6 +802,7 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
793 * generic layer handle the completion. 802 * generic layer handle the completion.
794 */ 803 */
795 if (requested_bytes == 0) { 804 if (requested_bytes == 0) {
805 inode_dio_done(inode);
796 nfs_direct_req_release(dreq); 806 nfs_direct_req_release(dreq);
797 return result < 0 ? result : -EIO; 807 return result < 0 ? result : -EIO;
798 } 808 }
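
The fs/nfs/direct.c hunks make O_DIRECT writes hold inode->i_dio_count for their whole lifetime: nfs_direct_write_schedule_iovec() bumps it up front with atomic_inc(), nfs_inode_dio_write_done() drops it via inode_dio_done() once the write (including any commit or reschedule) completes, and the early-exit path where no requests were queued drops it immediately. Together with the nfs_inode_dio_wait() calls added in fs/nfs/inode.c below, this lets setattr and getattr wait for in-flight direct I/O. The invariant is simply that every increment is paired with exactly one done on every path; a toy sketch with invented helpers:

    #include <stdio.h>

    static int in_flight;                      /* stand-in for inode->i_dio_count */

    static void dio_begin(void) { in_flight++; }
    static void dio_done(void)  { in_flight--; }

    /* Hypothetical submit function: returns the number of requests queued. */
    static int queue_requests(int n) { return n; }

    static int schedule_writes(int nreq)
    {
        dio_begin();                           /* pairs with dio_done() below */
        if (queue_requests(nreq) == 0) {
            dio_done();                        /* nothing in flight: drop it now */
            return -1;
        }
        dio_done();                            /* in the kernel this happens at completion */
        return 0;
    }

    int main(void)
    {
        schedule_writes(0);
        schedule_writes(3);
        printf("in_flight=%d\n", in_flight);   /* must be 0 */
        return in_flight == 0 ? 0 : 1;
    }
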
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 56311ca5f9f8..a6708e6b438d 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -879,12 +879,81 @@ const struct file_operations nfs_file_operations = {
879static int 879static int
880nfs4_file_open(struct inode *inode, struct file *filp) 880nfs4_file_open(struct inode *inode, struct file *filp)
881{ 881{
882 struct nfs_open_context *ctx;
883 struct dentry *dentry = filp->f_path.dentry;
884 struct dentry *parent = NULL;
885 struct inode *dir;
886 unsigned openflags = filp->f_flags;
887 struct iattr attr;
888 int err;
889
890 BUG_ON(inode != dentry->d_inode);
882 /* 891 /*
883 * NFSv4 opens are handled in d_lookup and d_revalidate. If we get to 892 * If no cached dentry exists or if it's negative, NFSv4 handled the
884 * this point, then something is very wrong 893 * opens in ->lookup() or ->create().
894 *
895 * We only get this far for a cached positive dentry. We skipped
896 * revalidation, so handle it here by dropping the dentry and returning
897 * -EOPENSTALE. The VFS will retry the lookup/create/open.
885 */ 898 */
886 dprintk("NFS: %s called! inode=%p filp=%p\n", __func__, inode, filp); 899
887 return -ENOTDIR; 900 dprintk("NFS: open file(%s/%s)\n",
901 dentry->d_parent->d_name.name,
902 dentry->d_name.name);
903
904 if ((openflags & O_ACCMODE) == 3)
905 openflags--;
906
907 /* We can't create new files here */
908 openflags &= ~(O_CREAT|O_EXCL);
909
910 parent = dget_parent(dentry);
911 dir = parent->d_inode;
912
913 ctx = alloc_nfs_open_context(filp->f_path.dentry, filp->f_mode);
914 err = PTR_ERR(ctx);
915 if (IS_ERR(ctx))
916 goto out;
917
918 attr.ia_valid = ATTR_OPEN;
919 if (openflags & O_TRUNC) {
920 attr.ia_valid |= ATTR_SIZE;
921 attr.ia_size = 0;
922 nfs_wb_all(inode);
923 }
924
925 inode = NFS_PROTO(dir)->open_context(dir, ctx, openflags, &attr);
926 if (IS_ERR(inode)) {
927 err = PTR_ERR(inode);
928 switch (err) {
929 case -EPERM:
930 case -EACCES:
931 case -EDQUOT:
932 case -ENOSPC:
933 case -EROFS:
934 goto out_put_ctx;
935 default:
936 goto out_drop;
937 }
938 }
939 iput(inode);
940 if (inode != dentry->d_inode)
941 goto out_drop;
942
943 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
944 nfs_file_set_open_context(filp, ctx);
945 err = 0;
946
947out_put_ctx:
948 put_nfs_open_context(ctx);
949out:
950 dput(parent);
951 return err;
952
953out_drop:
954 d_drop(dentry);
955 err = -EOPENSTALE;
956 goto out_put_ctx;
888} 957}
889 958
890const struct file_operations nfs4_file_operations = { 959const struct file_operations nfs4_file_operations = {
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index b5b86a05059c..864c51e4b400 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -57,6 +57,11 @@ unsigned int nfs_idmap_cache_timeout = 600;
57static const struct cred *id_resolver_cache; 57static const struct cred *id_resolver_cache;
58static struct key_type key_type_id_resolver_legacy; 58static struct key_type key_type_id_resolver_legacy;
59 59
60struct idmap {
61 struct rpc_pipe *idmap_pipe;
62 struct key_construction *idmap_key_cons;
63 struct mutex idmap_mutex;
64};
60 65
61/** 66/**
62 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields 67 * nfs_fattr_init_names - initialise the nfs_fattr owner_name/group_name fields
@@ -310,9 +315,11 @@ static ssize_t nfs_idmap_get_key(const char *name, size_t namelen,
310 name, namelen, type, data, 315 name, namelen, type, data,
311 data_size, NULL); 316 data_size, NULL);
312 if (ret < 0) { 317 if (ret < 0) {
318 mutex_lock(&idmap->idmap_mutex);
313 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy, 319 ret = nfs_idmap_request_key(&key_type_id_resolver_legacy,
314 name, namelen, type, data, 320 name, namelen, type, data,
315 data_size, idmap); 321 data_size, idmap);
322 mutex_unlock(&idmap->idmap_mutex);
316 } 323 }
317 return ret; 324 return ret;
318} 325}
@@ -354,11 +361,6 @@ static int nfs_idmap_lookup_id(const char *name, size_t namelen, const char *typ
354/* idmap classic begins here */ 361/* idmap classic begins here */
355module_param(nfs_idmap_cache_timeout, int, 0644); 362module_param(nfs_idmap_cache_timeout, int, 0644);
356 363
357struct idmap {
358 struct rpc_pipe *idmap_pipe;
359 struct key_construction *idmap_key_cons;
360};
361
362enum { 364enum {
363 Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err 365 Opt_find_uid, Opt_find_gid, Opt_find_user, Opt_find_group, Opt_find_err
364}; 366};
@@ -469,6 +471,7 @@ nfs_idmap_new(struct nfs_client *clp)
469 return error; 471 return error;
470 } 472 }
471 idmap->idmap_pipe = pipe; 473 idmap->idmap_pipe = pipe;
474 mutex_init(&idmap->idmap_mutex);
472 475
473 clp->cl_idmap = idmap; 476 clp->cl_idmap = idmap;
474 return 0; 477 return 0;
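The idmap_mutex introduced here serializes the legacy upcall fallback taken when the keyring lookup in nfs_idmap_get_key() misses, while leaving the fast path lock-free. A small user-space sketch of that shape follows, with a pthread mutex standing in for the kernel mutex; all names are hypothetical.

/*
 * Illustrative user-space sketch (not the kernel code above): only one
 * slow "legacy" fallback lookup runs at a time per idmap instance.
 */
#include <pthread.h>
#include <stdio.h>

struct idmap_like {
        pthread_mutex_t legacy_mutex;   /* mirrors idmap->idmap_mutex */
};

static int fast_lookup(const char *name)
{
        (void)name;
        return -1;                      /* pretend the keyring lookup missed */
}

static int legacy_lookup(const char *name)
{
        (void)name;
        return 1000;                    /* pretend the upcall resolved the id */
}

static int get_id(struct idmap_like *m, const char *name)
{
        int ret = fast_lookup(name);

        if (ret < 0) {
                /* serialize the fallback, as the new mutex does */
                pthread_mutex_lock(&m->legacy_mutex);
                ret = legacy_lookup(name);
                pthread_mutex_unlock(&m->legacy_mutex);
        }
        return ret;
}

int main(void)
{
        struct idmap_like m;

        pthread_mutex_init(&m.legacy_mutex, NULL);
        printf("id = %d\n", get_id(&m, "alice"));
        pthread_mutex_destroy(&m.legacy_mutex);
        return 0;
}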
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 2f6f78c4b42d..f7296983eba6 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -418,8 +418,10 @@ nfs_setattr(struct dentry *dentry, struct iattr *attr)
418 return 0; 418 return 0;
419 419
420 /* Write all dirty data */ 420 /* Write all dirty data */
421 if (S_ISREG(inode->i_mode)) 421 if (S_ISREG(inode->i_mode)) {
422 nfs_inode_dio_wait(inode);
422 nfs_wb_all(inode); 423 nfs_wb_all(inode);
424 }
423 425
424 fattr = nfs_alloc_fattr(); 426 fattr = nfs_alloc_fattr();
425 if (fattr == NULL) 427 if (fattr == NULL)
@@ -503,6 +505,7 @@ int nfs_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
503 505
504 /* Flush out writes to the server in order to update c/mtime. */ 506 /* Flush out writes to the server in order to update c/mtime. */
505 if (S_ISREG(inode->i_mode)) { 507 if (S_ISREG(inode->i_mode)) {
508 nfs_inode_dio_wait(inode);
506 err = filemap_write_and_wait(inode->i_mapping); 509 err = filemap_write_and_wait(inode->i_mapping);
507 if (err) 510 if (err)
508 goto out; 511 goto out;
@@ -1527,7 +1530,6 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
1527 nfsi->delegation_state = 0; 1530 nfsi->delegation_state = 0;
1528 init_rwsem(&nfsi->rwsem); 1531 init_rwsem(&nfsi->rwsem);
1529 nfsi->layout = NULL; 1532 nfsi->layout = NULL;
1530 atomic_set(&nfsi->commit_info.rpcs_out, 0);
1531#endif 1533#endif
1532} 1534}
1533 1535
@@ -1542,6 +1544,7 @@ static void init_once(void *foo)
1542 INIT_LIST_HEAD(&nfsi->commit_info.list); 1544 INIT_LIST_HEAD(&nfsi->commit_info.list);
1543 nfsi->npages = 0; 1545 nfsi->npages = 0;
1544 nfsi->commit_info.ncommit = 0; 1546 nfsi->commit_info.ncommit = 0;
1547 atomic_set(&nfsi->commit_info.rpcs_out, 0);
1545 atomic_set(&nfsi->silly_count, 1); 1548 atomic_set(&nfsi->silly_count, 1);
1546 INIT_HLIST_HEAD(&nfsi->silly_list); 1549 INIT_HLIST_HEAD(&nfsi->silly_list);
1547 init_waitqueue_head(&nfsi->waitqueue); 1550 init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 1848a7275592..18f99ef71343 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -369,6 +369,10 @@ extern int nfs_migrate_page(struct address_space *,
369/* direct.c */ 369/* direct.c */
370void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo, 370void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
371 struct nfs_direct_req *dreq); 371 struct nfs_direct_req *dreq);
372static inline void nfs_inode_dio_wait(struct inode *inode)
373{
374 inode_dio_wait(inode);
375}
372 376
373/* nfs4proc.c */ 377/* nfs4proc.c */
374extern void __nfs4_read_done_cb(struct nfs_read_data *); 378extern void __nfs4_read_done_cb(struct nfs_read_data *);
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index c6827f93ab57..cc5900ac61b5 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -295,7 +295,7 @@ is_ds_client(struct nfs_client *clp)
295 295
296extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[]; 296extern const struct nfs4_minor_version_ops *nfs_v4_minor_ops[];
297 297
298extern const u32 nfs4_fattr_bitmap[2]; 298extern const u32 nfs4_fattr_bitmap[3];
299extern const u32 nfs4_statfs_bitmap[2]; 299extern const u32 nfs4_statfs_bitmap[2];
300extern const u32 nfs4_pathconf_bitmap[2]; 300extern const u32 nfs4_pathconf_bitmap[2];
301extern const u32 nfs4_fsinfo_bitmap[3]; 301extern const u32 nfs4_fsinfo_bitmap[3];
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index d48dbefa0e71..15fc7e4664ed 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -105,6 +105,8 @@ static int nfs4_map_errors(int err)
105 return -EINVAL; 105 return -EINVAL;
106 case -NFS4ERR_SHARE_DENIED: 106 case -NFS4ERR_SHARE_DENIED:
107 return -EACCES; 107 return -EACCES;
108 case -NFS4ERR_MINOR_VERS_MISMATCH:
109 return -EPROTONOSUPPORT;
108 default: 110 default:
109 dprintk("%s could not handle NFSv4 error %d\n", 111 dprintk("%s could not handle NFSv4 error %d\n",
110 __func__, -err); 112 __func__, -err);
@@ -116,7 +118,7 @@ static int nfs4_map_errors(int err)
116/* 118/*
117 * This is our standard bitmap for GETATTR requests. 119 * This is our standard bitmap for GETATTR requests.
118 */ 120 */
119const u32 nfs4_fattr_bitmap[2] = { 121const u32 nfs4_fattr_bitmap[3] = {
120 FATTR4_WORD0_TYPE 122 FATTR4_WORD0_TYPE
121 | FATTR4_WORD0_CHANGE 123 | FATTR4_WORD0_CHANGE
122 | FATTR4_WORD0_SIZE 124 | FATTR4_WORD0_SIZE
@@ -133,6 +135,24 @@ const u32 nfs4_fattr_bitmap[2] = {
133 | FATTR4_WORD1_TIME_MODIFY 135 | FATTR4_WORD1_TIME_MODIFY
134}; 136};
135 137
138static const u32 nfs4_pnfs_open_bitmap[3] = {
139 FATTR4_WORD0_TYPE
140 | FATTR4_WORD0_CHANGE
141 | FATTR4_WORD0_SIZE
142 | FATTR4_WORD0_FSID
143 | FATTR4_WORD0_FILEID,
144 FATTR4_WORD1_MODE
145 | FATTR4_WORD1_NUMLINKS
146 | FATTR4_WORD1_OWNER
147 | FATTR4_WORD1_OWNER_GROUP
148 | FATTR4_WORD1_RAWDEV
149 | FATTR4_WORD1_SPACE_USED
150 | FATTR4_WORD1_TIME_ACCESS
151 | FATTR4_WORD1_TIME_METADATA
152 | FATTR4_WORD1_TIME_MODIFY,
153 FATTR4_WORD2_MDSTHRESHOLD
154};
155
136const u32 nfs4_statfs_bitmap[2] = { 156const u32 nfs4_statfs_bitmap[2] = {
137 FATTR4_WORD0_FILES_AVAIL 157 FATTR4_WORD0_FILES_AVAIL
138 | FATTR4_WORD0_FILES_FREE 158 | FATTR4_WORD0_FILES_FREE
@@ -844,6 +864,7 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
844 p->o_arg.name = &dentry->d_name; 864 p->o_arg.name = &dentry->d_name;
845 p->o_arg.server = server; 865 p->o_arg.server = server;
846 p->o_arg.bitmask = server->attr_bitmask; 866 p->o_arg.bitmask = server->attr_bitmask;
867 p->o_arg.open_bitmap = &nfs4_fattr_bitmap[0];
847 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 868 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
848 if (attrs != NULL && attrs->ia_valid != 0) { 869 if (attrs != NULL && attrs->ia_valid != 0) {
849 __be32 verf[2]; 870 __be32 verf[2];
@@ -1820,6 +1841,7 @@ static int _nfs4_do_open(struct inode *dir,
1820 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc(); 1841 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1821 if (!opendata->f_attr.mdsthreshold) 1842 if (!opendata->f_attr.mdsthreshold)
1822 goto err_opendata_put; 1843 goto err_opendata_put;
1844 opendata->o_arg.open_bitmap = &nfs4_pnfs_open_bitmap[0];
1823 } 1845 }
1824 if (dentry->d_inode != NULL) 1846 if (dentry->d_inode != NULL)
1825 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1847 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
@@ -1880,6 +1902,7 @@ static struct nfs4_state *nfs4_do_open(struct inode *dir,
1880 struct nfs4_state *res; 1902 struct nfs4_state *res;
1881 int status; 1903 int status;
1882 1904
1905 fmode &= FMODE_READ|FMODE_WRITE;
1883 do { 1906 do {
1884 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, 1907 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1885 &res, ctx_th); 1908 &res, ctx_th);
@@ -2526,6 +2549,14 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2526 2549
2527 nfs_fattr_init(fattr); 2550 nfs_fattr_init(fattr);
2528 2551
2552 /* Deal with open(O_TRUNC) */
2553 if (sattr->ia_valid & ATTR_OPEN)
2554 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2555
2556 /* Optimization: if the end result is no change, don't RPC */
2557 if ((sattr->ia_valid & ~(ATTR_FILE)) == 0)
2558 return 0;
2559
2529 /* Search for an existing open(O_WRITE) file */ 2560 /* Search for an existing open(O_WRITE) file */
2530 if (sattr->ia_valid & ATTR_FILE) { 2561 if (sattr->ia_valid & ATTR_FILE) {
2531 struct nfs_open_context *ctx; 2562 struct nfs_open_context *ctx;
@@ -2537,10 +2568,6 @@ nfs4_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
2537 } 2568 }
2538 } 2569 }
2539 2570
2540 /* Deal with open(O_TRUNC) */
2541 if (sattr->ia_valid & ATTR_OPEN)
2542 sattr->ia_valid &= ~(ATTR_MTIME|ATTR_CTIME|ATTR_OPEN);
2543
2544 status = nfs4_do_setattr(inode, cred, fattr, sattr, state); 2571 status = nfs4_do_setattr(inode, cred, fattr, sattr, state);
2545 if (status == 0) 2572 if (status == 0)
2546 nfs_setattr_update_inode(inode, sattr); 2573 nfs_setattr_update_inode(inode, sattr);
@@ -5275,7 +5302,7 @@ static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5275 5302
5276 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5303 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5277 if (status) 5304 if (status)
5278 pr_warn("NFS: Got error %d from the server %s on " 5305 dprintk("NFS: Got error %d from the server %s on "
5279 "DESTROY_CLIENTID.", status, clp->cl_hostname); 5306 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5280 return status; 5307 return status;
5281} 5308}
@@ -5746,8 +5773,7 @@ int nfs4_proc_destroy_session(struct nfs4_session *session,
5746 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5773 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5747 5774
5748 if (status) 5775 if (status)
5749 printk(KERN_WARNING 5776 dprintk("NFS: Got error %d from the server on DESTROY_SESSION. "
5750 "NFS: Got error %d from the server on DESTROY_SESSION. "
5751 "Session has been destroyed regardless...\n", status); 5777 "Session has been destroyed regardless...\n", status);
5752 5778
5753 dprintk("<-- nfs4_proc_destroy_session\n"); 5779 dprintk("<-- nfs4_proc_destroy_session\n");
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index c679b9ecef63..f38300e9f171 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -244,6 +244,16 @@ static int nfs4_begin_drain_session(struct nfs_client *clp)
244 return nfs4_wait_on_slot_tbl(&ses->fc_slot_table); 244 return nfs4_wait_on_slot_tbl(&ses->fc_slot_table);
245} 245}
246 246
247static void nfs41_finish_session_reset(struct nfs_client *clp)
248{
249 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
250 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
251 /* create_session negotiated new slot table */
252 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
253 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
254 nfs41_setup_state_renewal(clp);
255}
256
247int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred) 257int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
248{ 258{
249 int status; 259 int status;
@@ -259,8 +269,7 @@ do_confirm:
259 status = nfs4_proc_create_session(clp, cred); 269 status = nfs4_proc_create_session(clp, cred);
260 if (status != 0) 270 if (status != 0)
261 goto out; 271 goto out;
262 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 272 nfs41_finish_session_reset(clp);
263 nfs41_setup_state_renewal(clp);
264 nfs_mark_client_ready(clp, NFS_CS_READY); 273 nfs_mark_client_ready(clp, NFS_CS_READY);
265out: 274out:
266 return status; 275 return status;
@@ -1772,16 +1781,9 @@ static int nfs4_reset_session(struct nfs_client *clp)
1772 status = nfs4_handle_reclaim_lease_error(clp, status); 1781 status = nfs4_handle_reclaim_lease_error(clp, status);
1773 goto out; 1782 goto out;
1774 } 1783 }
1775 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1784 nfs41_finish_session_reset(clp);
1776 /* create_session negotiated new slot table */
1777 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1778 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1779 dprintk("%s: session reset was successful for server %s!\n", 1785 dprintk("%s: session reset was successful for server %s!\n",
1780 __func__, clp->cl_hostname); 1786 __func__, clp->cl_hostname);
1781
1782 /* Let the state manager reestablish state */
1783 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1784 nfs41_setup_state_renewal(clp);
1785out: 1787out:
1786 if (cred) 1788 if (cred)
1787 put_rpccred(cred); 1789 put_rpccred(cred);
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index ee4a74db95d0..18fae29b0301 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1198,12 +1198,13 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
1198} 1198}
1199 1199
1200static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask, 1200static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
1201 const u32 *open_bitmap,
1201 struct compound_hdr *hdr) 1202 struct compound_hdr *hdr)
1202{ 1203{
1203 encode_getattr_three(xdr, 1204 encode_getattr_three(xdr,
1204 bitmask[0] & nfs4_fattr_bitmap[0], 1205 bitmask[0] & open_bitmap[0],
1205 bitmask[1] & nfs4_fattr_bitmap[1], 1206 bitmask[1] & open_bitmap[1],
1206 bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD, 1207 bitmask[2] & open_bitmap[2],
1207 hdr); 1208 hdr);
1208} 1209}
1209 1210
@@ -2221,7 +2222,7 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
2221 encode_putfh(xdr, args->fh, &hdr); 2222 encode_putfh(xdr, args->fh, &hdr);
2222 encode_open(xdr, args, &hdr); 2223 encode_open(xdr, args, &hdr);
2223 encode_getfh(xdr, &hdr); 2224 encode_getfh(xdr, &hdr);
2224 encode_getfattr_open(xdr, args->bitmask, &hdr); 2225 encode_getfattr_open(xdr, args->bitmask, args->open_bitmap, &hdr);
2225 encode_nops(&hdr); 2226 encode_nops(&hdr);
2226} 2227}
2227 2228
@@ -4359,7 +4360,10 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
4359 4360
4360 if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U))) 4361 if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
4361 return -EIO; 4362 return -EIO;
4362 if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) { 4363 if (bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD) {
4364 /* Did the server return an unrequested attribute? */
4365 if (unlikely(res == NULL))
4366 return -EREMOTEIO;
4363 p = xdr_inline_decode(xdr, 4); 4367 p = xdr_inline_decode(xdr, 4);
4364 if (unlikely(!p)) 4368 if (unlikely(!p))
4365 goto out_overflow; 4369 goto out_overflow;
@@ -4372,6 +4376,7 @@ static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
4372 __func__); 4376 __func__);
4373 4377
4374 status = decode_first_threshold_item4(xdr, res); 4378 status = decode_first_threshold_item4(xdr, res);
4379 bitmap[2] &= ~FATTR4_WORD2_MDSTHRESHOLD;
4375 } 4380 }
4376 return status; 4381 return status;
4377out_overflow: 4382out_overflow:
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index b8323aa7b543..bbc49caa7a82 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -70,6 +70,10 @@ find_pnfs_driver(u32 id)
70 70
71 spin_lock(&pnfs_spinlock); 71 spin_lock(&pnfs_spinlock);
72 local = find_pnfs_driver_locked(id); 72 local = find_pnfs_driver_locked(id);
73 if (local != NULL && !try_module_get(local->owner)) {
74 dprintk("%s: Could not grab reference on module\n", __func__);
75 local = NULL;
76 }
73 spin_unlock(&pnfs_spinlock); 77 spin_unlock(&pnfs_spinlock);
74 return local; 78 return local;
75} 79}
@@ -80,6 +84,9 @@ unset_pnfs_layoutdriver(struct nfs_server *nfss)
80 if (nfss->pnfs_curr_ld) { 84 if (nfss->pnfs_curr_ld) {
81 if (nfss->pnfs_curr_ld->clear_layoutdriver) 85 if (nfss->pnfs_curr_ld->clear_layoutdriver)
82 nfss->pnfs_curr_ld->clear_layoutdriver(nfss); 86 nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
87 /* Decrement the MDS count. Purge the deviceid cache if zero */
88 if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
89 nfs4_deviceid_purge_client(nfss->nfs_client);
83 module_put(nfss->pnfs_curr_ld->owner); 90 module_put(nfss->pnfs_curr_ld->owner);
84 } 91 }
85 nfss->pnfs_curr_ld = NULL; 92 nfss->pnfs_curr_ld = NULL;
@@ -115,10 +122,6 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
115 goto out_no_driver; 122 goto out_no_driver;
116 } 123 }
117 } 124 }
118 if (!try_module_get(ld_type->owner)) {
119 dprintk("%s: Could not grab reference on module\n", __func__);
120 goto out_no_driver;
121 }
122 server->pnfs_curr_ld = ld_type; 125 server->pnfs_curr_ld = ld_type;
123 if (ld_type->set_layoutdriver 126 if (ld_type->set_layoutdriver
124 && ld_type->set_layoutdriver(server, mntfh)) { 127 && ld_type->set_layoutdriver(server, mntfh)) {
@@ -127,6 +130,8 @@ set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
127 module_put(ld_type->owner); 130 module_put(ld_type->owner);
128 goto out_no_driver; 131 goto out_no_driver;
129 } 132 }
133 /* Bump the MDS count */
134 atomic_inc(&server->nfs_client->cl_mds_count);
130 135
131 dprintk("%s: pNFS module for %u set\n", __func__, id); 136 dprintk("%s: pNFS module for %u set\n", __func__, id);
132 return; 137 return;
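Moving try_module_get() under pnfs_spinlock in find_pnfs_driver() closes the window in which the layout driver could go away between lookup and reference grab. The following user-space sketch shows the general "take the reference while still under the lock that validated the lookup" idea; it is a hedged illustration with hypothetical names, not the kernel implementation.

/*
 * Illustrative user-space sketch (not the kernel code above): grab the
 * reference before dropping the lock that made the lookup result valid.
 */
#include <pthread.h>
#include <stdio.h>

struct driver {
        int id;
        int refcount;
};

static pthread_mutex_t table_lock = PTHREAD_MUTEX_INITIALIZER;
static struct driver the_driver = { .id = 4, .refcount = 1 };

static struct driver *find_driver_locked(int id)
{
        return (the_driver.id == id) ? &the_driver : NULL;
}

static struct driver *find_and_get_driver(int id)
{
        struct driver *d;

        pthread_mutex_lock(&table_lock);
        d = find_driver_locked(id);
        if (d)
                d->refcount++;          /* reference taken under the lock */
        pthread_mutex_unlock(&table_lock);
        return d;
}

int main(void)
{
        struct driver *d = find_and_get_driver(4);

        printf("found=%d refcount=%d\n", d != NULL, d ? d->refcount : 0);
        return 0;
}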
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 29fd23c0efdc..64f90d845f6a 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -365,7 +365,7 @@ static inline bool
365pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src, 365pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
366 struct nfs_server *nfss) 366 struct nfs_server *nfss)
367{ 367{
368 return (dst && src && src->bm != 0 && 368 return (dst && src && src->bm != 0 && nfss->pnfs_curr_ld &&
369 nfss->pnfs_curr_ld->id == src->l_type); 369 nfss->pnfs_curr_ld->id == src->l_type);
370} 370}
371 371
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index a706b6bcc286..617c7419a08e 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -651,7 +651,7 @@ static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
651 /* Emulate the eof flag, which isn't normally needed in NFSv2 651 /* Emulate the eof flag, which isn't normally needed in NFSv2
652 * as it is guaranteed to always return the file attributes 652 * as it is guaranteed to always return the file attributes
653 */ 653 */
654 if (data->args.offset + data->args.count >= data->res.fattr->size) 654 if (data->args.offset + data->res.count >= data->res.fattr->size)
655 data->res.eof = 1; 655 data->res.eof = 1;
656 } 656 }
657 return 0; 657 return 0;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index ff656c022684..906f09c7d842 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
1867 if (data == NULL) 1867 if (data == NULL)
1868 goto out_no_data; 1868 goto out_no_data;
1869 1869
1870 args->version = NFS_DEFAULT_VERSION;
1870 switch (data->version) { 1871 switch (data->version) {
1871 case 1: 1872 case 1:
1872 data->namlen = 0; 1873 data->namlen = 0;
@@ -2637,6 +2638,8 @@ static int nfs4_validate_mount_data(void *options,
2637 if (data == NULL) 2638 if (data == NULL)
2638 goto out_no_data; 2639 goto out_no_data;
2639 2640
2641 args->version = 4;
2642
2640 switch (data->version) { 2643 switch (data->version) {
2641 case 1: 2644 case 1:
2642 if (data->host_addrlen > sizeof(args->nfs_server.address)) 2645 if (data->host_addrlen > sizeof(args->nfs_server.address))
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index e6fe3d69d14c..4d6861c0dc14 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -80,6 +80,7 @@ struct nfs_write_header *nfs_writehdr_alloc(void)
80 INIT_LIST_HEAD(&hdr->rpc_list); 80 INIT_LIST_HEAD(&hdr->rpc_list);
81 spin_lock_init(&hdr->lock); 81 spin_lock_init(&hdr->lock);
82 atomic_set(&hdr->refcnt, 0); 82 atomic_set(&hdr->refcnt, 0);
83 hdr->verf = &p->verf;
83 } 84 }
84 return p; 85 return p;
85} 86}
@@ -619,6 +620,7 @@ static void nfs_write_completion(struct nfs_pgio_header *hdr)
619 goto next; 620 goto next;
620 } 621 }
621 if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) { 622 if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
623 memcpy(&req->wb_verf, hdr->verf, sizeof(req->wb_verf));
622 nfs_mark_request_commit(req, hdr->lseg, &cinfo); 624 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
623 goto next; 625 goto next;
624 } 626 }
@@ -1255,15 +1257,14 @@ static void nfs_writeback_release_common(void *calldata)
1255 struct nfs_write_data *data = calldata; 1257 struct nfs_write_data *data = calldata;
1256 struct nfs_pgio_header *hdr = data->header; 1258 struct nfs_pgio_header *hdr = data->header;
1257 int status = data->task.tk_status; 1259 int status = data->task.tk_status;
1258 struct nfs_page *req = hdr->req;
1259 1260
1260 if ((status >= 0) && nfs_write_need_commit(data)) { 1261 if ((status >= 0) && nfs_write_need_commit(data)) {
1261 spin_lock(&hdr->lock); 1262 spin_lock(&hdr->lock);
1262 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) 1263 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
1263 ; /* Do nothing */ 1264 ; /* Do nothing */
1264 else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) 1265 else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
1265 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); 1266 memcpy(hdr->verf, &data->verf, sizeof(*hdr->verf));
1266 else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) 1267 else if (memcmp(hdr->verf, &data->verf, sizeof(*hdr->verf)))
1267 set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags); 1268 set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
1268 spin_unlock(&hdr->lock); 1269 spin_unlock(&hdr->lock);
1269 } 1270 }
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index 204438cc914e..34a10d78b839 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -11,7 +11,7 @@ int nfsexp_flags(struct svc_rqst *rqstp, struct svc_export *exp)
11 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors; 11 struct exp_flavor_info *end = exp->ex_flavors + exp->ex_nflavors;
12 12
13 for (f = exp->ex_flavors; f < end; f++) { 13 for (f = exp->ex_flavors; f < end; f++) {
14 if (f->pseudoflavor == rqstp->rq_flavor) 14 if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
15 return f->flags; 15 return f->flags;
16 } 16 }
17 return exp->ex_flags; 17 return exp->ex_flags;
diff --git a/fs/nfsd/export.c b/fs/nfsd/export.c
index 8e9689abbc0c..ba233499b9a5 100644
--- a/fs/nfsd/export.c
+++ b/fs/nfsd/export.c
@@ -15,11 +15,13 @@
15#include <linux/namei.h> 15#include <linux/namei.h>
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/exportfs.h> 17#include <linux/exportfs.h>
18#include <linux/sunrpc/svc_xprt.h>
18 19
19#include <net/ipv6.h> 20#include <net/ipv6.h>
20 21
21#include "nfsd.h" 22#include "nfsd.h"
22#include "nfsfh.h" 23#include "nfsfh.h"
24#include "netns.h"
23 25
24#define NFSDDBG_FACILITY NFSDDBG_EXPORT 26#define NFSDDBG_FACILITY NFSDDBG_EXPORT
25 27
@@ -38,7 +40,6 @@ typedef struct svc_export svc_export;
38#define EXPKEY_HASHBITS 8 40#define EXPKEY_HASHBITS 8
39#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS) 41#define EXPKEY_HASHMAX (1 << EXPKEY_HASHBITS)
40#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1) 42#define EXPKEY_HASHMASK (EXPKEY_HASHMAX -1)
41static struct cache_head *expkey_table[EXPKEY_HASHMAX];
42 43
43static void expkey_put(struct kref *ref) 44static void expkey_put(struct kref *ref)
44{ 45{
@@ -71,9 +72,9 @@ static int expkey_upcall(struct cache_detail *cd, struct cache_head *h)
71 return sunrpc_cache_pipe_upcall(cd, h, expkey_request); 72 return sunrpc_cache_pipe_upcall(cd, h, expkey_request);
72} 73}
73 74
74static struct svc_expkey *svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old); 75static struct svc_expkey *svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
75static struct svc_expkey *svc_expkey_lookup(struct svc_expkey *); 76 struct svc_expkey *old);
76static struct cache_detail svc_expkey_cache; 77static struct svc_expkey *svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *);
77 78
78static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen) 79static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
79{ 80{
@@ -131,7 +132,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
131 key.ek_fsidtype = fsidtype; 132 key.ek_fsidtype = fsidtype;
132 memcpy(key.ek_fsid, buf, len); 133 memcpy(key.ek_fsid, buf, len);
133 134
134 ek = svc_expkey_lookup(&key); 135 ek = svc_expkey_lookup(cd, &key);
135 err = -ENOMEM; 136 err = -ENOMEM;
136 if (!ek) 137 if (!ek)
137 goto out; 138 goto out;
@@ -145,7 +146,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
145 err = 0; 146 err = 0;
146 if (len == 0) { 147 if (len == 0) {
147 set_bit(CACHE_NEGATIVE, &key.h.flags); 148 set_bit(CACHE_NEGATIVE, &key.h.flags);
148 ek = svc_expkey_update(&key, ek); 149 ek = svc_expkey_update(cd, &key, ek);
149 if (!ek) 150 if (!ek)
150 err = -ENOMEM; 151 err = -ENOMEM;
151 } else { 152 } else {
@@ -155,7 +156,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
155 156
156 dprintk("Found the path %s\n", buf); 157 dprintk("Found the path %s\n", buf);
157 158
158 ek = svc_expkey_update(&key, ek); 159 ek = svc_expkey_update(cd, &key, ek);
159 if (!ek) 160 if (!ek)
160 err = -ENOMEM; 161 err = -ENOMEM;
161 path_put(&key.ek_path); 162 path_put(&key.ek_path);
@@ -163,7 +164,7 @@ static int expkey_parse(struct cache_detail *cd, char *mesg, int mlen)
163 cache_flush(); 164 cache_flush();
164 out: 165 out:
165 if (ek) 166 if (ek)
166 cache_put(&ek->h, &svc_expkey_cache); 167 cache_put(&ek->h, cd);
167 if (dom) 168 if (dom)
168 auth_domain_put(dom); 169 auth_domain_put(dom);
169 kfree(buf); 170 kfree(buf);
@@ -239,10 +240,9 @@ static struct cache_head *expkey_alloc(void)
239 return NULL; 240 return NULL;
240} 241}
241 242
242static struct cache_detail svc_expkey_cache = { 243static struct cache_detail svc_expkey_cache_template = {
243 .owner = THIS_MODULE, 244 .owner = THIS_MODULE,
244 .hash_size = EXPKEY_HASHMAX, 245 .hash_size = EXPKEY_HASHMAX,
245 .hash_table = expkey_table,
246 .name = "nfsd.fh", 246 .name = "nfsd.fh",
247 .cache_put = expkey_put, 247 .cache_put = expkey_put,
248 .cache_upcall = expkey_upcall, 248 .cache_upcall = expkey_upcall,
@@ -268,13 +268,12 @@ svc_expkey_hash(struct svc_expkey *item)
268} 268}
269 269
270static struct svc_expkey * 270static struct svc_expkey *
271svc_expkey_lookup(struct svc_expkey *item) 271svc_expkey_lookup(struct cache_detail *cd, struct svc_expkey *item)
272{ 272{
273 struct cache_head *ch; 273 struct cache_head *ch;
274 int hash = svc_expkey_hash(item); 274 int hash = svc_expkey_hash(item);
275 275
276 ch = sunrpc_cache_lookup(&svc_expkey_cache, &item->h, 276 ch = sunrpc_cache_lookup(cd, &item->h, hash);
277 hash);
278 if (ch) 277 if (ch)
279 return container_of(ch, struct svc_expkey, h); 278 return container_of(ch, struct svc_expkey, h);
280 else 279 else
@@ -282,13 +281,13 @@ svc_expkey_lookup(struct svc_expkey *item)
282} 281}
283 282
284static struct svc_expkey * 283static struct svc_expkey *
285svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old) 284svc_expkey_update(struct cache_detail *cd, struct svc_expkey *new,
285 struct svc_expkey *old)
286{ 286{
287 struct cache_head *ch; 287 struct cache_head *ch;
288 int hash = svc_expkey_hash(new); 288 int hash = svc_expkey_hash(new);
289 289
290 ch = sunrpc_cache_update(&svc_expkey_cache, &new->h, 290 ch = sunrpc_cache_update(cd, &new->h, &old->h, hash);
291 &old->h, hash);
292 if (ch) 291 if (ch)
293 return container_of(ch, struct svc_expkey, h); 292 return container_of(ch, struct svc_expkey, h);
294 else 293 else
@@ -299,8 +298,6 @@ svc_expkey_update(struct svc_expkey *new, struct svc_expkey *old)
299#define EXPORT_HASHBITS 8 298#define EXPORT_HASHBITS 8
300#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS) 299#define EXPORT_HASHMAX (1<< EXPORT_HASHBITS)
301 300
302static struct cache_head *export_table[EXPORT_HASHMAX];
303
304static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc) 301static void nfsd4_fslocs_free(struct nfsd4_fs_locations *fsloc)
305{ 302{
306 int i; 303 int i;
@@ -525,6 +522,7 @@ static int svc_export_parse(struct cache_detail *cd, char *mesg, int mlen)
525 goto out1; 522 goto out1;
526 523
527 exp.ex_client = dom; 524 exp.ex_client = dom;
525 exp.cd = cd;
528 526
529 /* expiry */ 527 /* expiry */
530 err = -EINVAL; 528 err = -EINVAL;
@@ -672,6 +670,7 @@ static void svc_export_init(struct cache_head *cnew, struct cache_head *citem)
672 new->ex_fslocs.locations = NULL; 670 new->ex_fslocs.locations = NULL;
673 new->ex_fslocs.locations_count = 0; 671 new->ex_fslocs.locations_count = 0;
674 new->ex_fslocs.migrated = 0; 672 new->ex_fslocs.migrated = 0;
673 new->cd = item->cd;
675} 674}
676 675
677static void export_update(struct cache_head *cnew, struct cache_head *citem) 676static void export_update(struct cache_head *cnew, struct cache_head *citem)
@@ -707,10 +706,9 @@ static struct cache_head *svc_export_alloc(void)
707 return NULL; 706 return NULL;
708} 707}
709 708
710struct cache_detail svc_export_cache = { 709static struct cache_detail svc_export_cache_template = {
711 .owner = THIS_MODULE, 710 .owner = THIS_MODULE,
712 .hash_size = EXPORT_HASHMAX, 711 .hash_size = EXPORT_HASHMAX,
713 .hash_table = export_table,
714 .name = "nfsd.export", 712 .name = "nfsd.export",
715 .cache_put = svc_export_put, 713 .cache_put = svc_export_put,
716 .cache_upcall = svc_export_upcall, 714 .cache_upcall = svc_export_upcall,
@@ -739,8 +737,7 @@ svc_export_lookup(struct svc_export *exp)
739 struct cache_head *ch; 737 struct cache_head *ch;
740 int hash = svc_export_hash(exp); 738 int hash = svc_export_hash(exp);
741 739
742 ch = sunrpc_cache_lookup(&svc_export_cache, &exp->h, 740 ch = sunrpc_cache_lookup(exp->cd, &exp->h, hash);
743 hash);
744 if (ch) 741 if (ch)
745 return container_of(ch, struct svc_export, h); 742 return container_of(ch, struct svc_export, h);
746 else 743 else
@@ -753,9 +750,7 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
753 struct cache_head *ch; 750 struct cache_head *ch;
754 int hash = svc_export_hash(old); 751 int hash = svc_export_hash(old);
755 752
756 ch = sunrpc_cache_update(&svc_export_cache, &new->h, 753 ch = sunrpc_cache_update(old->cd, &new->h, &old->h, hash);
757 &old->h,
758 hash);
759 if (ch) 754 if (ch)
760 return container_of(ch, struct svc_export, h); 755 return container_of(ch, struct svc_export, h);
761 else 756 else
@@ -764,7 +759,8 @@ svc_export_update(struct svc_export *new, struct svc_export *old)
764 759
765 760
766static struct svc_expkey * 761static struct svc_expkey *
767exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp) 762exp_find_key(struct cache_detail *cd, svc_client *clp, int fsid_type,
763 u32 *fsidv, struct cache_req *reqp)
768{ 764{
769 struct svc_expkey key, *ek; 765 struct svc_expkey key, *ek;
770 int err; 766 int err;
@@ -776,18 +772,18 @@ exp_find_key(svc_client *clp, int fsid_type, u32 *fsidv, struct cache_req *reqp)
776 key.ek_fsidtype = fsid_type; 772 key.ek_fsidtype = fsid_type;
777 memcpy(key.ek_fsid, fsidv, key_len(fsid_type)); 773 memcpy(key.ek_fsid, fsidv, key_len(fsid_type));
778 774
779 ek = svc_expkey_lookup(&key); 775 ek = svc_expkey_lookup(cd, &key);
780 if (ek == NULL) 776 if (ek == NULL)
781 return ERR_PTR(-ENOMEM); 777 return ERR_PTR(-ENOMEM);
782 err = cache_check(&svc_expkey_cache, &ek->h, reqp); 778 err = cache_check(cd, &ek->h, reqp);
783 if (err) 779 if (err)
784 return ERR_PTR(err); 780 return ERR_PTR(err);
785 return ek; 781 return ek;
786} 782}
787 783
788 784
789static svc_export *exp_get_by_name(svc_client *clp, const struct path *path, 785static svc_export *exp_get_by_name(struct cache_detail *cd, svc_client *clp,
790 struct cache_req *reqp) 786 const struct path *path, struct cache_req *reqp)
791{ 787{
792 struct svc_export *exp, key; 788 struct svc_export *exp, key;
793 int err; 789 int err;
@@ -797,11 +793,12 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
797 793
798 key.ex_client = clp; 794 key.ex_client = clp;
799 key.ex_path = *path; 795 key.ex_path = *path;
796 key.cd = cd;
800 797
801 exp = svc_export_lookup(&key); 798 exp = svc_export_lookup(&key);
802 if (exp == NULL) 799 if (exp == NULL)
803 return ERR_PTR(-ENOMEM); 800 return ERR_PTR(-ENOMEM);
804 err = cache_check(&svc_export_cache, &exp->h, reqp); 801 err = cache_check(cd, &exp->h, reqp);
805 if (err) 802 if (err)
806 return ERR_PTR(err); 803 return ERR_PTR(err);
807 return exp; 804 return exp;
@@ -810,16 +807,17 @@ static svc_export *exp_get_by_name(svc_client *clp, const struct path *path,
810/* 807/*
811 * Find the export entry for a given dentry. 808 * Find the export entry for a given dentry.
812 */ 809 */
813static struct svc_export *exp_parent(svc_client *clp, struct path *path) 810static struct svc_export *exp_parent(struct cache_detail *cd, svc_client *clp,
811 struct path *path)
814{ 812{
815 struct dentry *saved = dget(path->dentry); 813 struct dentry *saved = dget(path->dentry);
816 svc_export *exp = exp_get_by_name(clp, path, NULL); 814 svc_export *exp = exp_get_by_name(cd, clp, path, NULL);
817 815
818 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) { 816 while (PTR_ERR(exp) == -ENOENT && !IS_ROOT(path->dentry)) {
819 struct dentry *parent = dget_parent(path->dentry); 817 struct dentry *parent = dget_parent(path->dentry);
820 dput(path->dentry); 818 dput(path->dentry);
821 path->dentry = parent; 819 path->dentry = parent;
822 exp = exp_get_by_name(clp, path, NULL); 820 exp = exp_get_by_name(cd, clp, path, NULL);
823 } 821 }
824 dput(path->dentry); 822 dput(path->dentry);
825 path->dentry = saved; 823 path->dentry = saved;
@@ -834,13 +832,16 @@ static struct svc_export *exp_parent(svc_client *clp, struct path *path)
834 * since its harder to fool a kernel module than a user space program. 832 * since its harder to fool a kernel module than a user space program.
835 */ 833 */
836int 834int
837exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize) 835exp_rootfh(struct net *net, svc_client *clp, char *name,
836 struct knfsd_fh *f, int maxsize)
838{ 837{
839 struct svc_export *exp; 838 struct svc_export *exp;
840 struct path path; 839 struct path path;
841 struct inode *inode; 840 struct inode *inode;
842 struct svc_fh fh; 841 struct svc_fh fh;
843 int err; 842 int err;
843 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
844 struct cache_detail *cd = nn->svc_export_cache;
844 845
845 err = -EPERM; 846 err = -EPERM;
846 /* NB: we probably ought to check that it's NUL-terminated */ 847 /* NB: we probably ought to check that it's NUL-terminated */
@@ -853,7 +854,7 @@ exp_rootfh(svc_client *clp, char *name, struct knfsd_fh *f, int maxsize)
853 dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n", 854 dprintk("nfsd: exp_rootfh(%s [%p] %s:%s/%ld)\n",
854 name, path.dentry, clp->name, 855 name, path.dentry, clp->name,
855 inode->i_sb->s_id, inode->i_ino); 856 inode->i_sb->s_id, inode->i_ino);
856 exp = exp_parent(clp, &path); 857 exp = exp_parent(cd, clp, &path);
857 if (IS_ERR(exp)) { 858 if (IS_ERR(exp)) {
858 err = PTR_ERR(exp); 859 err = PTR_ERR(exp);
859 goto out; 860 goto out;
@@ -875,16 +876,18 @@ out:
875 return err; 876 return err;
876} 877}
877 878
878static struct svc_export *exp_find(struct auth_domain *clp, int fsid_type, 879static struct svc_export *exp_find(struct cache_detail *cd,
880 struct auth_domain *clp, int fsid_type,
879 u32 *fsidv, struct cache_req *reqp) 881 u32 *fsidv, struct cache_req *reqp)
880{ 882{
881 struct svc_export *exp; 883 struct svc_export *exp;
882 struct svc_expkey *ek = exp_find_key(clp, fsid_type, fsidv, reqp); 884 struct nfsd_net *nn = net_generic(cd->net, nfsd_net_id);
885 struct svc_expkey *ek = exp_find_key(nn->svc_expkey_cache, clp, fsid_type, fsidv, reqp);
883 if (IS_ERR(ek)) 886 if (IS_ERR(ek))
884 return ERR_CAST(ek); 887 return ERR_CAST(ek);
885 888
886 exp = exp_get_by_name(clp, &ek->ek_path, reqp); 889 exp = exp_get_by_name(cd, clp, &ek->ek_path, reqp);
887 cache_put(&ek->h, &svc_expkey_cache); 890 cache_put(&ek->h, nn->svc_expkey_cache);
888 891
889 if (IS_ERR(exp)) 892 if (IS_ERR(exp))
890 return ERR_CAST(exp); 893 return ERR_CAST(exp);
@@ -901,13 +904,13 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp)
901 return 0; 904 return 0;
902 /* ip-address based client; check sec= export option: */ 905 /* ip-address based client; check sec= export option: */
903 for (f = exp->ex_flavors; f < end; f++) { 906 for (f = exp->ex_flavors; f < end; f++) {
904 if (f->pseudoflavor == rqstp->rq_flavor) 907 if (f->pseudoflavor == rqstp->rq_cred.cr_flavor)
905 return 0; 908 return 0;
906 } 909 }
907 /* defaults in absence of sec= options: */ 910 /* defaults in absence of sec= options: */
908 if (exp->ex_nflavors == 0) { 911 if (exp->ex_nflavors == 0) {
909 if (rqstp->rq_flavor == RPC_AUTH_NULL || 912 if (rqstp->rq_cred.cr_flavor == RPC_AUTH_NULL ||
910 rqstp->rq_flavor == RPC_AUTH_UNIX) 913 rqstp->rq_cred.cr_flavor == RPC_AUTH_UNIX)
911 return 0; 914 return 0;
912 } 915 }
913 return nfserr_wrongsec; 916 return nfserr_wrongsec;
@@ -926,12 +929,14 @@ struct svc_export *
926rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path) 929rqst_exp_get_by_name(struct svc_rqst *rqstp, struct path *path)
927{ 930{
928 struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); 931 struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
932 struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
933 struct cache_detail *cd = nn->svc_export_cache;
929 934
930 if (rqstp->rq_client == NULL) 935 if (rqstp->rq_client == NULL)
931 goto gss; 936 goto gss;
932 937
933 /* First try the auth_unix client: */ 938 /* First try the auth_unix client: */
934 exp = exp_get_by_name(rqstp->rq_client, path, &rqstp->rq_chandle); 939 exp = exp_get_by_name(cd, rqstp->rq_client, path, &rqstp->rq_chandle);
935 if (PTR_ERR(exp) == -ENOENT) 940 if (PTR_ERR(exp) == -ENOENT)
936 goto gss; 941 goto gss;
937 if (IS_ERR(exp)) 942 if (IS_ERR(exp))
@@ -943,7 +948,7 @@ gss:
943 /* Otherwise, try falling back on gss client */ 948 /* Otherwise, try falling back on gss client */
944 if (rqstp->rq_gssclient == NULL) 949 if (rqstp->rq_gssclient == NULL)
945 return exp; 950 return exp;
946 gssexp = exp_get_by_name(rqstp->rq_gssclient, path, &rqstp->rq_chandle); 951 gssexp = exp_get_by_name(cd, rqstp->rq_gssclient, path, &rqstp->rq_chandle);
947 if (PTR_ERR(gssexp) == -ENOENT) 952 if (PTR_ERR(gssexp) == -ENOENT)
948 return exp; 953 return exp;
949 if (!IS_ERR(exp)) 954 if (!IS_ERR(exp))
@@ -955,12 +960,15 @@ struct svc_export *
955rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv) 960rqst_exp_find(struct svc_rqst *rqstp, int fsid_type, u32 *fsidv)
956{ 961{
957 struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT); 962 struct svc_export *gssexp, *exp = ERR_PTR(-ENOENT);
963 struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
964 struct cache_detail *cd = nn->svc_export_cache;
958 965
959 if (rqstp->rq_client == NULL) 966 if (rqstp->rq_client == NULL)
960 goto gss; 967 goto gss;
961 968
962 /* First try the auth_unix client: */ 969 /* First try the auth_unix client: */
963 exp = exp_find(rqstp->rq_client, fsid_type, fsidv, &rqstp->rq_chandle); 970 exp = exp_find(cd, rqstp->rq_client, fsid_type,
971 fsidv, &rqstp->rq_chandle);
964 if (PTR_ERR(exp) == -ENOENT) 972 if (PTR_ERR(exp) == -ENOENT)
965 goto gss; 973 goto gss;
966 if (IS_ERR(exp)) 974 if (IS_ERR(exp))
@@ -972,7 +980,7 @@ gss:
972 /* Otherwise, try falling back on gss client */ 980 /* Otherwise, try falling back on gss client */
973 if (rqstp->rq_gssclient == NULL) 981 if (rqstp->rq_gssclient == NULL)
974 return exp; 982 return exp;
975 gssexp = exp_find(rqstp->rq_gssclient, fsid_type, fsidv, 983 gssexp = exp_find(cd, rqstp->rq_gssclient, fsid_type, fsidv,
976 &rqstp->rq_chandle); 984 &rqstp->rq_chandle);
977 if (PTR_ERR(gssexp) == -ENOENT) 985 if (PTR_ERR(gssexp) == -ENOENT)
978 return exp; 986 return exp;
@@ -1029,13 +1037,15 @@ exp_pseudoroot(struct svc_rqst *rqstp, struct svc_fh *fhp)
1029/* Iterator */ 1037/* Iterator */
1030 1038
1031static void *e_start(struct seq_file *m, loff_t *pos) 1039static void *e_start(struct seq_file *m, loff_t *pos)
1032 __acquires(svc_export_cache.hash_lock) 1040 __acquires(((struct cache_detail *)m->private)->hash_lock)
1033{ 1041{
1034 loff_t n = *pos; 1042 loff_t n = *pos;
1035 unsigned hash, export; 1043 unsigned hash, export;
1036 struct cache_head *ch; 1044 struct cache_head *ch;
1037 1045 struct cache_detail *cd = m->private;
1038 read_lock(&svc_export_cache.hash_lock); 1046 struct cache_head **export_table = cd->hash_table;
1047
1048 read_lock(&cd->hash_lock);
1039 if (!n--) 1049 if (!n--)
1040 return SEQ_START_TOKEN; 1050 return SEQ_START_TOKEN;
1041 hash = n >> 32; 1051 hash = n >> 32;
@@ -1060,6 +1070,8 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
1060{ 1070{
1061 struct cache_head *ch = p; 1071 struct cache_head *ch = p;
1062 int hash = (*pos >> 32); 1072 int hash = (*pos >> 32);
1073 struct cache_detail *cd = m->private;
1074 struct cache_head **export_table = cd->hash_table;
1063 1075
1064 if (p == SEQ_START_TOKEN) 1076 if (p == SEQ_START_TOKEN)
1065 hash = 0; 1077 hash = 0;
@@ -1082,9 +1094,11 @@ static void *e_next(struct seq_file *m, void *p, loff_t *pos)
1082} 1094}
1083 1095
1084static void e_stop(struct seq_file *m, void *p) 1096static void e_stop(struct seq_file *m, void *p)
1085 __releases(svc_export_cache.hash_lock) 1097 __releases(((struct cache_detail *)m->private)->hash_lock)
1086{ 1098{
1087 read_unlock(&svc_export_cache.hash_lock); 1099 struct cache_detail *cd = m->private;
1100
1101 read_unlock(&cd->hash_lock);
1088} 1102}
1089 1103
1090static struct flags { 1104static struct flags {
@@ -1195,6 +1209,7 @@ static int e_show(struct seq_file *m, void *p)
1195{ 1209{
1196 struct cache_head *cp = p; 1210 struct cache_head *cp = p;
1197 struct svc_export *exp = container_of(cp, struct svc_export, h); 1211 struct svc_export *exp = container_of(cp, struct svc_export, h);
1212 struct cache_detail *cd = m->private;
1198 1213
1199 if (p == SEQ_START_TOKEN) { 1214 if (p == SEQ_START_TOKEN) {
1200 seq_puts(m, "# Version 1.1\n"); 1215 seq_puts(m, "# Version 1.1\n");
@@ -1203,10 +1218,10 @@ static int e_show(struct seq_file *m, void *p)
1203 } 1218 }
1204 1219
1205 cache_get(&exp->h); 1220 cache_get(&exp->h);
1206 if (cache_check(&svc_export_cache, &exp->h, NULL)) 1221 if (cache_check(cd, &exp->h, NULL))
1207 return 0; 1222 return 0;
1208 cache_put(&exp->h, &svc_export_cache); 1223 exp_put(exp);
1209 return svc_export_show(m, &svc_export_cache, cp); 1224 return svc_export_show(m, cd, cp);
1210} 1225}
1211 1226
1212const struct seq_operations nfs_exports_op = { 1227const struct seq_operations nfs_exports_op = {
@@ -1216,48 +1231,70 @@ const struct seq_operations nfs_exports_op = {
1216 .show = e_show, 1231 .show = e_show,
1217}; 1232};
1218 1233
1219
1220/* 1234/*
1221 * Initialize the exports module. 1235 * Initialize the exports module.
1222 */ 1236 */
1223int 1237int
1224nfsd_export_init(void) 1238nfsd_export_init(struct net *net)
1225{ 1239{
1226 int rv; 1240 int rv;
1227 dprintk("nfsd: initializing export module.\n"); 1241 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1242
1243 dprintk("nfsd: initializing export module (net: %p).\n", net);
1228 1244
1229 rv = cache_register_net(&svc_export_cache, &init_net); 1245 nn->svc_export_cache = cache_create_net(&svc_export_cache_template, net);
1246 if (IS_ERR(nn->svc_export_cache))
1247 return PTR_ERR(nn->svc_export_cache);
1248 rv = cache_register_net(nn->svc_export_cache, net);
1230 if (rv) 1249 if (rv)
1231 return rv; 1250 goto destroy_export_cache;
1232 rv = cache_register_net(&svc_expkey_cache, &init_net); 1251
1252 nn->svc_expkey_cache = cache_create_net(&svc_expkey_cache_template, net);
1253 if (IS_ERR(nn->svc_expkey_cache)) {
1254 rv = PTR_ERR(nn->svc_expkey_cache);
1255 goto unregister_export_cache;
1256 }
1257 rv = cache_register_net(nn->svc_expkey_cache, net);
1233 if (rv) 1258 if (rv)
1234 cache_unregister_net(&svc_export_cache, &init_net); 1259 goto destroy_expkey_cache;
1235 return rv; 1260 return 0;
1236 1261
1262destroy_expkey_cache:
1263 cache_destroy_net(nn->svc_expkey_cache, net);
1264unregister_export_cache:
1265 cache_unregister_net(nn->svc_export_cache, net);
1266destroy_export_cache:
1267 cache_destroy_net(nn->svc_export_cache, net);
1268 return rv;
1237} 1269}
1238 1270
1239/* 1271/*
1240 * Flush exports table - called when last nfsd thread is killed 1272 * Flush exports table - called when last nfsd thread is killed
1241 */ 1273 */
1242void 1274void
1243nfsd_export_flush(void) 1275nfsd_export_flush(struct net *net)
1244{ 1276{
1245 cache_purge(&svc_expkey_cache); 1277 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1246 cache_purge(&svc_export_cache); 1278
1279 cache_purge(nn->svc_expkey_cache);
1280 cache_purge(nn->svc_export_cache);
1247} 1281}
1248 1282
1249/* 1283/*
1250 * Shutdown the exports module. 1284 * Shutdown the exports module.
1251 */ 1285 */
1252void 1286void
1253nfsd_export_shutdown(void) 1287nfsd_export_shutdown(struct net *net)
1254{ 1288{
1289 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
1255 1290
1256 dprintk("nfsd: shutting down export module.\n"); 1291 dprintk("nfsd: shutting down export module (net: %p).\n", net);
1257 1292
1258 cache_unregister_net(&svc_expkey_cache, &init_net); 1293 cache_unregister_net(nn->svc_expkey_cache, net);
1259 cache_unregister_net(&svc_export_cache, &init_net); 1294 cache_unregister_net(nn->svc_export_cache, net);
1260 svcauth_unix_purge(); 1295 cache_destroy_net(nn->svc_expkey_cache, net);
1296 cache_destroy_net(nn->svc_export_cache, net);
1297 svcauth_unix_purge(net);
1261 1298
1262 dprintk("nfsd: export shutdown complete.\n"); 1299 dprintk("nfsd: export shutdown complete (net: %p).\n", net);
1263} 1300}
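The reworked nfsd_export_init() now creates and registers two per-net caches and unwinds partial setup through goto labels in reverse order of construction. A rough stand-alone sketch of that unwind pattern follows; cache_create()/cache_register() here are hypothetical stand-ins, not the real sunrpc API.

/*
 * Illustrative user-space sketch (not the kernel code above): build two
 * resources and unwind in reverse order on failure, as the reworked
 * nfsd_export_init()/nfsd_idmap_init() do for their per-net caches.
 */
#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; };

static struct cache *cache_create(const char *name)
{
        struct cache *c = malloc(sizeof(*c));

        if (c)
                c->name = name;
        return c;
}

static int cache_register(struct cache *c)    { return c ? 0 : -1; }
static void cache_unregister(struct cache *c) { (void)c; }
static void cache_destroy(struct cache *c)    { free(c); }

static int export_init(struct cache **exp, struct cache **key)
{
        *exp = cache_create("export");
        if (!*exp)
                return -1;
        if (cache_register(*exp))
                goto destroy_export;

        *key = cache_create("expkey");
        if (!*key)
                goto unregister_export;
        if (cache_register(*key))
                goto destroy_key;
        return 0;

destroy_key:
        cache_destroy(*key);
unregister_export:
        cache_unregister(*exp);
destroy_export:
        cache_destroy(*exp);
        return -1;
}

int main(void)
{
        struct cache *exp = NULL, *key = NULL;

        printf("init: %d\n", export_init(&exp, &key));
        return 0;
}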
diff --git a/fs/nfsd/fault_inject.c b/fs/nfsd/fault_inject.c
index 9559ce468732..e6c38159622f 100644
--- a/fs/nfsd/fault_inject.c
+++ b/fs/nfsd/fault_inject.c
@@ -58,6 +58,7 @@ static int nfsd_inject_set(void *op_ptr, u64 val)
58 58
59static int nfsd_inject_get(void *data, u64 *val) 59static int nfsd_inject_get(void *data, u64 *val)
60{ 60{
61 *val = 0;
61 return 0; 62 return 0;
62} 63}
63 64
diff --git a/fs/nfsd/idmap.h b/fs/nfsd/idmap.h
index 2f3be1321534..9d513efc01ba 100644
--- a/fs/nfsd/idmap.h
+++ b/fs/nfsd/idmap.h
@@ -42,14 +42,14 @@
42#define IDMAP_NAMESZ 128 42#define IDMAP_NAMESZ 128
43 43
44#ifdef CONFIG_NFSD_V4 44#ifdef CONFIG_NFSD_V4
45int nfsd_idmap_init(void); 45int nfsd_idmap_init(struct net *);
46void nfsd_idmap_shutdown(void); 46void nfsd_idmap_shutdown(struct net *);
47#else 47#else
48static inline int nfsd_idmap_init(void) 48static inline int nfsd_idmap_init(struct net *net)
49{ 49{
50 return 0; 50 return 0;
51} 51}
52static inline void nfsd_idmap_shutdown(void) 52static inline void nfsd_idmap_shutdown(struct net *net)
53{ 53{
54} 54}
55#endif 55#endif
diff --git a/fs/nfsd/netns.h b/fs/nfsd/netns.h
index 12e0cff435b4..39365636b244 100644
--- a/fs/nfsd/netns.h
+++ b/fs/nfsd/netns.h
@@ -28,6 +28,12 @@ struct cld_net;
28 28
29struct nfsd_net { 29struct nfsd_net {
30 struct cld_net *cld_net; 30 struct cld_net *cld_net;
31
32 struct cache_detail *svc_expkey_cache;
33 struct cache_detail *svc_export_cache;
34
35 struct cache_detail *idtoname_cache;
36 struct cache_detail *nametoid_cache;
31}; 37};
32 38
33extern int nfsd_net_id; 39extern int nfsd_net_id;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c8e9f637153a..a5fd6b982f27 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -650,9 +650,10 @@ static int setup_callback_client(struct nfs4_client *clp, struct nfs4_cb_conn *c
650 struct rpc_clnt *client; 650 struct rpc_clnt *client;
651 651
652 if (clp->cl_minorversion == 0) { 652 if (clp->cl_minorversion == 0) {
653 if (!clp->cl_principal && (clp->cl_flavor >= RPC_AUTH_GSS_KRB5)) 653 if (!clp->cl_cred.cr_principal &&
654 (clp->cl_flavor >= RPC_AUTH_GSS_KRB5))
654 return -EINVAL; 655 return -EINVAL;
655 args.client_name = clp->cl_principal; 656 args.client_name = clp->cl_cred.cr_principal;
656 args.prognumber = conn->cb_prog, 657 args.prognumber = conn->cb_prog,
657 args.protocol = XPRT_TRANSPORT_TCP; 658 args.protocol = XPRT_TRANSPORT_TCP;
658 args.authflavor = clp->cl_flavor; 659 args.authflavor = clp->cl_flavor;
diff --git a/fs/nfsd/nfs4idmap.c b/fs/nfsd/nfs4idmap.c
index 322d11ce06a4..dae36f1dee95 100644
--- a/fs/nfsd/nfs4idmap.c
+++ b/fs/nfsd/nfs4idmap.c
@@ -36,9 +36,11 @@
36#include <linux/seq_file.h> 36#include <linux/seq_file.h>
37#include <linux/sched.h> 37#include <linux/sched.h>
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/sunrpc/svc_xprt.h>
39#include <net/net_namespace.h> 40#include <net/net_namespace.h>
40#include "idmap.h" 41#include "idmap.h"
41#include "nfsd.h" 42#include "nfsd.h"
43#include "netns.h"
42 44
43/* 45/*
44 * Turn off idmapping when using AUTH_SYS. 46 * Turn off idmapping when using AUTH_SYS.
@@ -107,8 +109,6 @@ ent_alloc(void)
107 * ID -> Name cache 109 * ID -> Name cache
108 */ 110 */
109 111
110static struct cache_head *idtoname_table[ENT_HASHMAX];
111
112static uint32_t 112static uint32_t
113idtoname_hash(struct ent *ent) 113idtoname_hash(struct ent *ent)
114{ 114{
@@ -183,13 +183,13 @@ warn_no_idmapd(struct cache_detail *detail, int has_died)
183 183
184 184
185static int idtoname_parse(struct cache_detail *, char *, int); 185static int idtoname_parse(struct cache_detail *, char *, int);
186static struct ent *idtoname_lookup(struct ent *); 186static struct ent *idtoname_lookup(struct cache_detail *, struct ent *);
187static struct ent *idtoname_update(struct ent *, struct ent *); 187static struct ent *idtoname_update(struct cache_detail *, struct ent *,
188 struct ent *);
188 189
189static struct cache_detail idtoname_cache = { 190static struct cache_detail idtoname_cache_template = {
190 .owner = THIS_MODULE, 191 .owner = THIS_MODULE,
191 .hash_size = ENT_HASHMAX, 192 .hash_size = ENT_HASHMAX,
192 .hash_table = idtoname_table,
193 .name = "nfs4.idtoname", 193 .name = "nfs4.idtoname",
194 .cache_put = ent_put, 194 .cache_put = ent_put,
195 .cache_upcall = idtoname_upcall, 195 .cache_upcall = idtoname_upcall,
@@ -244,7 +244,7 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
244 goto out; 244 goto out;
245 245
246 error = -ENOMEM; 246 error = -ENOMEM;
247 res = idtoname_lookup(&ent); 247 res = idtoname_lookup(cd, &ent);
248 if (!res) 248 if (!res)
249 goto out; 249 goto out;
250 250
@@ -260,11 +260,11 @@ idtoname_parse(struct cache_detail *cd, char *buf, int buflen)
260 else 260 else
261 memcpy(ent.name, buf1, sizeof(ent.name)); 261 memcpy(ent.name, buf1, sizeof(ent.name));
262 error = -ENOMEM; 262 error = -ENOMEM;
263 res = idtoname_update(&ent, res); 263 res = idtoname_update(cd, &ent, res);
264 if (res == NULL) 264 if (res == NULL)
265 goto out; 265 goto out;
266 266
267 cache_put(&res->h, &idtoname_cache); 267 cache_put(&res->h, cd);
268 268
269 error = 0; 269 error = 0;
270out: 270out:
@@ -275,10 +275,9 @@ out:
275 275
276 276
277static struct ent * 277static struct ent *
278idtoname_lookup(struct ent *item) 278idtoname_lookup(struct cache_detail *cd, struct ent *item)
279{ 279{
280 struct cache_head *ch = sunrpc_cache_lookup(&idtoname_cache, 280 struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
281 &item->h,
282 idtoname_hash(item)); 281 idtoname_hash(item));
283 if (ch) 282 if (ch)
284 return container_of(ch, struct ent, h); 283 return container_of(ch, struct ent, h);
@@ -287,10 +286,9 @@ idtoname_lookup(struct ent *item)
287} 286}
288 287
289static struct ent * 288static struct ent *
290idtoname_update(struct ent *new, struct ent *old) 289idtoname_update(struct cache_detail *cd, struct ent *new, struct ent *old)
291{ 290{
292 struct cache_head *ch = sunrpc_cache_update(&idtoname_cache, 291 struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
293 &new->h, &old->h,
294 idtoname_hash(new)); 292 idtoname_hash(new));
295 if (ch) 293 if (ch)
296 return container_of(ch, struct ent, h); 294 return container_of(ch, struct ent, h);
@@ -303,8 +301,6 @@ idtoname_update(struct ent *new, struct ent *old)
303 * Name -> ID cache 301 * Name -> ID cache
304 */ 302 */
305 303
306static struct cache_head *nametoid_table[ENT_HASHMAX];
307
308static inline int 304static inline int
309nametoid_hash(struct ent *ent) 305nametoid_hash(struct ent *ent)
310{ 306{
@@ -359,14 +355,14 @@ nametoid_show(struct seq_file *m, struct cache_detail *cd, struct cache_head *h)
359 return 0; 355 return 0;
360} 356}
361 357
362static struct ent *nametoid_lookup(struct ent *); 358static struct ent *nametoid_lookup(struct cache_detail *, struct ent *);
363static struct ent *nametoid_update(struct ent *, struct ent *); 359static struct ent *nametoid_update(struct cache_detail *, struct ent *,
360 struct ent *);
364static int nametoid_parse(struct cache_detail *, char *, int); 361static int nametoid_parse(struct cache_detail *, char *, int);
365 362
366static struct cache_detail nametoid_cache = { 363static struct cache_detail nametoid_cache_template = {
367 .owner = THIS_MODULE, 364 .owner = THIS_MODULE,
368 .hash_size = ENT_HASHMAX, 365 .hash_size = ENT_HASHMAX,
369 .hash_table = nametoid_table,
370 .name = "nfs4.nametoid", 366 .name = "nfs4.nametoid",
371 .cache_put = ent_put, 367 .cache_put = ent_put,
372 .cache_upcall = nametoid_upcall, 368 .cache_upcall = nametoid_upcall,
@@ -426,14 +422,14 @@ nametoid_parse(struct cache_detail *cd, char *buf, int buflen)
426 set_bit(CACHE_NEGATIVE, &ent.h.flags); 422 set_bit(CACHE_NEGATIVE, &ent.h.flags);
427 423
428 error = -ENOMEM; 424 error = -ENOMEM;
429 res = nametoid_lookup(&ent); 425 res = nametoid_lookup(cd, &ent);
430 if (res == NULL) 426 if (res == NULL)
431 goto out; 427 goto out;
432 res = nametoid_update(&ent, res); 428 res = nametoid_update(cd, &ent, res);
433 if (res == NULL) 429 if (res == NULL)
434 goto out; 430 goto out;
435 431
436 cache_put(&res->h, &nametoid_cache); 432 cache_put(&res->h, cd);
437 error = 0; 433 error = 0;
438out: 434out:
439 kfree(buf1); 435 kfree(buf1);
@@ -443,10 +439,9 @@ out:
443 439
444 440
445static struct ent * 441static struct ent *
446nametoid_lookup(struct ent *item) 442nametoid_lookup(struct cache_detail *cd, struct ent *item)
447{ 443{
448 struct cache_head *ch = sunrpc_cache_lookup(&nametoid_cache, 444 struct cache_head *ch = sunrpc_cache_lookup(cd, &item->h,
449 &item->h,
450 nametoid_hash(item)); 445 nametoid_hash(item));
451 if (ch) 446 if (ch)
452 return container_of(ch, struct ent, h); 447 return container_of(ch, struct ent, h);
@@ -455,10 +450,9 @@ nametoid_lookup(struct ent *item)
455} 450}
456 451
457static struct ent * 452static struct ent *
458nametoid_update(struct ent *new, struct ent *old) 453nametoid_update(struct cache_detail *cd, struct ent *new, struct ent *old)
459{ 454{
460 struct cache_head *ch = sunrpc_cache_update(&nametoid_cache, 455 struct cache_head *ch = sunrpc_cache_update(cd, &new->h, &old->h,
461 &new->h, &old->h,
462 nametoid_hash(new)); 456 nametoid_hash(new));
463 if (ch) 457 if (ch)
464 return container_of(ch, struct ent, h); 458 return container_of(ch, struct ent, h);
@@ -471,34 +465,55 @@ nametoid_update(struct ent *new, struct ent *old)
471 */ 465 */
472 466
473int 467int
474nfsd_idmap_init(void) 468nfsd_idmap_init(struct net *net)
475{ 469{
476 int rv; 470 int rv;
471 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
477 472
478 rv = cache_register_net(&idtoname_cache, &init_net); 473 nn->idtoname_cache = cache_create_net(&idtoname_cache_template, net);
474 if (IS_ERR(nn->idtoname_cache))
475 return PTR_ERR(nn->idtoname_cache);
476 rv = cache_register_net(nn->idtoname_cache, net);
479 if (rv) 477 if (rv)
480 return rv; 478 goto destroy_idtoname_cache;
481 rv = cache_register_net(&nametoid_cache, &init_net); 479 nn->nametoid_cache = cache_create_net(&nametoid_cache_template, net);
480 if (IS_ERR(nn->nametoid_cache)) {
 481		rv = PTR_ERR(nn->nametoid_cache);
482 goto unregister_idtoname_cache;
483 }
484 rv = cache_register_net(nn->nametoid_cache, net);
482 if (rv) 485 if (rv)
483 cache_unregister_net(&idtoname_cache, &init_net); 486 goto destroy_nametoid_cache;
487 return 0;
488
489destroy_nametoid_cache:
490 cache_destroy_net(nn->nametoid_cache, net);
491unregister_idtoname_cache:
492 cache_unregister_net(nn->idtoname_cache, net);
493destroy_idtoname_cache:
494 cache_destroy_net(nn->idtoname_cache, net);
484 return rv; 495 return rv;
485} 496}
486 497
487void 498void
488nfsd_idmap_shutdown(void) 499nfsd_idmap_shutdown(struct net *net)
489{ 500{
490 cache_unregister_net(&idtoname_cache, &init_net); 501 struct nfsd_net *nn = net_generic(net, nfsd_net_id);
491 cache_unregister_net(&nametoid_cache, &init_net); 502
503 cache_unregister_net(nn->idtoname_cache, net);
504 cache_unregister_net(nn->nametoid_cache, net);
505 cache_destroy_net(nn->idtoname_cache, net);
506 cache_destroy_net(nn->nametoid_cache, net);
492} 507}
493 508
494static int 509static int
495idmap_lookup(struct svc_rqst *rqstp, 510idmap_lookup(struct svc_rqst *rqstp,
496 struct ent *(*lookup_fn)(struct ent *), struct ent *key, 511 struct ent *(*lookup_fn)(struct cache_detail *, struct ent *),
497 struct cache_detail *detail, struct ent **item) 512 struct ent *key, struct cache_detail *detail, struct ent **item)
498{ 513{
499 int ret; 514 int ret;
500 515
501 *item = lookup_fn(key); 516 *item = lookup_fn(detail, key);
502 if (!*item) 517 if (!*item)
503 return -ENOMEM; 518 return -ENOMEM;
504 retry: 519 retry:
@@ -506,7 +521,7 @@ idmap_lookup(struct svc_rqst *rqstp,
506 521
507 if (ret == -ETIMEDOUT) { 522 if (ret == -ETIMEDOUT) {
508 struct ent *prev_item = *item; 523 struct ent *prev_item = *item;
509 *item = lookup_fn(key); 524 *item = lookup_fn(detail, key);
510 if (*item != prev_item) 525 if (*item != prev_item)
511 goto retry; 526 goto retry;
512 cache_put(&(*item)->h, detail); 527 cache_put(&(*item)->h, detail);
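
A note on the retry loop above: idmap_lookup() treats -ETIMEDOUT from cache_check() as retryable only when a fresh lookup returns a different entry, meaning the cache has been repopulated in the meantime; the same stale entry is simply released. A small, self-contained C sketch of that rule follows. lookup(), wait_for() and put_entry() are hypothetical stand-ins for the sunrpc cache calls, and the two-entry table exists only to simulate the cache being refreshed between calls.

#include <errno.h>
#include <stdio.h>

struct entry { int generation; };

static struct entry table[2] = { { 1 }, { 2 } };
static int lookups;

/* hypothetical cache helpers: the second lookup returns the refreshed entry */
static struct entry *lookup(void)      { return &table[lookups++ ? 1 : 0]; }
static int wait_for(struct entry *e)   { return e->generation < 2 ? -ETIMEDOUT : 0; }
static void put_entry(struct entry *e) { (void)e; }

/* retry only if the timed-out entry has been replaced by a newer one */
static int resolve(struct entry **out)
{
	struct entry *e = lookup();
	int ret;

	if (!e)
		return -ENOMEM;
retry:
	ret = wait_for(e);
	if (ret == -ETIMEDOUT) {
		struct entry *prev = e;

		e = lookup();
		if (e != prev)
			goto retry;	/* cache was refreshed: try again */
		put_entry(e);		/* same stale entry: report the timeout */
		return ret;
	}
	if (ret) {
		put_entry(e);
		return ret;
	}
	*out = e;
	return 0;
}

int main(void)
{
	struct entry *e;
	int ret = resolve(&e);

	printf("ret=%d generation=%d\n", ret, ret ? 0 : e->generation);
	return ret ? 1 : 0;
}
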
@@ -531,19 +546,20 @@ idmap_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen
531 .type = type, 546 .type = type,
532 }; 547 };
533 int ret; 548 int ret;
549 struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
534 550
535 if (namelen + 1 > sizeof(key.name)) 551 if (namelen + 1 > sizeof(key.name))
536 return nfserr_badowner; 552 return nfserr_badowner;
537 memcpy(key.name, name, namelen); 553 memcpy(key.name, name, namelen);
538 key.name[namelen] = '\0'; 554 key.name[namelen] = '\0';
539 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); 555 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
540 ret = idmap_lookup(rqstp, nametoid_lookup, &key, &nametoid_cache, &item); 556 ret = idmap_lookup(rqstp, nametoid_lookup, &key, nn->nametoid_cache, &item);
541 if (ret == -ENOENT) 557 if (ret == -ENOENT)
542 return nfserr_badowner; 558 return nfserr_badowner;
543 if (ret) 559 if (ret)
544 return nfserrno(ret); 560 return nfserrno(ret);
545 *id = item->id; 561 *id = item->id;
546 cache_put(&item->h, &nametoid_cache); 562 cache_put(&item->h, nn->nametoid_cache);
547 return 0; 563 return 0;
548} 564}
549 565
@@ -555,9 +571,10 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
555 .type = type, 571 .type = type,
556 }; 572 };
557 int ret; 573 int ret;
574 struct nfsd_net *nn = net_generic(rqstp->rq_xprt->xpt_net, nfsd_net_id);
558 575
559 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname)); 576 strlcpy(key.authname, rqst_authname(rqstp), sizeof(key.authname));
560 ret = idmap_lookup(rqstp, idtoname_lookup, &key, &idtoname_cache, &item); 577 ret = idmap_lookup(rqstp, idtoname_lookup, &key, nn->idtoname_cache, &item);
561 if (ret == -ENOENT) 578 if (ret == -ENOENT)
562 return sprintf(name, "%u", id); 579 return sprintf(name, "%u", id);
563 if (ret) 580 if (ret)
@@ -565,7 +582,7 @@ idmap_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
565 ret = strlen(item->name); 582 ret = strlen(item->name);
566 BUG_ON(ret > IDMAP_NAMESZ); 583 BUG_ON(ret > IDMAP_NAMESZ);
567 memcpy(name, item->name, ret); 584 memcpy(name, item->name, ret);
568 cache_put(&item->h, &idtoname_cache); 585 cache_put(&item->h, nn->idtoname_cache);
569 return ret; 586 return ret;
570} 587}
571 588
@@ -588,7 +605,7 @@ numeric_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namel
588static __be32 605static __be32
589do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id) 606do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, uid_t *id)
590{ 607{
591 if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS) 608 if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
592 if (numeric_name_to_id(rqstp, type, name, namelen, id)) 609 if (numeric_name_to_id(rqstp, type, name, namelen, id))
593 return 0; 610 return 0;
594 /* 611 /*
@@ -601,7 +618,7 @@ do_name_to_id(struct svc_rqst *rqstp, int type, const char *name, u32 namelen, u
601static int 618static int
602do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name) 619do_id_to_name(struct svc_rqst *rqstp, int type, uid_t id, char *name)
603{ 620{
604 if (nfs4_disable_idmapping && rqstp->rq_flavor < RPC_AUTH_GSS) 621 if (nfs4_disable_idmapping && rqstp->rq_cred.cr_flavor < RPC_AUTH_GSS)
605 return sprintf(name, "%u", id); 622 return sprintf(name, "%u", id);
606 return idmap_id_to_name(rqstp, type, id, name); 623 return idmap_id_to_name(rqstp, type, id, name);
607} 624}
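
The per-network-namespace conversion of nfsd_idmap_init() above follows the usual create/register pairing with a goto-based unwind: each failure label releases exactly what has already been set up, in reverse order. Below is a minimal userspace sketch of the same shape, using hypothetical create_cache()/register_cache() helpers in place of the real cache_create_net()/cache_register_net() API and plain -ENOMEM instead of PTR_ERR handling.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct cache { const char *name; };

/* hypothetical stand-ins for the sunrpc per-net cache calls */
static struct cache *create_cache(const char *name)
{
	struct cache *c = malloc(sizeof(*c));
	if (c)
		c->name = name;
	return c;
}
static int register_cache(struct cache *c)     { printf("register %s\n", c->name); return 0; }
static void unregister_cache(struct cache *c)  { printf("unregister %s\n", c->name); }
static void destroy_cache(struct cache *c)     { printf("destroy %s\n", c->name); free(c); }

struct idmap_sketch { struct cache *idtoname, *nametoid; };

/* same shape as nfsd_idmap_init(): every failure point jumps to a label that
 * unwinds only the steps that have already succeeded */
static int idmap_init_sketch(struct idmap_sketch *n)
{
	int rv = -ENOMEM;

	n->idtoname = create_cache("nfs4.idtoname");
	if (!n->idtoname)
		return rv;
	rv = register_cache(n->idtoname);
	if (rv)
		goto destroy_idtoname;
	rv = -ENOMEM;
	n->nametoid = create_cache("nfs4.nametoid");
	if (!n->nametoid)
		goto unregister_idtoname;
	rv = register_cache(n->nametoid);
	if (rv)
		goto destroy_nametoid;
	return 0;

destroy_nametoid:
	destroy_cache(n->nametoid);
unregister_idtoname:
	unregister_cache(n->idtoname);
destroy_idtoname:
	destroy_cache(n->idtoname);
	return rv;
}

int main(void)
{
	struct idmap_sketch n;

	return idmap_init_sketch(&n) ? 1 : 0;
}
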
diff --git a/fs/nfsd/nfs4recover.c b/fs/nfsd/nfs4recover.c
index ed3f9206a0ee..5ff0b7b9fc08 100644
--- a/fs/nfsd/nfs4recover.c
+++ b/fs/nfsd/nfs4recover.c
@@ -570,7 +570,7 @@ static ssize_t
570cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen) 570cld_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
571{ 571{
572 struct cld_upcall *tmp, *cup; 572 struct cld_upcall *tmp, *cup;
573 struct cld_msg *cmsg = (struct cld_msg *)src; 573 struct cld_msg __user *cmsg = (struct cld_msg __user *)src;
574 uint32_t xid; 574 uint32_t xid;
575 struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info, 575 struct nfsd_net *nn = net_generic(filp->f_dentry->d_sb->s_fs_info,
576 nfsd_net_id); 576 nfsd_net_id);
@@ -1029,7 +1029,7 @@ rpc_pipefs_event(struct notifier_block *nb, unsigned long event, void *ptr)
1029 return ret; 1029 return ret;
1030} 1030}
1031 1031
1032struct notifier_block nfsd4_cld_block = { 1032static struct notifier_block nfsd4_cld_block = {
1033 .notifier_call = rpc_pipefs_event, 1033 .notifier_call = rpc_pipefs_event,
1034}; 1034};
1035 1035
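
The nfs4recover.c change above is mostly annotation: the downcall buffer is cast to struct cld_msg __user * so sparse keeps treating it as a userspace pointer, and the notifier_block gains static linkage. The snippet below is a rough approximation of how such an address-space annotation is wired up and why the cast preserves it; the exact macro definition lives in the kernel's compiler headers, so take the details as illustrative only.

#include <stddef.h>

/* approximation of the sparse annotation: under "make C=1" the checker sees a
 * distinct address space and forbids direct dereference; in an ordinary build
 * it expands to nothing */
#ifdef __CHECKER__
# define __user __attribute__((noderef, address_space(1)))
#else
# define __user
#endif

struct cld_msg_sketch { unsigned int cm_xid; };

/* hypothetical downcall: the buffer comes straight from userspace, so every
 * pointer derived from it stays tagged __user and is only read via a copy */
size_t downcall_sketch(const char __user *src, size_t mlen)
{
	const struct cld_msg_sketch __user *cmsg =
		(const struct cld_msg_sketch __user *)src;

	/* in the kernel this would be copy_from_user()/get_user(); the tagged
	 * pointer itself is never dereferenced directly */
	(void)cmsg;
	return mlen;
}
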
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 7f71c69cdcdf..94effd5bc4a1 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -42,6 +42,7 @@
42#include <linux/sunrpc/clnt.h> 42#include <linux/sunrpc/clnt.h>
43#include "xdr4.h" 43#include "xdr4.h"
44#include "vfs.h" 44#include "vfs.h"
45#include "current_stateid.h"
45 46
46#define NFSDDBG_FACILITY NFSDDBG_PROC 47#define NFSDDBG_FACILITY NFSDDBG_PROC
47 48
@@ -447,37 +448,69 @@ static struct list_head close_lru;
447 * 448 *
448 * which we should reject. 449 * which we should reject.
449 */ 450 */
450static void 451static unsigned int
451set_access(unsigned int *access, unsigned long bmap) { 452bmap_to_share_mode(unsigned long bmap) {
452 int i; 453 int i;
454 unsigned int access = 0;
453 455
454 *access = 0;
455 for (i = 1; i < 4; i++) { 456 for (i = 1; i < 4; i++) {
456 if (test_bit(i, &bmap)) 457 if (test_bit(i, &bmap))
457 *access |= i; 458 access |= i;
458 }
459}
460
461static void
462set_deny(unsigned int *deny, unsigned long bmap) {
463 int i;
464
465 *deny = 0;
466 for (i = 0; i < 4; i++) {
467 if (test_bit(i, &bmap))
468 *deny |= i ;
469 } 459 }
460 return access;
470} 461}
471 462
472static int 463static bool
473test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) { 464test_share(struct nfs4_ol_stateid *stp, struct nfsd4_open *open) {
474 unsigned int access, deny; 465 unsigned int access, deny;
475 466
476 set_access(&access, stp->st_access_bmap); 467 access = bmap_to_share_mode(stp->st_access_bmap);
477 set_deny(&deny, stp->st_deny_bmap); 468 deny = bmap_to_share_mode(stp->st_deny_bmap);
478 if ((access & open->op_share_deny) || (deny & open->op_share_access)) 469 if ((access & open->op_share_deny) || (deny & open->op_share_access))
479 return 0; 470 return false;
480 return 1; 471 return true;
472}
473
474/* set share access for a given stateid */
475static inline void
476set_access(u32 access, struct nfs4_ol_stateid *stp)
477{
478 __set_bit(access, &stp->st_access_bmap);
479}
480
481/* clear share access for a given stateid */
482static inline void
483clear_access(u32 access, struct nfs4_ol_stateid *stp)
484{
485 __clear_bit(access, &stp->st_access_bmap);
486}
487
488/* test whether a given stateid has access */
489static inline bool
490test_access(u32 access, struct nfs4_ol_stateid *stp)
491{
492 return test_bit(access, &stp->st_access_bmap);
493}
494
495/* set share deny for a given stateid */
496static inline void
497set_deny(u32 access, struct nfs4_ol_stateid *stp)
498{
499 __set_bit(access, &stp->st_deny_bmap);
500}
501
502/* clear share deny for a given stateid */
503static inline void
504clear_deny(u32 access, struct nfs4_ol_stateid *stp)
505{
506 __clear_bit(access, &stp->st_deny_bmap);
507}
508
509/* test whether a given stateid is denying specific access */
510static inline bool
511test_deny(u32 access, struct nfs4_ol_stateid *stp)
512{
513 return test_bit(access, &stp->st_deny_bmap);
481} 514}
482 515
483static int nfs4_access_to_omode(u32 access) 516static int nfs4_access_to_omode(u32 access)
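
The hunk above replaces open-coded test_bit()/__set_bit()/__clear_bit() calls on st_access_bmap and st_deny_bmap with named helpers, and turns the old in-place set_access()/set_deny() writers into bmap_to_share_mode(). A compact, runnable C sketch of the same idea, using plain shifts instead of the kernel bitmap API and a hypothetical stateid_sketch struct:

#include <stdbool.h>
#include <stdio.h>

/* NFSv4 share access values, as in the hunk: 1=READ, 2=WRITE, 3=BOTH */
enum { SHARE_READ = 1, SHARE_WRITE = 2, SHARE_BOTH = 3 };

struct stateid_sketch {
	unsigned long access_bmap;
	unsigned long deny_bmap;
};

/* named helpers instead of open-coded bit twiddling */
static void set_access(unsigned int a, struct stateid_sketch *s)   { s->access_bmap |=  (1UL << a); }
static void clear_access(unsigned int a, struct stateid_sketch *s) { s->access_bmap &= ~(1UL << a); }
static bool test_access(unsigned int a, struct stateid_sketch *s)  { return s->access_bmap & (1UL << a); }

/* collapse the per-bit map back into a share mode word */
static unsigned int bmap_to_share_mode(unsigned long bmap)
{
	unsigned int mode = 0;

	for (int i = 1; i < 4; i++)
		if (bmap & (1UL << i))
			mode |= i;
	return mode;
}

int main(void)
{
	struct stateid_sketch st = { 0, 0 };

	set_access(SHARE_READ, &st);
	set_access(SHARE_WRITE, &st);
	printf("mode=%u read=%d\n", bmap_to_share_mode(st.access_bmap),
	       test_access(SHARE_READ, &st));
	clear_access(SHARE_WRITE, &st);
	printf("mode=%u\n", bmap_to_share_mode(st.access_bmap));
	return 0;
}
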
@@ -493,6 +526,20 @@ static int nfs4_access_to_omode(u32 access)
493 BUG(); 526 BUG();
494} 527}
495 528
529/* release all access and file references for a given stateid */
530static void
531release_all_access(struct nfs4_ol_stateid *stp)
532{
533 int i;
534
535 for (i = 1; i < 4; i++) {
536 if (test_access(i, stp))
537 nfs4_file_put_access(stp->st_file,
538 nfs4_access_to_omode(i));
539 clear_access(i, stp);
540 }
541}
542
496static void unhash_generic_stateid(struct nfs4_ol_stateid *stp) 543static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
497{ 544{
498 list_del(&stp->st_perfile); 545 list_del(&stp->st_perfile);
@@ -501,16 +548,7 @@ static void unhash_generic_stateid(struct nfs4_ol_stateid *stp)
501 548
502static void close_generic_stateid(struct nfs4_ol_stateid *stp) 549static void close_generic_stateid(struct nfs4_ol_stateid *stp)
503{ 550{
504 int i; 551 release_all_access(stp);
505
506 if (stp->st_access_bmap) {
507 for (i = 1; i < 4; i++) {
508 if (test_bit(i, &stp->st_access_bmap))
509 nfs4_file_put_access(stp->st_file,
510 nfs4_access_to_omode(i));
511 __clear_bit(i, &stp->st_access_bmap);
512 }
513 }
514 put_nfs4_file(stp->st_file); 552 put_nfs4_file(stp->st_file);
515 stp->st_file = NULL; 553 stp->st_file = NULL;
516} 554}
@@ -862,7 +900,7 @@ static void free_session(struct kref *kref)
862 struct nfsd4_session *ses; 900 struct nfsd4_session *ses;
863 int mem; 901 int mem;
864 902
865 BUG_ON(!spin_is_locked(&client_lock)); 903 lockdep_assert_held(&client_lock);
866 ses = container_of(kref, struct nfsd4_session, se_ref); 904 ses = container_of(kref, struct nfsd4_session, se_ref);
867 nfsd4_del_conns(ses); 905 nfsd4_del_conns(ses);
868 spin_lock(&nfsd_drc_lock); 906 spin_lock(&nfsd_drc_lock);
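
In the hunk above, BUG_ON(!spin_is_locked(&client_lock)) becomes lockdep_assert_held(): spin_is_locked() only reports that somebody holds the lock and is unreliable on uniprocessor builds, whereas lockdep_assert_held() checks that the current task holds it and compiles away when lockdep is disabled. A userspace analogue of the "assert the caller holds the lock" idea, tracking the owner explicitly (hypothetical names, build with -pthread):

#include <assert.h>
#include <pthread.h>
#include <stdio.h>

/* record who owns the lock so the helper can assert "the caller holds it",
 * not merely "somebody holds it" */
static pthread_mutex_t client_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_t client_lock_owner;
static int client_lock_held;

static void lock_clients(void)
{
	pthread_mutex_lock(&client_lock);
	client_lock_owner = pthread_self();
	client_lock_held = 1;
}

static void unlock_clients(void)
{
	client_lock_held = 0;
	pthread_mutex_unlock(&client_lock);
}

static void assert_client_lock_held(void)
{
	assert(client_lock_held && pthread_equal(client_lock_owner, pthread_self()));
}

static void free_client_sketch(void)
{
	assert_client_lock_held();	/* caller must hold client_lock */
	puts("freeing client state under client_lock");
}

int main(void)
{
	lock_clients();
	free_client_sketch();
	unlock_clients();
	return 0;
}
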
@@ -885,7 +923,7 @@ static struct nfsd4_session *alloc_init_session(struct svc_rqst *rqstp, struct n
885 struct nfsd4_session *new; 923 struct nfsd4_session *new;
886 struct nfsd4_channel_attrs *fchan = &cses->fore_channel; 924 struct nfsd4_channel_attrs *fchan = &cses->fore_channel;
887 int numslots, slotsize; 925 int numslots, slotsize;
888 int status; 926 __be32 status;
889 int idx; 927 int idx;
890 928
891 /* 929 /*
@@ -984,7 +1022,8 @@ static inline void
984renew_client_locked(struct nfs4_client *clp) 1022renew_client_locked(struct nfs4_client *clp)
985{ 1023{
986 if (is_client_expired(clp)) { 1024 if (is_client_expired(clp)) {
987 dprintk("%s: client (clientid %08x/%08x) already expired\n", 1025 WARN_ON(1);
1026 printk("%s: client (clientid %08x/%08x) already expired\n",
988 __func__, 1027 __func__,
989 clp->cl_clientid.cl_boot, 1028 clp->cl_clientid.cl_boot,
990 clp->cl_clientid.cl_id); 1029 clp->cl_clientid.cl_id);
@@ -1041,7 +1080,7 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
1041static inline void 1080static inline void
1042free_client(struct nfs4_client *clp) 1081free_client(struct nfs4_client *clp)
1043{ 1082{
1044 BUG_ON(!spin_is_locked(&client_lock)); 1083 lockdep_assert_held(&client_lock);
1045 while (!list_empty(&clp->cl_sessions)) { 1084 while (!list_empty(&clp->cl_sessions)) {
1046 struct nfsd4_session *ses; 1085 struct nfsd4_session *ses;
1047 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session, 1086 ses = list_entry(clp->cl_sessions.next, struct nfsd4_session,
@@ -1049,9 +1088,7 @@ free_client(struct nfs4_client *clp)
1049 list_del(&ses->se_perclnt); 1088 list_del(&ses->se_perclnt);
1050 nfsd4_put_session_locked(ses); 1089 nfsd4_put_session_locked(ses);
1051 } 1090 }
1052 if (clp->cl_cred.cr_group_info) 1091 free_svc_cred(&clp->cl_cred);
1053 put_group_info(clp->cl_cred.cr_group_info);
1054 kfree(clp->cl_principal);
1055 kfree(clp->cl_name.data); 1092 kfree(clp->cl_name.data);
1056 kfree(clp); 1093 kfree(clp);
1057} 1094}
@@ -1132,12 +1169,21 @@ static void copy_clid(struct nfs4_client *target, struct nfs4_client *source)
1132 target->cl_clientid.cl_id = source->cl_clientid.cl_id; 1169 target->cl_clientid.cl_id = source->cl_clientid.cl_id;
1133} 1170}
1134 1171
1135static void copy_cred(struct svc_cred *target, struct svc_cred *source) 1172static int copy_cred(struct svc_cred *target, struct svc_cred *source)
1136{ 1173{
1174 if (source->cr_principal) {
1175 target->cr_principal =
1176 kstrdup(source->cr_principal, GFP_KERNEL);
1177 if (target->cr_principal == NULL)
1178 return -ENOMEM;
1179 } else
1180 target->cr_principal = NULL;
1181 target->cr_flavor = source->cr_flavor;
1137 target->cr_uid = source->cr_uid; 1182 target->cr_uid = source->cr_uid;
1138 target->cr_gid = source->cr_gid; 1183 target->cr_gid = source->cr_gid;
1139 target->cr_group_info = source->cr_group_info; 1184 target->cr_group_info = source->cr_group_info;
1140 get_group_info(target->cr_group_info); 1185 get_group_info(target->cr_group_info);
1186 return 0;
1141} 1187}
1142 1188
1143static int same_name(const char *n1, const char *n2) 1189static int same_name(const char *n1, const char *n2)
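
copy_cred() above now deep-copies the principal string with kstrdup() and propagates -ENOMEM instead of aliasing the pointer, which is what lets free_svc_cred() later release it unconditionally. A self-contained userspace sketch of the same ownership rule, with strdup() standing in for kstrdup() and a hypothetical cred_sketch struct:

#include <errno.h>
#include <stdlib.h>
#include <string.h>

struct cred_sketch {
	char *principal;	/* may be NULL, e.g. for AUTH_SYS */
	unsigned int uid, gid;
};

/* duplicate the optional string; report allocation failure to the caller */
static int copy_cred_sketch(struct cred_sketch *dst, const struct cred_sketch *src)
{
	if (src->principal) {
		dst->principal = strdup(src->principal);
		if (!dst->principal)
			return -ENOMEM;
	} else {
		dst->principal = NULL;
	}
	dst->uid = src->uid;
	dst->gid = src->gid;
	return 0;
}

/* the copy owns its string, so it can always be freed */
static void free_cred_sketch(struct cred_sketch *c)
{
	free(c->principal);
	c->principal = NULL;
}

int main(void)
{
	struct cred_sketch src = { "host/nfs.example.org", 0, 0 }, dst;

	if (copy_cred_sketch(&dst, &src))
		return 1;
	free_cred_sketch(&dst);
	return 0;
}
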
@@ -1157,11 +1203,31 @@ same_clid(clientid_t *cl1, clientid_t *cl2)
1157 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id); 1203 return (cl1->cl_boot == cl2->cl_boot) && (cl1->cl_id == cl2->cl_id);
1158} 1204}
1159 1205
1160/* XXX what about NGROUP */ 1206static bool groups_equal(struct group_info *g1, struct group_info *g2)
1207{
1208 int i;
1209
1210 if (g1->ngroups != g2->ngroups)
1211 return false;
1212 for (i=0; i<g1->ngroups; i++)
1213 if (GROUP_AT(g1, i) != GROUP_AT(g2, i))
1214 return false;
1215 return true;
1216}
1217
1161static int 1218static int
1162same_creds(struct svc_cred *cr1, struct svc_cred *cr2) 1219same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
1163{ 1220{
1164 return cr1->cr_uid == cr2->cr_uid; 1221 if ((cr1->cr_flavor != cr2->cr_flavor)
1222 || (cr1->cr_uid != cr2->cr_uid)
1223 || (cr1->cr_gid != cr2->cr_gid)
1224 || !groups_equal(cr1->cr_group_info, cr2->cr_group_info))
1225 return false;
1226 if (cr1->cr_principal == cr2->cr_principal)
1227 return true;
1228 if (!cr1->cr_principal || !cr2->cr_principal)
1229 return false;
 1230	return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
1165} 1231}
1166 1232
1167static void gen_clid(struct nfs4_client *clp) 1233static void gen_clid(struct nfs4_client *clp)
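
same_creds() is strengthened above: flavor, uid, gid, the supplementary group list and the (possibly NULL) principal must all match, with a NULL-safe string comparison at the end. A runnable sketch of that comparison, using plain arrays instead of group_info:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

struct cred_sketch {
	int flavor;
	unsigned int uid, gid;
	const unsigned int *groups;
	int ngroups;
	const char *principal;	/* NULL for AUTH_SYS */
};

static bool groups_equal(const struct cred_sketch *a, const struct cred_sketch *b)
{
	if (a->ngroups != b->ngroups)
		return false;
	for (int i = 0; i < a->ngroups; i++)
		if (a->groups[i] != b->groups[i])
			return false;
	return true;
}

static bool same_creds_sketch(const struct cred_sketch *a, const struct cred_sketch *b)
{
	if (a->flavor != b->flavor || a->uid != b->uid || a->gid != b->gid ||
	    !groups_equal(a, b))
		return false;
	if (a->principal == b->principal)	/* both NULL (or same pointer) */
		return true;
	if (!a->principal || !b->principal)	/* exactly one NULL */
		return false;
	return strcmp(a->principal, b->principal) == 0;
}

int main(void)
{
	unsigned int g[] = { 100, 101 };
	struct cred_sketch a = { 1, 1000, 100, g, 2, NULL };
	struct cred_sketch b = a;

	printf("%d\n", same_creds_sketch(&a, &b));	/* 1 */
	b.principal = "host/nfs.example.org";
	printf("%d\n", same_creds_sketch(&a, &b));	/* 0 */
	return 0;
}
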
@@ -1204,25 +1270,20 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1204{ 1270{
1205 struct nfs4_client *clp; 1271 struct nfs4_client *clp;
1206 struct sockaddr *sa = svc_addr(rqstp); 1272 struct sockaddr *sa = svc_addr(rqstp);
1207 char *princ; 1273 int ret;
1208 1274
1209 clp = alloc_client(name); 1275 clp = alloc_client(name);
1210 if (clp == NULL) 1276 if (clp == NULL)
1211 return NULL; 1277 return NULL;
1212 1278
1213 INIT_LIST_HEAD(&clp->cl_sessions); 1279 INIT_LIST_HEAD(&clp->cl_sessions);
1214 1280 ret = copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1215 princ = svc_gss_principal(rqstp); 1281 if (ret) {
1216 if (princ) { 1282 spin_lock(&client_lock);
1217 clp->cl_principal = kstrdup(princ, GFP_KERNEL); 1283 free_client(clp);
1218 if (clp->cl_principal == NULL) { 1284 spin_unlock(&client_lock);
1219 spin_lock(&client_lock); 1285 return NULL;
1220 free_client(clp);
1221 spin_unlock(&client_lock);
1222 return NULL;
1223 }
1224 } 1286 }
1225
1226 idr_init(&clp->cl_stateids); 1287 idr_init(&clp->cl_stateids);
1227 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN); 1288 memcpy(clp->cl_recdir, recdir, HEXDIR_LEN);
1228 atomic_set(&clp->cl_refcount, 0); 1289 atomic_set(&clp->cl_refcount, 0);
@@ -1240,8 +1301,6 @@ static struct nfs4_client *create_client(struct xdr_netobj name, char *recdir,
1240 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table"); 1301 rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
1241 copy_verf(clp, verf); 1302 copy_verf(clp, verf);
1242 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa); 1303 rpc_copy_addr((struct sockaddr *) &clp->cl_addr, sa);
1243 clp->cl_flavor = rqstp->rq_flavor;
1244 copy_cred(&clp->cl_cred, &rqstp->rq_cred);
1245 gen_confirm(clp); 1304 gen_confirm(clp);
1246 clp->cl_cb_session = NULL; 1305 clp->cl_cb_session = NULL;
1247 return clp; 1306 return clp;
@@ -1470,18 +1529,32 @@ nfsd4_set_ex_flags(struct nfs4_client *new, struct nfsd4_exchange_id *clid)
1470 clid->flags = new->cl_exchange_flags; 1529 clid->flags = new->cl_exchange_flags;
1471} 1530}
1472 1531
1532static bool client_has_state(struct nfs4_client *clp)
1533{
1534 /*
1535 * Note clp->cl_openowners check isn't quite right: there's no
1536 * need to count owners without stateid's.
1537 *
1538 * Also note we should probably be using this in 4.0 case too.
1539 */
1540 return !list_empty(&clp->cl_openowners)
1541 || !list_empty(&clp->cl_delegations)
1542 || !list_empty(&clp->cl_sessions);
1543}
1544
1473__be32 1545__be32
1474nfsd4_exchange_id(struct svc_rqst *rqstp, 1546nfsd4_exchange_id(struct svc_rqst *rqstp,
1475 struct nfsd4_compound_state *cstate, 1547 struct nfsd4_compound_state *cstate,
1476 struct nfsd4_exchange_id *exid) 1548 struct nfsd4_exchange_id *exid)
1477{ 1549{
1478 struct nfs4_client *unconf, *conf, *new; 1550 struct nfs4_client *unconf, *conf, *new;
1479 int status; 1551 __be32 status;
1480 unsigned int strhashval; 1552 unsigned int strhashval;
1481 char dname[HEXDIR_LEN]; 1553 char dname[HEXDIR_LEN];
1482 char addr_str[INET6_ADDRSTRLEN]; 1554 char addr_str[INET6_ADDRSTRLEN];
1483 nfs4_verifier verf = exid->verifier; 1555 nfs4_verifier verf = exid->verifier;
1484 struct sockaddr *sa = svc_addr(rqstp); 1556 struct sockaddr *sa = svc_addr(rqstp);
1557 bool update = exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A;
1485 1558
1486 rpc_ntop(sa, addr_str, sizeof(addr_str)); 1559 rpc_ntop(sa, addr_str, sizeof(addr_str));
1487 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p " 1560 dprintk("%s rqstp=%p exid=%p clname.len=%u clname.data=%p "
@@ -1507,71 +1580,63 @@ nfsd4_exchange_id(struct svc_rqst *rqstp,
1507 status = nfs4_make_rec_clidname(dname, &exid->clname); 1580 status = nfs4_make_rec_clidname(dname, &exid->clname);
1508 1581
1509 if (status) 1582 if (status)
1510 goto error; 1583 return status;
1511 1584
1512 strhashval = clientstr_hashval(dname); 1585 strhashval = clientstr_hashval(dname);
1513 1586
1587 /* Cases below refer to rfc 5661 section 18.35.4: */
1514 nfs4_lock_state(); 1588 nfs4_lock_state();
1515 status = nfs_ok;
1516
1517 conf = find_confirmed_client_by_str(dname, strhashval); 1589 conf = find_confirmed_client_by_str(dname, strhashval);
1518 if (conf) { 1590 if (conf) {
1519 if (!clp_used_exchangeid(conf)) { 1591 bool creds_match = same_creds(&conf->cl_cred, &rqstp->rq_cred);
1520 status = nfserr_clid_inuse; /* XXX: ? */ 1592 bool verfs_match = same_verf(&verf, &conf->cl_verifier);
1521 goto out; 1593
1522 } 1594 if (update) {
1523 if (!same_verf(&verf, &conf->cl_verifier)) { 1595 if (!clp_used_exchangeid(conf)) { /* buggy client */
1524 /* 18.35.4 case 8 */ 1596 status = nfserr_inval;
1525 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { 1597 goto out;
1598 }
1599 if (!creds_match) { /* case 9 */
1600 status = nfserr_perm;
1601 goto out;
1602 }
1603 if (!verfs_match) { /* case 8 */
1526 status = nfserr_not_same; 1604 status = nfserr_not_same;
1527 goto out; 1605 goto out;
1528 } 1606 }
1529 /* Client reboot: destroy old state */ 1607 /* case 6 */
1530 expire_client(conf); 1608 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R;
1531 goto out_new; 1609 new = conf;
1610 goto out_copy;
1532 } 1611 }
1533 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred)) { 1612 if (!creds_match) { /* case 3 */
1534 /* 18.35.4 case 9 */ 1613 if (client_has_state(conf)) {
1535 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) { 1614 status = nfserr_clid_inuse;
1536 status = nfserr_perm;
1537 goto out; 1615 goto out;
1538 } 1616 }
1539 expire_client(conf); 1617 expire_client(conf);
1540 goto out_new; 1618 goto out_new;
1541 } 1619 }
1542 /* 1620 if (verfs_match) { /* case 2 */
1543 * Set bit when the owner id and verifier map to an already 1621 conf->cl_exchange_flags |= EXCHGID4_FLAG_CONFIRMED_R;
1544 * confirmed client id (18.35.3). 1622 new = conf;
1545 */ 1623 goto out_copy;
1546 exid->flags |= EXCHGID4_FLAG_CONFIRMED_R; 1624 }
1547 1625 /* case 5, client reboot */
1548 /* 1626 goto out_new;
1549 * Falling into 18.35.4 case 2, possible router replay.
1550 * Leave confirmed record intact and return same result.
1551 */
1552 copy_verf(conf, &verf);
1553 new = conf;
1554 goto out_copy;
1555 } 1627 }
1556 1628
1557 /* 18.35.4 case 7 */ 1629 if (update) { /* case 7 */
1558 if (exid->flags & EXCHGID4_FLAG_UPD_CONFIRMED_REC_A) {
1559 status = nfserr_noent; 1630 status = nfserr_noent;
1560 goto out; 1631 goto out;
1561 } 1632 }
1562 1633
1563 unconf = find_unconfirmed_client_by_str(dname, strhashval); 1634 unconf = find_unconfirmed_client_by_str(dname, strhashval);
1564 if (unconf) { 1635 if (unconf) /* case 4, possible retry or client restart */
1565 /*
1566 * Possible retry or client restart. Per 18.35.4 case 4,
1567 * a new unconfirmed record should be generated regardless
1568 * of whether any properties have changed.
1569 */
1570 expire_client(unconf); 1636 expire_client(unconf);
1571 }
1572 1637
1638 /* case 1 (normal case) */
1573out_new: 1639out_new:
1574 /* Normal case */
1575 new = create_client(exid->clname, dname, rqstp, &verf); 1640 new = create_client(exid->clname, dname, rqstp, &verf);
1576 if (new == NULL) { 1641 if (new == NULL) {
1577 status = nfserr_jukebox; 1642 status = nfserr_jukebox;
@@ -1584,7 +1649,7 @@ out_copy:
1584 exid->clientid.cl_boot = new->cl_clientid.cl_boot; 1649 exid->clientid.cl_boot = new->cl_clientid.cl_boot;
1585 exid->clientid.cl_id = new->cl_clientid.cl_id; 1650 exid->clientid.cl_id = new->cl_clientid.cl_id;
1586 1651
1587 exid->seqid = 1; 1652 exid->seqid = new->cl_cs_slot.sl_seqid + 1;
1588 nfsd4_set_ex_flags(new, exid); 1653 nfsd4_set_ex_flags(new, exid);
1589 1654
1590 dprintk("nfsd4_exchange_id seqid %d flags %x\n", 1655 dprintk("nfsd4_exchange_id seqid %d flags %x\n",
@@ -1593,12 +1658,10 @@ out_copy:
1593 1658
1594out: 1659out:
1595 nfs4_unlock_state(); 1660 nfs4_unlock_state();
1596error:
1597 dprintk("nfsd4_exchange_id returns %d\n", ntohl(status));
1598 return status; 1661 return status;
1599} 1662}
1600 1663
1601static int 1664static __be32
1602check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse) 1665check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1603{ 1666{
1604 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid, 1667 dprintk("%s enter. seqid %d slot_seqid %d\n", __func__, seqid,
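
The rewritten nfsd4_exchange_id() above is easiest to read as a decision table over RFC 5661 section 18.35.4, keyed on whether a confirmed record exists, whether EXCHGID4_FLAG_UPD_CONFIRMED_REC_A was set, and whether the credentials and verifiers match. The sketch below returns the matching case number under the same ordering; it deliberately omits the nfserr_inval check for a confirmed record that never used EXCHANGE_ID and the client_has_state() refinement inside case 3, so treat it as a reading aid rather than the server's logic.

#include <stdbool.h>
#include <stdio.h>

/* map the request facts to the RFC 5661 section 18.35.4 case number,
 * following the same if/else ordering as the hunk above */
static int exchange_id_case(bool confirmed_exists, bool update,
			    bool creds_match, bool verfs_match,
			    bool unconfirmed_exists)
{
	if (confirmed_exists) {
		if (update) {
			if (!creds_match)
				return 9;	/* nfserr_perm */
			if (!verfs_match)
				return 8;	/* nfserr_not_same */
			return 6;		/* update of confirmed record */
		}
		if (!creds_match)
			return 3;		/* clid in use, or expire + new */
		if (verfs_match)
			return 2;		/* probable retransmit */
		return 5;			/* client reboot */
	}
	if (update)
		return 7;			/* nothing to update: nfserr_noent */
	return unconfirmed_exists ? 4 : 1;	/* replace unconfirmed, or brand new */
}

int main(void)
{
	printf("reboot -> case %d\n", exchange_id_case(true,  false, true,  false, false));
	printf("new    -> case %d\n", exchange_id_case(false, false, true,  true,  false));
	printf("update -> case %d\n", exchange_id_case(true,  true,  true,  true,  false));
	return 0;
}
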
@@ -1626,7 +1689,7 @@ check_slot_seqid(u32 seqid, u32 slot_seqid, int slot_inuse)
1626 */ 1689 */
1627static void 1690static void
1628nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses, 1691nfsd4_cache_create_session(struct nfsd4_create_session *cr_ses,
1629 struct nfsd4_clid_slot *slot, int nfserr) 1692 struct nfsd4_clid_slot *slot, __be32 nfserr)
1630{ 1693{
1631 slot->sl_status = nfserr; 1694 slot->sl_status = nfserr;
1632 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses)); 1695 memcpy(&slot->sl_cr_ses, cr_ses, sizeof(*cr_ses));
@@ -1657,7 +1720,7 @@ nfsd4_replay_create_session(struct nfsd4_create_session *cr_ses,
1657 /* seqid, slotID, slotID, slotID, status */ \ 1720 /* seqid, slotID, slotID, slotID, status */ \
1658 5 ) * sizeof(__be32)) 1721 5 ) * sizeof(__be32))
1659 1722
1660static __be32 check_forechannel_attrs(struct nfsd4_channel_attrs fchannel) 1723static bool check_forechannel_attrs(struct nfsd4_channel_attrs fchannel)
1661{ 1724{
1662 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ 1725 return fchannel.maxreq_sz < NFSD_MIN_REQ_HDR_SEQ_SZ
1663 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ; 1726 || fchannel.maxresp_sz < NFSD_MIN_RESP_HDR_SEQ_SZ;
@@ -1673,7 +1736,7 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1673 struct nfsd4_session *new; 1736 struct nfsd4_session *new;
1674 struct nfsd4_clid_slot *cs_slot = NULL; 1737 struct nfsd4_clid_slot *cs_slot = NULL;
1675 bool confirm_me = false; 1738 bool confirm_me = false;
1676 int status = 0; 1739 __be32 status = 0;
1677 1740
1678 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A) 1741 if (cr_ses->flags & ~SESSION4_FLAG_MASK_A)
1679 return nfserr_inval; 1742 return nfserr_inval;
@@ -1686,16 +1749,10 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1686 cs_slot = &conf->cl_cs_slot; 1749 cs_slot = &conf->cl_cs_slot;
1687 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 1750 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1688 if (status == nfserr_replay_cache) { 1751 if (status == nfserr_replay_cache) {
1689 dprintk("Got a create_session replay! seqid= %d\n",
1690 cs_slot->sl_seqid);
1691 /* Return the cached reply status */
1692 status = nfsd4_replay_create_session(cr_ses, cs_slot); 1752 status = nfsd4_replay_create_session(cr_ses, cs_slot);
1693 goto out; 1753 goto out;
1694 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) { 1754 } else if (cr_ses->seqid != cs_slot->sl_seqid + 1) {
1695 status = nfserr_seq_misordered; 1755 status = nfserr_seq_misordered;
1696 dprintk("Sequence misordered!\n");
1697 dprintk("Expected seqid= %d but got seqid= %d\n",
1698 cs_slot->sl_seqid, cr_ses->seqid);
1699 goto out; 1756 goto out;
1700 } 1757 }
1701 } else if (unconf) { 1758 } else if (unconf) {
@@ -1704,7 +1761,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1704 status = nfserr_clid_inuse; 1761 status = nfserr_clid_inuse;
1705 goto out; 1762 goto out;
1706 } 1763 }
1707
1708 cs_slot = &unconf->cl_cs_slot; 1764 cs_slot = &unconf->cl_cs_slot;
1709 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0); 1765 status = check_slot_seqid(cr_ses->seqid, cs_slot->sl_seqid, 0);
1710 if (status) { 1766 if (status) {
@@ -1712,7 +1768,6 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1712 status = nfserr_seq_misordered; 1768 status = nfserr_seq_misordered;
1713 goto out; 1769 goto out;
1714 } 1770 }
1715
1716 confirm_me = true; 1771 confirm_me = true;
1717 conf = unconf; 1772 conf = unconf;
1718 } else { 1773 } else {
@@ -1749,8 +1804,14 @@ nfsd4_create_session(struct svc_rqst *rqstp,
1749 1804
1750 /* cache solo and embedded create sessions under the state lock */ 1805 /* cache solo and embedded create sessions under the state lock */
1751 nfsd4_cache_create_session(cr_ses, cs_slot, status); 1806 nfsd4_cache_create_session(cr_ses, cs_slot, status);
1752 if (confirm_me) 1807 if (confirm_me) {
1808 unsigned int hash = clientstr_hashval(unconf->cl_recdir);
1809 struct nfs4_client *old =
1810 find_confirmed_client_by_str(conf->cl_recdir, hash);
1811 if (old)
1812 expire_client(old);
1753 move_to_confirmed(conf); 1813 move_to_confirmed(conf);
1814 }
1754out: 1815out:
1755 nfs4_unlock_state(); 1816 nfs4_unlock_state();
1756 dprintk("%s returns %d\n", __func__, ntohl(status)); 1817 dprintk("%s returns %d\n", __func__, ntohl(status));
@@ -1818,7 +1879,7 @@ nfsd4_destroy_session(struct svc_rqst *r,
1818 struct nfsd4_destroy_session *sessionid) 1879 struct nfsd4_destroy_session *sessionid)
1819{ 1880{
1820 struct nfsd4_session *ses; 1881 struct nfsd4_session *ses;
1821 u32 status = nfserr_badsession; 1882 __be32 status = nfserr_badsession;
1822 1883
1823 /* Notes: 1884 /* Notes:
 1824 * - The confirmed nfs4_client->cl_sessionid holds destroyed sessionid 1885
@@ -1914,7 +1975,7 @@ nfsd4_sequence(struct svc_rqst *rqstp,
1914 struct nfsd4_session *session; 1975 struct nfsd4_session *session;
1915 struct nfsd4_slot *slot; 1976 struct nfsd4_slot *slot;
1916 struct nfsd4_conn *conn; 1977 struct nfsd4_conn *conn;
1917 int status; 1978 __be32 status;
1918 1979
1919 if (resp->opcnt != 1) 1980 if (resp->opcnt != 1)
1920 return nfserr_sequence_pos; 1981 return nfserr_sequence_pos;
@@ -2008,18 +2069,11 @@ out:
2008 return status; 2069 return status;
2009} 2070}
2010 2071
2011static inline bool has_resources(struct nfs4_client *clp)
2012{
2013 return !list_empty(&clp->cl_openowners)
2014 || !list_empty(&clp->cl_delegations)
2015 || !list_empty(&clp->cl_sessions);
2016}
2017
2018__be32 2072__be32
2019nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc) 2073nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_destroy_clientid *dc)
2020{ 2074{
2021 struct nfs4_client *conf, *unconf, *clp; 2075 struct nfs4_client *conf, *unconf, *clp;
2022 int status = 0; 2076 __be32 status = 0;
2023 2077
2024 nfs4_lock_state(); 2078 nfs4_lock_state();
2025 unconf = find_unconfirmed_client(&dc->clientid); 2079 unconf = find_unconfirmed_client(&dc->clientid);
@@ -2028,7 +2082,7 @@ nfsd4_destroy_clientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *csta
2028 if (conf) { 2082 if (conf) {
2029 clp = conf; 2083 clp = conf;
2030 2084
2031 if (!is_client_expired(conf) && has_resources(conf)) { 2085 if (!is_client_expired(conf) && client_has_state(conf)) {
2032 status = nfserr_clientid_busy; 2086 status = nfserr_clientid_busy;
2033 goto out; 2087 goto out;
2034 } 2088 }
@@ -2055,7 +2109,7 @@ out:
2055__be32 2109__be32
2056nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc) 2110nfsd4_reclaim_complete(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate, struct nfsd4_reclaim_complete *rc)
2057{ 2111{
2058 int status = 0; 2112 __be32 status = 0;
2059 2113
2060 if (rc->rca_one_fs) { 2114 if (rc->rca_one_fs) {
2061 if (!cstate->current_fh.fh_dentry) 2115 if (!cstate->current_fh.fh_dentry)
@@ -2106,17 +2160,13 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2106 if (status) 2160 if (status)
2107 return status; 2161 return status;
2108 2162
2109 /*
2110 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2111 * We get here on a DRC miss.
2112 */
2113
2114 strhashval = clientstr_hashval(dname); 2163 strhashval = clientstr_hashval(dname);
2115 2164
2165 /* Cases below refer to rfc 3530 section 14.2.33: */
2116 nfs4_lock_state(); 2166 nfs4_lock_state();
2117 conf = find_confirmed_client_by_str(dname, strhashval); 2167 conf = find_confirmed_client_by_str(dname, strhashval);
2118 if (conf) { 2168 if (conf) {
2119 /* RFC 3530 14.2.33 CASE 0: */ 2169 /* case 0: */
2120 status = nfserr_clid_inuse; 2170 status = nfserr_clid_inuse;
2121 if (clp_used_exchangeid(conf)) 2171 if (clp_used_exchangeid(conf))
2122 goto out; 2172 goto out;
@@ -2129,63 +2179,18 @@ nfsd4_setclientid(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
2129 goto out; 2179 goto out;
2130 } 2180 }
2131 } 2181 }
2132 /*
2133 * section 14.2.33 of RFC 3530 (under the heading "IMPLEMENTATION")
2134 * has a description of SETCLIENTID request processing consisting
2135 * of 5 bullet points, labeled as CASE0 - CASE4 below.
2136 */
2137 unconf = find_unconfirmed_client_by_str(dname, strhashval); 2182 unconf = find_unconfirmed_client_by_str(dname, strhashval);
2183 if (unconf)
2184 expire_client(unconf);
2138 status = nfserr_jukebox; 2185 status = nfserr_jukebox;
2139 if (!conf) { 2186 new = create_client(clname, dname, rqstp, &clverifier);
2140 /* 2187 if (new == NULL)
2141 * RFC 3530 14.2.33 CASE 4: 2188 goto out;
2142 * placed first, because it is the normal case 2189 if (conf && same_verf(&conf->cl_verifier, &clverifier))
2143 */ 2190 /* case 1: probable callback update */
2144 if (unconf)
2145 expire_client(unconf);
2146 new = create_client(clname, dname, rqstp, &clverifier);
2147 if (new == NULL)
2148 goto out;
2149 gen_clid(new);
2150 } else if (same_verf(&conf->cl_verifier, &clverifier)) {
2151 /*
2152 * RFC 3530 14.2.33 CASE 1:
2153 * probable callback update
2154 */
2155 if (unconf) {
2156 /* Note this is removing unconfirmed {*x***},
2157 * which is stronger than RFC recommended {vxc**}.
2158 * This has the advantage that there is at most
2159 * one {*x***} in either list at any time.
2160 */
2161 expire_client(unconf);
2162 }
2163 new = create_client(clname, dname, rqstp, &clverifier);
2164 if (new == NULL)
2165 goto out;
2166 copy_clid(new, conf); 2191 copy_clid(new, conf);
2167 } else if (!unconf) { 2192 else /* case 4 (new client) or cases 2, 3 (client reboot): */
2168 /*
2169 * RFC 3530 14.2.33 CASE 2:
2170 * probable client reboot; state will be removed if
2171 * confirmed.
2172 */
2173 new = create_client(clname, dname, rqstp, &clverifier);
2174 if (new == NULL)
2175 goto out;
2176 gen_clid(new);
2177 } else {
2178 /*
2179 * RFC 3530 14.2.33 CASE 3:
2180 * probable client reboot; state will be removed if
2181 * confirmed.
2182 */
2183 expire_client(unconf);
2184 new = create_client(clname, dname, rqstp, &clverifier);
2185 if (new == NULL)
2186 goto out;
2187 gen_clid(new); 2193 gen_clid(new);
2188 }
2189 /* 2194 /*
2190 * XXX: we should probably set this at creation time, and check 2195 * XXX: we should probably set this at creation time, and check
2191 * for consistent minorversion use throughout: 2196 * for consistent minorversion use throughout:
@@ -2203,17 +2208,11 @@ out:
2203} 2208}
2204 2209
2205 2210
2206/*
2207 * Section 14.2.34 of RFC 3530 (under the heading "IMPLEMENTATION") has
2208 * a description of SETCLIENTID_CONFIRM request processing consisting of 4
2209 * bullets, labeled as CASE1 - CASE4 below.
2210 */
2211__be32 2211__be32
2212nfsd4_setclientid_confirm(struct svc_rqst *rqstp, 2212nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2213 struct nfsd4_compound_state *cstate, 2213 struct nfsd4_compound_state *cstate,
2214 struct nfsd4_setclientid_confirm *setclientid_confirm) 2214 struct nfsd4_setclientid_confirm *setclientid_confirm)
2215{ 2215{
2216 struct sockaddr *sa = svc_addr(rqstp);
2217 struct nfs4_client *conf, *unconf; 2216 struct nfs4_client *conf, *unconf;
2218 nfs4_verifier confirm = setclientid_confirm->sc_confirm; 2217 nfs4_verifier confirm = setclientid_confirm->sc_confirm;
2219 clientid_t * clid = &setclientid_confirm->sc_clientid; 2218 clientid_t * clid = &setclientid_confirm->sc_clientid;
@@ -2221,84 +2220,44 @@ nfsd4_setclientid_confirm(struct svc_rqst *rqstp,
2221 2220
2222 if (STALE_CLIENTID(clid)) 2221 if (STALE_CLIENTID(clid))
2223 return nfserr_stale_clientid; 2222 return nfserr_stale_clientid;
2224 /*
2225 * XXX The Duplicate Request Cache (DRC) has been checked (??)
2226 * We get here on a DRC miss.
2227 */
2228
2229 nfs4_lock_state(); 2223 nfs4_lock_state();
2230 2224
2231 conf = find_confirmed_client(clid); 2225 conf = find_confirmed_client(clid);
2232 unconf = find_unconfirmed_client(clid); 2226 unconf = find_unconfirmed_client(clid);
2233
2234 status = nfserr_clid_inuse;
2235 if (conf && !rpc_cmp_addr((struct sockaddr *) &conf->cl_addr, sa))
2236 goto out;
2237 if (unconf && !rpc_cmp_addr((struct sockaddr *) &unconf->cl_addr, sa))
2238 goto out;
2239
2240 /* 2227 /*
2241 * section 14.2.34 of RFC 3530 has a description of 2228 * We try hard to give out unique clientid's, so if we get an
2242 * SETCLIENTID_CONFIRM request processing consisting 2229 * attempt to confirm the same clientid with a different cred,
2243 * of 4 bullet points, labeled as CASE1 - CASE4 below. 2230 * there's a bug somewhere. Let's charitably assume it's our
2231 * bug.
2244 */ 2232 */
2245 if (conf && unconf && same_verf(&confirm, &unconf->cl_confirm)) { 2233 status = nfserr_serverfault;
2246 /* 2234 if (unconf && !same_creds(&unconf->cl_cred, &rqstp->rq_cred))
2247 * RFC 3530 14.2.34 CASE 1: 2235 goto out;
2248 * callback update 2236 if (conf && !same_creds(&conf->cl_cred, &rqstp->rq_cred))
2249 */ 2237 goto out;
2250 if (!same_creds(&conf->cl_cred, &unconf->cl_cred)) 2238 /* cases below refer to rfc 3530 section 14.2.34: */
2251 status = nfserr_clid_inuse; 2239 if (!unconf || !same_verf(&confirm, &unconf->cl_confirm)) {
2252 else { 2240 if (conf && !unconf) /* case 2: probable retransmit */
2253 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2254 nfsd4_probe_callback(conf);
2255 expire_client(unconf);
2256 status = nfs_ok; 2241 status = nfs_ok;
2242 else /* case 4: client hasn't noticed we rebooted yet? */
2243 status = nfserr_stale_clientid;
2244 goto out;
2245 }
2246 status = nfs_ok;
2247 if (conf) { /* case 1: callback update */
2248 nfsd4_change_callback(conf, &unconf->cl_cb_conn);
2249 nfsd4_probe_callback(conf);
2250 expire_client(unconf);
2251 } else { /* case 3: normal case; new or rebooted client */
2252 unsigned int hash = clientstr_hashval(unconf->cl_recdir);
2257 2253
2254 conf = find_confirmed_client_by_str(unconf->cl_recdir, hash);
2255 if (conf) {
2256 nfsd4_client_record_remove(conf);
2257 expire_client(conf);
2258 } 2258 }
2259 } else if (conf && !unconf) { 2259 move_to_confirmed(unconf);
2260 /* 2260 nfsd4_probe_callback(unconf);
2261 * RFC 3530 14.2.34 CASE 2:
2262 * probable retransmitted request; play it safe and
2263 * do nothing.
2264 */
2265 if (!same_creds(&conf->cl_cred, &rqstp->rq_cred))
2266 status = nfserr_clid_inuse;
2267 else
2268 status = nfs_ok;
2269 } else if (!conf && unconf
2270 && same_verf(&unconf->cl_confirm, &confirm)) {
2271 /*
2272 * RFC 3530 14.2.34 CASE 3:
2273 * Normal case; new or rebooted client:
2274 */
2275 if (!same_creds(&unconf->cl_cred, &rqstp->rq_cred)) {
2276 status = nfserr_clid_inuse;
2277 } else {
2278 unsigned int hash =
2279 clientstr_hashval(unconf->cl_recdir);
2280 conf = find_confirmed_client_by_str(unconf->cl_recdir,
2281 hash);
2282 if (conf) {
2283 nfsd4_client_record_remove(conf);
2284 expire_client(conf);
2285 }
2286 move_to_confirmed(unconf);
2287 conf = unconf;
2288 nfsd4_probe_callback(conf);
2289 status = nfs_ok;
2290 }
2291 } else if ((!conf || (conf && !same_verf(&conf->cl_confirm, &confirm)))
2292 && (!unconf || (unconf && !same_verf(&unconf->cl_confirm,
2293 &confirm)))) {
2294 /*
2295 * RFC 3530 14.2.34 CASE 4:
2296 * Client probably hasn't noticed that we rebooted yet.
2297 */
2298 status = nfserr_stale_clientid;
2299 } else {
2300 /* check that we have hit one of the cases...*/
2301 status = nfserr_clid_inuse;
2302 } 2261 }
2303out: 2262out:
2304 nfs4_unlock_state(); 2263 nfs4_unlock_state();
@@ -2454,8 +2413,8 @@ static void init_open_stateid(struct nfs4_ol_stateid *stp, struct nfs4_file *fp,
2454 stp->st_file = fp; 2413 stp->st_file = fp;
2455 stp->st_access_bmap = 0; 2414 stp->st_access_bmap = 0;
2456 stp->st_deny_bmap = 0; 2415 stp->st_deny_bmap = 0;
2457 __set_bit(open->op_share_access, &stp->st_access_bmap); 2416 set_access(open->op_share_access, stp);
2458 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 2417 set_deny(open->op_share_deny, stp);
2459 stp->st_openstp = NULL; 2418 stp->st_openstp = NULL;
2460} 2419}
2461 2420
@@ -2534,8 +2493,8 @@ nfs4_share_conflict(struct svc_fh *current_fh, unsigned int deny_type)
2534 ret = nfserr_locked; 2493 ret = nfserr_locked;
2535 /* Search for conflicting share reservations */ 2494 /* Search for conflicting share reservations */
2536 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) { 2495 list_for_each_entry(stp, &fp->fi_stateids, st_perfile) {
2537 if (test_bit(deny_type, &stp->st_deny_bmap) || 2496 if (test_deny(deny_type, stp) ||
2538 test_bit(NFS4_SHARE_DENY_BOTH, &stp->st_deny_bmap)) 2497 test_deny(NFS4_SHARE_DENY_BOTH, stp))
2539 goto out; 2498 goto out;
2540 } 2499 }
2541 ret = nfs_ok; 2500 ret = nfs_ok;
@@ -2791,7 +2750,7 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
2791 bool new_access; 2750 bool new_access;
2792 __be32 status; 2751 __be32 status;
2793 2752
2794 new_access = !test_bit(op_share_access, &stp->st_access_bmap); 2753 new_access = !test_access(op_share_access, stp);
2795 if (new_access) { 2754 if (new_access) {
2796 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open); 2755 status = nfs4_get_vfs_file(rqstp, fp, cur_fh, open);
2797 if (status) 2756 if (status)
@@ -2806,8 +2765,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct nfs4_file *fp, struct svc_fh *c
2806 return status; 2765 return status;
2807 } 2766 }
2808 /* remember the open */ 2767 /* remember the open */
2809 __set_bit(op_share_access, &stp->st_access_bmap); 2768 set_access(op_share_access, stp);
2810 __set_bit(open->op_share_deny, &stp->st_deny_bmap); 2769 set_deny(open->op_share_deny, stp);
2811 2770
2812 return nfs_ok; 2771 return nfs_ok;
2813} 2772}
@@ -3155,10 +3114,17 @@ out:
3155static struct lock_manager nfsd4_manager = { 3114static struct lock_manager nfsd4_manager = {
3156}; 3115};
3157 3116
3117static bool grace_ended;
3118
3158static void 3119static void
3159nfsd4_end_grace(void) 3120nfsd4_end_grace(void)
3160{ 3121{
3122 /* do nothing if grace period already ended */
3123 if (grace_ended)
3124 return;
3125
3161 dprintk("NFSD: end of grace period\n"); 3126 dprintk("NFSD: end of grace period\n");
3127 grace_ended = true;
3162 nfsd4_record_grace_done(&init_net, boot_time); 3128 nfsd4_record_grace_done(&init_net, boot_time);
3163 locks_end_grace(&nfsd4_manager); 3129 locks_end_grace(&nfsd4_manager);
3164 /* 3130 /*
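
nfsd4_end_grace() above becomes idempotent: a grace_ended flag makes repeated laundromat calls harmless, and nfs4_state_start() re-arms the flag when a new grace period begins. The pattern in miniature, as a runnable sketch:

#include <stdbool.h>
#include <stdio.h>

static bool grace_ended;

/* end-of-grace work must run exactly once per grace period */
static void end_grace(void)
{
	if (grace_ended)		/* already done: nothing to do */
		return;
	grace_ended = true;
	puts("end of grace period");
}

/* re-arm on (re)start, as nfs4_state_start() does in the hunk below */
static void start_grace(void)
{
	grace_ended = false;
	puts("starting grace period");
}

int main(void)
{
	start_grace();
	end_grace();
	end_grace();			/* no-op on the second call */
	return 0;
}
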
@@ -3183,8 +3149,7 @@ nfs4_laundromat(void)
3183 nfs4_lock_state(); 3149 nfs4_lock_state();
3184 3150
3185 dprintk("NFSD: laundromat service - starting\n"); 3151 dprintk("NFSD: laundromat service - starting\n");
3186 if (locks_in_grace()) 3152 nfsd4_end_grace();
3187 nfsd4_end_grace();
3188 INIT_LIST_HEAD(&reaplist); 3153 INIT_LIST_HEAD(&reaplist);
3189 spin_lock(&client_lock); 3154 spin_lock(&client_lock);
3190 list_for_each_safe(pos, next, &client_lru) { 3155 list_for_each_safe(pos, next, &client_lru) {
@@ -3276,18 +3241,18 @@ STALE_STATEID(stateid_t *stateid)
3276} 3241}
3277 3242
3278static inline int 3243static inline int
3279access_permit_read(unsigned long access_bmap) 3244access_permit_read(struct nfs4_ol_stateid *stp)
3280{ 3245{
3281 return test_bit(NFS4_SHARE_ACCESS_READ, &access_bmap) || 3246 return test_access(NFS4_SHARE_ACCESS_READ, stp) ||
3282 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap) || 3247 test_access(NFS4_SHARE_ACCESS_BOTH, stp) ||
3283 test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap); 3248 test_access(NFS4_SHARE_ACCESS_WRITE, stp);
3284} 3249}
3285 3250
3286static inline int 3251static inline int
3287access_permit_write(unsigned long access_bmap) 3252access_permit_write(struct nfs4_ol_stateid *stp)
3288{ 3253{
3289 return test_bit(NFS4_SHARE_ACCESS_WRITE, &access_bmap) || 3254 return test_access(NFS4_SHARE_ACCESS_WRITE, stp) ||
3290 test_bit(NFS4_SHARE_ACCESS_BOTH, &access_bmap); 3255 test_access(NFS4_SHARE_ACCESS_BOTH, stp);
3291} 3256}
3292 3257
3293static 3258static
@@ -3298,9 +3263,9 @@ __be32 nfs4_check_openmode(struct nfs4_ol_stateid *stp, int flags)
3298 /* For lock stateid's, we test the parent open, not the lock: */ 3263 /* For lock stateid's, we test the parent open, not the lock: */
3299 if (stp->st_openstp) 3264 if (stp->st_openstp)
3300 stp = stp->st_openstp; 3265 stp = stp->st_openstp;
3301 if ((flags & WR_STATE) && (!access_permit_write(stp->st_access_bmap))) 3266 if ((flags & WR_STATE) && !access_permit_write(stp))
3302 goto out; 3267 goto out;
3303 if ((flags & RD_STATE) && (!access_permit_read(stp->st_access_bmap))) 3268 if ((flags & RD_STATE) && !access_permit_read(stp))
3304 goto out; 3269 goto out;
3305 status = nfs_ok; 3270 status = nfs_ok;
3306out: 3271out:
@@ -3340,7 +3305,7 @@ static bool stateid_generation_after(stateid_t *a, stateid_t *b)
3340 return (s32)a->si_generation - (s32)b->si_generation > 0; 3305 return (s32)a->si_generation - (s32)b->si_generation > 0;
3341} 3306}
3342 3307
3343static int check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session) 3308static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_session)
3344{ 3309{
3345 /* 3310 /*
3346 * When sessions are used the stateid generation number is ignored 3311 * When sessions are used the stateid generation number is ignored
@@ -3649,10 +3614,10 @@ out:
3649 3614
3650static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access) 3615static inline void nfs4_stateid_downgrade_bit(struct nfs4_ol_stateid *stp, u32 access)
3651{ 3616{
3652 if (!test_bit(access, &stp->st_access_bmap)) 3617 if (!test_access(access, stp))
3653 return; 3618 return;
3654 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access)); 3619 nfs4_file_put_access(stp->st_file, nfs4_access_to_omode(access));
3655 __clear_bit(access, &stp->st_access_bmap); 3620 clear_access(access, stp);
3656} 3621}
3657 3622
3658static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access) 3623static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_access)
@@ -3674,12 +3639,12 @@ static inline void nfs4_stateid_downgrade(struct nfs4_ol_stateid *stp, u32 to_ac
3674} 3639}
3675 3640
3676static void 3641static void
3677reset_union_bmap_deny(unsigned long deny, unsigned long *bmap) 3642reset_union_bmap_deny(unsigned long deny, struct nfs4_ol_stateid *stp)
3678{ 3643{
3679 int i; 3644 int i;
3680 for (i = 0; i < 4; i++) { 3645 for (i = 0; i < 4; i++) {
3681 if ((i & deny) != i) 3646 if ((i & deny) != i)
3682 __clear_bit(i, bmap); 3647 clear_deny(i, stp);
3683 } 3648 }
3684} 3649}
3685 3650
@@ -3706,19 +3671,19 @@ nfsd4_open_downgrade(struct svc_rqst *rqstp,
3706 if (status) 3671 if (status)
3707 goto out; 3672 goto out;
3708 status = nfserr_inval; 3673 status = nfserr_inval;
3709 if (!test_bit(od->od_share_access, &stp->st_access_bmap)) { 3674 if (!test_access(od->od_share_access, stp)) {
3710 dprintk("NFSD:access not a subset current bitmap: 0x%lx, input access=%08x\n", 3675 dprintk("NFSD: access not a subset current bitmap: 0x%lx, input access=%08x\n",
3711 stp->st_access_bmap, od->od_share_access); 3676 stp->st_access_bmap, od->od_share_access);
3712 goto out; 3677 goto out;
3713 } 3678 }
3714 if (!test_bit(od->od_share_deny, &stp->st_deny_bmap)) { 3679 if (!test_deny(od->od_share_deny, stp)) {
3715 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n", 3680 dprintk("NFSD:deny not a subset current bitmap: 0x%lx, input deny=%08x\n",
3716 stp->st_deny_bmap, od->od_share_deny); 3681 stp->st_deny_bmap, od->od_share_deny);
3717 goto out; 3682 goto out;
3718 } 3683 }
3719 nfs4_stateid_downgrade(stp, od->od_share_access); 3684 nfs4_stateid_downgrade(stp, od->od_share_access);
3720 3685
3721 reset_union_bmap_deny(od->od_share_deny, &stp->st_deny_bmap); 3686 reset_union_bmap_deny(od->od_share_deny, stp);
3722 3687
3723 update_stateid(&stp->st_stid.sc_stateid); 3688 update_stateid(&stp->st_stid.sc_stateid);
3724 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t)); 3689 memcpy(&od->od_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4008,13 +3973,13 @@ static void get_lock_access(struct nfs4_ol_stateid *lock_stp, u32 access)
4008 struct nfs4_file *fp = lock_stp->st_file; 3973 struct nfs4_file *fp = lock_stp->st_file;
4009 int oflag = nfs4_access_to_omode(access); 3974 int oflag = nfs4_access_to_omode(access);
4010 3975
4011 if (test_bit(access, &lock_stp->st_access_bmap)) 3976 if (test_access(access, lock_stp))
4012 return; 3977 return;
4013 nfs4_file_get_access(fp, oflag); 3978 nfs4_file_get_access(fp, oflag);
4014 __set_bit(access, &lock_stp->st_access_bmap); 3979 set_access(access, lock_stp);
4015} 3980}
4016 3981
4017__be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new) 3982static __be32 lookup_or_create_lock_state(struct nfsd4_compound_state *cstate, struct nfs4_ol_stateid *ost, struct nfsd4_lock *lock, struct nfs4_ol_stateid **lst, bool *new)
4018{ 3983{
4019 struct nfs4_file *fi = ost->st_file; 3984 struct nfs4_file *fi = ost->st_file;
4020 struct nfs4_openowner *oo = openowner(ost->st_stateowner); 3985 struct nfs4_openowner *oo = openowner(ost->st_stateowner);
@@ -4055,7 +4020,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4055 struct nfs4_openowner *open_sop = NULL; 4020 struct nfs4_openowner *open_sop = NULL;
4056 struct nfs4_lockowner *lock_sop = NULL; 4021 struct nfs4_lockowner *lock_sop = NULL;
4057 struct nfs4_ol_stateid *lock_stp; 4022 struct nfs4_ol_stateid *lock_stp;
4058 struct nfs4_file *fp;
4059 struct file *filp = NULL; 4023 struct file *filp = NULL;
4060 struct file_lock file_lock; 4024 struct file_lock file_lock;
4061 struct file_lock conflock; 4025 struct file_lock conflock;
@@ -4123,7 +4087,6 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
4123 goto out; 4087 goto out;
4124 } 4088 }
4125 lock_sop = lockowner(lock_stp->st_stateowner); 4089 lock_sop = lockowner(lock_stp->st_stateowner);
4126 fp = lock_stp->st_file;
4127 4090
4128 lkflg = setlkflg(lock->lk_type); 4091 lkflg = setlkflg(lock->lk_type);
4129 status = nfs4_check_openmode(lock_stp, lkflg); 4092 status = nfs4_check_openmode(lock_stp, lkflg);
@@ -4715,6 +4678,7 @@ nfs4_state_start(void)
4715 nfsd4_client_tracking_init(&init_net); 4678 nfsd4_client_tracking_init(&init_net);
4716 boot_time = get_seconds(); 4679 boot_time = get_seconds();
4717 locks_start_grace(&nfsd4_manager); 4680 locks_start_grace(&nfsd4_manager);
4681 grace_ended = false;
4718 printk(KERN_INFO "NFSD: starting %ld-second grace period\n", 4682 printk(KERN_INFO "NFSD: starting %ld-second grace period\n",
4719 nfsd4_grace); 4683 nfsd4_grace);
4720 ret = set_callback_cred(); 4684 ret = set_callback_cred();
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 74c00bc92b9a..4949667c84ea 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1674,12 +1674,12 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
1674 1674
1675static void write32(__be32 **p, u32 n) 1675static void write32(__be32 **p, u32 n)
1676{ 1676{
1677 *(*p)++ = n; 1677 *(*p)++ = htonl(n);
1678} 1678}
1679 1679
1680static void write64(__be32 **p, u64 n) 1680static void write64(__be32 **p, u64 n)
1681{ 1681{
1682 write32(p, (u32)(n >> 32)); 1682 write32(p, (n >> 32));
1683 write32(p, (u32)n); 1683 write32(p, (u32)n);
1684} 1684}
1685 1685
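
The write32()/write64() fix above matters because XDR is big-endian on the wire: each 32-bit word has to pass through htonl(), and a 64-bit value is emitted high word first. A runnable sketch that shows the resulting byte order regardless of host endianness:

#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

/* store one XDR word in network byte order */
static void write32(uint32_t **p, uint32_t n)
{
	*(*p)++ = htonl(n);
}

/* a 64-bit value is two XDR words, high word first */
static void write64(uint32_t **p, uint64_t n)
{
	write32(p, (uint32_t)(n >> 32));
	write32(p, (uint32_t)n);
}

int main(void)
{
	uint32_t buf[3], *p = buf;

	write32(&p, 0x12345678u);
	write64(&p, 0x0102030405060708ull);

	const unsigned char *b = (const unsigned char *)buf;
	for (int i = 0; i < 12; i++)
		printf("%02x ", b[i]);	/* 12 34 56 78 01 02 ... 08 on any host */
	printf("\n");
	return 0;
}
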
@@ -1744,15 +1744,16 @@ static void encode_seqid_op_tail(struct nfsd4_compoundres *resp, __be32 *save, _
1744} 1744}
1745 1745
1746/* Encode as an array of strings the string given with components 1746/* Encode as an array of strings the string given with components
1747 * separated @sep. 1747 * separated @sep, escaped with esc_enter and esc_exit.
1748 */ 1748 */
1749static __be32 nfsd4_encode_components(char sep, char *components, 1749static __be32 nfsd4_encode_components_esc(char sep, char *components,
1750 __be32 **pp, int *buflen) 1750 __be32 **pp, int *buflen,
1751 char esc_enter, char esc_exit)
1751{ 1752{
1752 __be32 *p = *pp; 1753 __be32 *p = *pp;
1753 __be32 *countp = p; 1754 __be32 *countp = p;
1754 int strlen, count=0; 1755 int strlen, count=0;
1755 char *str, *end; 1756 char *str, *end, *next;
1756 1757
1757 dprintk("nfsd4_encode_components(%s)\n", components); 1758 dprintk("nfsd4_encode_components(%s)\n", components);
1758 if ((*buflen -= 4) < 0) 1759 if ((*buflen -= 4) < 0)
@@ -1760,8 +1761,23 @@ static __be32 nfsd4_encode_components(char sep, char *components,
1760 WRITE32(0); /* We will fill this in with @count later */ 1761 WRITE32(0); /* We will fill this in with @count later */
1761 end = str = components; 1762 end = str = components;
1762 while (*end) { 1763 while (*end) {
1763 for (; *end && (*end != sep); end++) 1764 bool found_esc = false;
1764 ; /* Point to end of component */ 1765
1766 /* try to parse as esc_start, ..., esc_end, sep */
1767 if (*str == esc_enter) {
1768 for (; *end && (*end != esc_exit); end++)
1769 /* find esc_exit or end of string */;
1770 next = end + 1;
1771 if (*end && (!*next || *next == sep)) {
1772 str++;
1773 found_esc = true;
1774 }
1775 }
1776
1777 if (!found_esc)
1778 for (; *end && (*end != sep); end++)
1779 /* find sep or end of string */;
1780
1765 strlen = end - str; 1781 strlen = end - str;
1766 if (strlen) { 1782 if (strlen) {
1767 if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0) 1783 if ((*buflen -= ((XDR_QUADLEN(strlen) << 2) + 4)) < 0)
@@ -1780,6 +1796,15 @@ static __be32 nfsd4_encode_components(char sep, char *components,
1780 return 0; 1796 return 0;
1781} 1797}
1782 1798
1799/* Encode as an array of strings the string given with components
1800 * separated @sep.
1801 */
1802static __be32 nfsd4_encode_components(char sep, char *components,
1803 __be32 **pp, int *buflen)
1804{
1805 return nfsd4_encode_components_esc(sep, components, pp, buflen, 0, 0);
1806}
1807
1783/* 1808/*
1784 * encode a location element of a fs_locations structure 1809 * encode a location element of a fs_locations structure
1785 */ 1810 */
@@ -1789,7 +1814,8 @@ static __be32 nfsd4_encode_fs_location4(struct nfsd4_fs_location *location,
1789 __be32 status; 1814 __be32 status;
1790 __be32 *p = *pp; 1815 __be32 *p = *pp;
1791 1816
1792 status = nfsd4_encode_components(':', location->hosts, &p, buflen); 1817 status = nfsd4_encode_components_esc(':', location->hosts, &p, buflen,
1818 '[', ']');
1793 if (status) 1819 if (status)
1794 return status; 1820 return status;
1795 status = nfsd4_encode_components('/', location->path, &p, buflen); 1821 status = nfsd4_encode_components('/', location->path, &p, buflen);
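
nfsd4_encode_components_esc() above exists so that fs_locations host strings such as "[::1]:host2" split on ':' without breaking IPv6 literals apart: a component wrapped in '[' ... ']' and followed by the separator (or end of string) is emitted with the brackets stripped. A standalone sketch of that splitting rule, simplified to print components instead of XDR-encoding them:

#include <stdio.h>
#include <string.h>

/* split components on sep, but keep an esc_enter ... esc_exit span intact */
static void print_components(const char *components, char sep,
			     char esc_enter, char esc_exit)
{
	const char *str = components, *end = components;

	while (*end) {
		int found_esc = 0;

		if (*str == esc_enter) {
			while (*end && *end != esc_exit)
				end++;			/* find esc_exit or end */
			const char *next = end + 1;
			if (*end && (!*next || *next == sep)) {
				str++;			/* drop the opening bracket */
				found_esc = 1;
			}
		}
		if (!found_esc)
			while (*end && *end != sep)
				end++;			/* plain component */

		if (end > str)
			printf("component: %.*s\n", (int)(end - str), str);
		if (*end)
			end++;				/* skip esc_exit or sep */
		if (found_esc && *end == sep)
			end++;				/* and the sep after the bracket */
		str = end;
	}
}

int main(void)
{
	print_components("[fe80::1]:nfs.example.org:10.0.0.1", ':', '[', ']');
	return 0;
}
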
@@ -3251,7 +3277,7 @@ nfsd4_encode_write(struct nfsd4_compoundres *resp, __be32 nfserr, struct nfsd4_w
3251} 3277}
3252 3278
3253static __be32 3279static __be32
3254nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr, 3280nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, __be32 nfserr,
3255 struct nfsd4_exchange_id *exid) 3281 struct nfsd4_exchange_id *exid)
3256{ 3282{
3257 __be32 *p; 3283 __be32 *p;
@@ -3306,7 +3332,7 @@ nfsd4_encode_exchange_id(struct nfsd4_compoundres *resp, int nfserr,
3306} 3332}
3307 3333
3308static __be32 3334static __be32
3309nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr, 3335nfsd4_encode_create_session(struct nfsd4_compoundres *resp, __be32 nfserr,
3310 struct nfsd4_create_session *sess) 3336 struct nfsd4_create_session *sess)
3311{ 3337{
3312 __be32 *p; 3338 __be32 *p;
@@ -3355,14 +3381,14 @@ nfsd4_encode_create_session(struct nfsd4_compoundres *resp, int nfserr,
3355} 3381}
3356 3382
3357static __be32 3383static __be32
3358nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, int nfserr, 3384nfsd4_encode_destroy_session(struct nfsd4_compoundres *resp, __be32 nfserr,
3359 struct nfsd4_destroy_session *destroy_session) 3385 struct nfsd4_destroy_session *destroy_session)
3360{ 3386{
3361 return nfserr; 3387 return nfserr;
3362} 3388}
3363 3389
3364static __be32 3390static __be32
3365nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr, 3391nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
3366 struct nfsd4_free_stateid *free_stateid) 3392 struct nfsd4_free_stateid *free_stateid)
3367{ 3393{
3368 __be32 *p; 3394 __be32 *p;
@@ -3371,13 +3397,13 @@ nfsd4_encode_free_stateid(struct nfsd4_compoundres *resp, int nfserr,
3371 return nfserr; 3397 return nfserr;
3372 3398
3373 RESERVE_SPACE(4); 3399 RESERVE_SPACE(4);
3374 WRITE32(nfserr); 3400 *p++ = nfserr;
3375 ADJUST_ARGS(); 3401 ADJUST_ARGS();
3376 return nfserr; 3402 return nfserr;
3377} 3403}
3378 3404
3379static __be32 3405static __be32
3380nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr, 3406nfsd4_encode_sequence(struct nfsd4_compoundres *resp, __be32 nfserr,
3381 struct nfsd4_sequence *seq) 3407 struct nfsd4_sequence *seq)
3382{ 3408{
3383 __be32 *p; 3409 __be32 *p;
@@ -3399,8 +3425,8 @@ nfsd4_encode_sequence(struct nfsd4_compoundres *resp, int nfserr,
3399 return 0; 3425 return 0;
3400} 3426}
3401 3427
3402__be32 3428static __be32
3403nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, int nfserr, 3429nfsd4_encode_test_stateid(struct nfsd4_compoundres *resp, __be32 nfserr,
3404 struct nfsd4_test_stateid *test_stateid) 3430 struct nfsd4_test_stateid *test_stateid)
3405{ 3431{
3406 struct nfsd4_test_stateid_id *stateid, *next; 3432 struct nfsd4_test_stateid_id *stateid, *next;
@@ -3503,7 +3529,7 @@ static nfsd4_enc nfsd4_enc_ops[] = {
3503 * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so 3529 * Our se_fmaxresp_cached will always be a multiple of PAGE_SIZE, and so
3504 * will be at least a page and will therefore hold the xdr_buf head. 3530 * will be at least a page and will therefore hold the xdr_buf head.
3505 */ 3531 */
3506int nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad) 3532__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *resp, u32 pad)
3507{ 3533{
3508 struct xdr_buf *xb = &resp->rqstp->rq_res; 3534 struct xdr_buf *xb = &resp->rqstp->rq_res;
3509 struct nfsd4_session *session = NULL; 3535 struct nfsd4_session *session = NULL;
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 2c53be6d3579..c55298ed5772 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -127,7 +127,17 @@ static const struct file_operations transaction_ops = {
127 127
128static int exports_open(struct inode *inode, struct file *file) 128static int exports_open(struct inode *inode, struct file *file)
129{ 129{
130 return seq_open(file, &nfs_exports_op); 130 int err;
131 struct seq_file *seq;
132 struct nfsd_net *nn = net_generic(&init_net, nfsd_net_id);
133
134 err = seq_open(file, &nfs_exports_op);
135 if (err)
136 return err;
137
138 seq = file->private_data;
139 seq->private = nn->svc_export_cache;
140 return 0;
131} 141}
132 142
133static const struct file_operations exports_operations = { 143static const struct file_operations exports_operations = {
@@ -345,7 +355,7 @@ static ssize_t write_filehandle(struct file *file, char *buf, size_t size)
345 if (!dom) 355 if (!dom)
346 return -ENOMEM; 356 return -ENOMEM;
347 357
348 len = exp_rootfh(dom, path, &fh, maxsize); 358 len = exp_rootfh(&init_net, dom, path, &fh, maxsize);
349 auth_domain_put(dom); 359 auth_domain_put(dom);
350 if (len) 360 if (len)
351 return len; 361 return len;
@@ -651,6 +661,7 @@ static ssize_t __write_ports_addfd(char *buf)
651{ 661{
652 char *mesg = buf; 662 char *mesg = buf;
653 int fd, err; 663 int fd, err;
664 struct net *net = &init_net;
654 665
655 err = get_int(&mesg, &fd); 666 err = get_int(&mesg, &fd);
656 if (err != 0 || fd < 0) 667 if (err != 0 || fd < 0)
@@ -662,6 +673,8 @@ static ssize_t __write_ports_addfd(char *buf)
662 673
663 err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT); 674 err = svc_addsock(nfsd_serv, fd, buf, SIMPLE_TRANSACTION_LIMIT);
664 if (err < 0) { 675 if (err < 0) {
676 if (nfsd_serv->sv_nrthreads == 1)
677 svc_shutdown_net(nfsd_serv, net);
665 svc_destroy(nfsd_serv); 678 svc_destroy(nfsd_serv);
666 return err; 679 return err;
667 } 680 }
@@ -699,6 +712,7 @@ static ssize_t __write_ports_addxprt(char *buf)
699 char transport[16]; 712 char transport[16];
700 struct svc_xprt *xprt; 713 struct svc_xprt *xprt;
701 int port, err; 714 int port, err;
715 struct net *net = &init_net;
702 716
703 if (sscanf(buf, "%15s %4u", transport, &port) != 2) 717 if (sscanf(buf, "%15s %4u", transport, &port) != 2)
704 return -EINVAL; 718 return -EINVAL;
@@ -710,12 +724,12 @@ static ssize_t __write_ports_addxprt(char *buf)
710 if (err != 0) 724 if (err != 0)
711 return err; 725 return err;
712 726
713 err = svc_create_xprt(nfsd_serv, transport, &init_net, 727 err = svc_create_xprt(nfsd_serv, transport, net,
714 PF_INET, port, SVC_SOCK_ANONYMOUS); 728 PF_INET, port, SVC_SOCK_ANONYMOUS);
715 if (err < 0) 729 if (err < 0)
716 goto out_err; 730 goto out_err;
717 731
718 err = svc_create_xprt(nfsd_serv, transport, &init_net, 732 err = svc_create_xprt(nfsd_serv, transport, net,
719 PF_INET6, port, SVC_SOCK_ANONYMOUS); 733 PF_INET6, port, SVC_SOCK_ANONYMOUS);
720 if (err < 0 && err != -EAFNOSUPPORT) 734 if (err < 0 && err != -EAFNOSUPPORT)
721 goto out_close; 735 goto out_close;
@@ -724,12 +738,14 @@ static ssize_t __write_ports_addxprt(char *buf)
724 nfsd_serv->sv_nrthreads--; 738 nfsd_serv->sv_nrthreads--;
725 return 0; 739 return 0;
726out_close: 740out_close:
727 xprt = svc_find_xprt(nfsd_serv, transport, &init_net, PF_INET, port); 741 xprt = svc_find_xprt(nfsd_serv, transport, net, PF_INET, port);
728 if (xprt != NULL) { 742 if (xprt != NULL) {
729 svc_close_xprt(xprt); 743 svc_close_xprt(xprt);
730 svc_xprt_put(xprt); 744 svc_xprt_put(xprt);
731 } 745 }
732out_err: 746out_err:
747 if (nfsd_serv->sv_nrthreads == 1)
748 svc_shutdown_net(nfsd_serv, net);
733 svc_destroy(nfsd_serv); 749 svc_destroy(nfsd_serv);
734 return err; 750 return err;
735} 751}
@@ -1127,7 +1143,34 @@ static int create_proc_exports_entry(void)
1127#endif 1143#endif
1128 1144
1129int nfsd_net_id; 1145int nfsd_net_id;
1146
1147static __net_init int nfsd_init_net(struct net *net)
1148{
1149 int retval;
1150
1151 retval = nfsd_export_init(net);
1152 if (retval)
1153 goto out_export_error;
1154 retval = nfsd_idmap_init(net);
1155 if (retval)
1156 goto out_idmap_error;
1157 return 0;
1158
1159out_idmap_error:
1160 nfsd_export_shutdown(net);
1161out_export_error:
1162 return retval;
1163}
1164
1165static __net_exit void nfsd_exit_net(struct net *net)
1166{
1167 nfsd_idmap_shutdown(net);
1168 nfsd_export_shutdown(net);
1169}
1170
1130static struct pernet_operations nfsd_net_ops = { 1171static struct pernet_operations nfsd_net_ops = {
1172 .init = nfsd_init_net,
1173 .exit = nfsd_exit_net,
1131 .id = &nfsd_net_id, 1174 .id = &nfsd_net_id,
1132 .size = sizeof(struct nfsd_net), 1175 .size = sizeof(struct nfsd_net),
1133}; 1176};
@@ -1154,16 +1197,10 @@ static int __init init_nfsd(void)
1154 retval = nfsd_reply_cache_init(); 1197 retval = nfsd_reply_cache_init();
1155 if (retval) 1198 if (retval)
1156 goto out_free_stat; 1199 goto out_free_stat;
1157 retval = nfsd_export_init();
1158 if (retval)
1159 goto out_free_cache;
1160 nfsd_lockd_init(); /* lockd->nfsd callbacks */ 1200 nfsd_lockd_init(); /* lockd->nfsd callbacks */
1161 retval = nfsd_idmap_init();
1162 if (retval)
1163 goto out_free_lockd;
1164 retval = create_proc_exports_entry(); 1201 retval = create_proc_exports_entry();
1165 if (retval) 1202 if (retval)
1166 goto out_free_idmap; 1203 goto out_free_lockd;
1167 retval = register_filesystem(&nfsd_fs_type); 1204 retval = register_filesystem(&nfsd_fs_type);
1168 if (retval) 1205 if (retval)
1169 goto out_free_all; 1206 goto out_free_all;
@@ -1171,12 +1208,8 @@ static int __init init_nfsd(void)
1171out_free_all: 1208out_free_all:
1172 remove_proc_entry("fs/nfs/exports", NULL); 1209 remove_proc_entry("fs/nfs/exports", NULL);
1173 remove_proc_entry("fs/nfs", NULL); 1210 remove_proc_entry("fs/nfs", NULL);
1174out_free_idmap:
1175 nfsd_idmap_shutdown();
1176out_free_lockd: 1211out_free_lockd:
1177 nfsd_lockd_shutdown(); 1212 nfsd_lockd_shutdown();
1178 nfsd_export_shutdown();
1179out_free_cache:
1180 nfsd_reply_cache_shutdown(); 1213 nfsd_reply_cache_shutdown();
1181out_free_stat: 1214out_free_stat:
1182 nfsd_stat_shutdown(); 1215 nfsd_stat_shutdown();
@@ -1192,13 +1225,11 @@ out_unregister_notifier:
1192 1225
1193static void __exit exit_nfsd(void) 1226static void __exit exit_nfsd(void)
1194{ 1227{
1195 nfsd_export_shutdown();
1196 nfsd_reply_cache_shutdown(); 1228 nfsd_reply_cache_shutdown();
1197 remove_proc_entry("fs/nfs/exports", NULL); 1229 remove_proc_entry("fs/nfs/exports", NULL);
1198 remove_proc_entry("fs/nfs", NULL); 1230 remove_proc_entry("fs/nfs", NULL);
1199 nfsd_stat_shutdown(); 1231 nfsd_stat_shutdown();
1200 nfsd_lockd_shutdown(); 1232 nfsd_lockd_shutdown();
1201 nfsd_idmap_shutdown();
1202 nfsd4_free_slabs(); 1233 nfsd4_free_slabs();
1203 nfsd_fault_inject_cleanup(); 1234 nfsd_fault_inject_cleanup();
1204 unregister_filesystem(&nfsd_fs_type); 1235 unregister_filesystem(&nfsd_fs_type);
diff --git a/fs/nfsd/nfsfh.c b/fs/nfsd/nfsfh.c
index 68454e75fce9..cc793005a87c 100644
--- a/fs/nfsd/nfsfh.c
+++ b/fs/nfsd/nfsfh.c
@@ -636,7 +636,7 @@ fh_put(struct svc_fh *fhp)
636#endif 636#endif
637 } 637 }
638 if (exp) { 638 if (exp) {
639 cache_put(&exp->h, &svc_export_cache); 639 exp_put(exp);
640 fhp->fh_export = NULL; 640 fhp->fh_export = NULL;
641 } 641 }
642 return; 642 return;
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 28dfad39f0c5..ee709fc8f58b 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -11,6 +11,7 @@
11#include <linux/module.h> 11#include <linux/module.h>
12#include <linux/fs_struct.h> 12#include <linux/fs_struct.h>
13#include <linux/swap.h> 13#include <linux/swap.h>
14#include <linux/nsproxy.h>
14 15
15#include <linux/sunrpc/stats.h> 16#include <linux/sunrpc/stats.h>
16#include <linux/sunrpc/svcsock.h> 17#include <linux/sunrpc/svcsock.h>
@@ -220,7 +221,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
220 ret = nfsd_init_socks(port); 221 ret = nfsd_init_socks(port);
221 if (ret) 222 if (ret)
222 goto out_racache; 223 goto out_racache;
223 ret = lockd_up(); 224 ret = lockd_up(&init_net);
224 if (ret) 225 if (ret)
225 goto out_racache; 226 goto out_racache;
226 ret = nfs4_state_start(); 227 ret = nfs4_state_start();
@@ -229,7 +230,7 @@ static int nfsd_startup(unsigned short port, int nrservs)
229 nfsd_up = true; 230 nfsd_up = true;
230 return 0; 231 return 0;
231out_lockd: 232out_lockd:
232 lockd_down(); 233 lockd_down(&init_net);
233out_racache: 234out_racache:
234 nfsd_racache_shutdown(); 235 nfsd_racache_shutdown();
235 return ret; 236 return ret;
@@ -246,7 +247,7 @@ static void nfsd_shutdown(void)
246 if (!nfsd_up) 247 if (!nfsd_up)
247 return; 248 return;
248 nfs4_state_shutdown(); 249 nfs4_state_shutdown();
249 lockd_down(); 250 lockd_down(&init_net);
250 nfsd_racache_shutdown(); 251 nfsd_racache_shutdown();
251 nfsd_up = false; 252 nfsd_up = false;
252} 253}
@@ -261,7 +262,7 @@ static void nfsd_last_thread(struct svc_serv *serv, struct net *net)
261 262
262 printk(KERN_WARNING "nfsd: last server has exited, flushing export " 263 printk(KERN_WARNING "nfsd: last server has exited, flushing export "
263 "cache\n"); 264 "cache\n");
264 nfsd_export_flush(); 265 nfsd_export_flush(net);
265} 266}
266 267
267void nfsd_reset_versions(void) 268void nfsd_reset_versions(void)
@@ -330,6 +331,8 @@ static int nfsd_get_default_max_blksize(void)
330 331
331int nfsd_create_serv(void) 332int nfsd_create_serv(void)
332{ 333{
334 int error;
335
333 WARN_ON(!mutex_is_locked(&nfsd_mutex)); 336 WARN_ON(!mutex_is_locked(&nfsd_mutex));
334 if (nfsd_serv) { 337 if (nfsd_serv) {
335 svc_get(nfsd_serv); 338 svc_get(nfsd_serv);
@@ -343,6 +346,12 @@ int nfsd_create_serv(void)
343 if (nfsd_serv == NULL) 346 if (nfsd_serv == NULL)
344 return -ENOMEM; 347 return -ENOMEM;
345 348
349 error = svc_bind(nfsd_serv, current->nsproxy->net_ns);
350 if (error < 0) {
351 svc_destroy(nfsd_serv);
352 return error;
353 }
354
346 set_max_drc(); 355 set_max_drc();
347 do_gettimeofday(&nfssvc_boot); /* record boot time */ 356 do_gettimeofday(&nfssvc_boot); /* record boot time */
348 return 0; 357 return 0;
@@ -373,6 +382,7 @@ int nfsd_set_nrthreads(int n, int *nthreads)
373 int i = 0; 382 int i = 0;
374 int tot = 0; 383 int tot = 0;
375 int err = 0; 384 int err = 0;
385 struct net *net = &init_net;
376 386
377 WARN_ON(!mutex_is_locked(&nfsd_mutex)); 387 WARN_ON(!mutex_is_locked(&nfsd_mutex));
378 388
@@ -417,6 +427,9 @@ int nfsd_set_nrthreads(int n, int *nthreads)
417 if (err) 427 if (err)
418 break; 428 break;
419 } 429 }
430
431 if (nfsd_serv->sv_nrthreads == 1)
432 svc_shutdown_net(nfsd_serv, net);
420 svc_destroy(nfsd_serv); 433 svc_destroy(nfsd_serv);
421 434
422 return err; 435 return err;
@@ -432,6 +445,7 @@ nfsd_svc(unsigned short port, int nrservs)
432{ 445{
433 int error; 446 int error;
434 bool nfsd_up_before; 447 bool nfsd_up_before;
448 struct net *net = &init_net;
435 449
436 mutex_lock(&nfsd_mutex); 450 mutex_lock(&nfsd_mutex);
437 dprintk("nfsd: creating service\n"); 451 dprintk("nfsd: creating service\n");
@@ -464,6 +478,8 @@ out_shutdown:
464 if (error < 0 && !nfsd_up_before) 478 if (error < 0 && !nfsd_up_before)
465 nfsd_shutdown(); 479 nfsd_shutdown();
466out_destroy: 480out_destroy:
481 if (nfsd_serv->sv_nrthreads == 1)
482 svc_shutdown_net(nfsd_serv, net);
467 svc_destroy(nfsd_serv); /* Release server */ 483 svc_destroy(nfsd_serv); /* Release server */
468out: 484out:
469 mutex_unlock(&nfsd_mutex); 485 mutex_unlock(&nfsd_mutex);
@@ -547,6 +563,9 @@ nfsd(void *vrqstp)
547 nfsdstats.th_cnt --; 563 nfsdstats.th_cnt --;
548 564
549out: 565out:
566 if (rqstp->rq_server->sv_nrthreads == 1)
567 svc_shutdown_net(rqstp->rq_server, &init_net);
568
550 /* Release the thread */ 569 /* Release the thread */
551 svc_exit_thread(rqstp); 570 svc_exit_thread(rqstp);
552 571
@@ -659,8 +678,12 @@ int nfsd_pool_stats_open(struct inode *inode, struct file *file)
659int nfsd_pool_stats_release(struct inode *inode, struct file *file) 678int nfsd_pool_stats_release(struct inode *inode, struct file *file)
660{ 679{
661 int ret = seq_release(inode, file); 680 int ret = seq_release(inode, file);
681 struct net *net = &init_net;
682
662 mutex_lock(&nfsd_mutex); 683 mutex_lock(&nfsd_mutex);
663 /* this function really, really should have been called svc_put() */ 684 /* this function really, really should have been called svc_put() */
685 if (nfsd_serv->sv_nrthreads == 1)
686 svc_shutdown_net(nfsd_serv, net);
664 svc_destroy(nfsd_serv); 687 svc_destroy(nfsd_serv);
665 mutex_unlock(&nfsd_mutex); 688 mutex_unlock(&nfsd_mutex);
666 return ret; 689 return ret;
diff --git a/fs/nfsd/state.h b/fs/nfsd/state.h
index 89ab137d379a..849091e16ea6 100644
--- a/fs/nfsd/state.h
+++ b/fs/nfsd/state.h
@@ -232,7 +232,6 @@ struct nfs4_client {
232 time_t cl_time; /* time of last lease renewal */ 232 time_t cl_time; /* time of last lease renewal */
233 struct sockaddr_storage cl_addr; /* client ipaddress */ 233 struct sockaddr_storage cl_addr; /* client ipaddress */
234 u32 cl_flavor; /* setclientid pseudoflavor */ 234 u32 cl_flavor; /* setclientid pseudoflavor */
235 char *cl_principal; /* setclientid principal name */
236 struct svc_cred cl_cred; /* setclientid principal */ 235 struct svc_cred cl_cred; /* setclientid principal */
237 clientid_t cl_clientid; /* generated by server */ 236 clientid_t cl_clientid; /* generated by server */
238 nfs4_verifier cl_confirm; /* generated by server */ 237 nfs4_verifier cl_confirm; /* generated by server */
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 568666156ea4..c8bd9c3be7f7 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -2039,7 +2039,7 @@ nfsd_readdir(struct svc_rqst *rqstp, struct svc_fh *fhp, loff_t *offsetp,
2039 if (err) 2039 if (err)
2040 goto out; 2040 goto out;
2041 2041
2042 offset = vfs_llseek(file, offset, 0); 2042 offset = vfs_llseek(file, offset, SEEK_SET);
2043 if (offset < 0) { 2043 if (offset < 0) {
2044 err = nfserrno((int)offset); 2044 err = nfserrno((int)offset);
2045 goto out_close; 2045 goto out_close;
diff --git a/fs/nfsd/xdr4.h b/fs/nfsd/xdr4.h
index 1b3501598ab5..acd127d4ee82 100644
--- a/fs/nfsd/xdr4.h
+++ b/fs/nfsd/xdr4.h
@@ -60,7 +60,7 @@ struct nfsd4_compound_state {
60 __be32 *datap; 60 __be32 *datap;
61 size_t iovlen; 61 size_t iovlen;
62 u32 minorversion; 62 u32 minorversion;
63 u32 status; 63 __be32 status;
64 stateid_t current_stateid; 64 stateid_t current_stateid;
65 stateid_t save_stateid; 65 stateid_t save_stateid;
66 /* to indicate current and saved state id presents */ 66 /* to indicate current and saved state id presents */
@@ -364,7 +364,7 @@ struct nfsd4_test_stateid_id {
364}; 364};
365 365
366struct nfsd4_test_stateid { 366struct nfsd4_test_stateid {
367 __be32 ts_num_ids; 367 u32 ts_num_ids;
368 struct list_head ts_stateid_list; 368 struct list_head ts_stateid_list;
369}; 369};
370 370
@@ -549,7 +549,7 @@ int nfs4svc_decode_compoundargs(struct svc_rqst *, __be32 *,
549 struct nfsd4_compoundargs *); 549 struct nfsd4_compoundargs *);
550int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *, 550int nfs4svc_encode_compoundres(struct svc_rqst *, __be32 *,
551 struct nfsd4_compoundres *); 551 struct nfsd4_compoundres *);
552int nfsd4_check_resp_size(struct nfsd4_compoundres *, u32); 552__be32 nfsd4_check_resp_size(struct nfsd4_compoundres *, u32);
553void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *); 553void nfsd4_encode_operation(struct nfsd4_compoundres *, struct nfsd4_op *);
554void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op); 554void nfsd4_encode_replay(struct nfsd4_compoundres *resp, struct nfsd4_op *op);
555__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp, 555__be32 nfsd4_encode_fattr(struct svc_fh *fhp, struct svc_export *exp,
diff --git a/fs/nilfs2/file.c b/fs/nilfs2/file.c
index 26601529dc17..62cebc8e1a1f 100644
--- a/fs/nilfs2/file.c
+++ b/fs/nilfs2/file.c
@@ -37,6 +37,7 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
37 * This function should be implemented when the writeback function 37 * This function should be implemented when the writeback function
38 * will be implemented. 38 * will be implemented.
39 */ 39 */
40 struct the_nilfs *nilfs;
40 struct inode *inode = file->f_mapping->host; 41 struct inode *inode = file->f_mapping->host;
41 int err; 42 int err;
42 43
@@ -45,18 +46,21 @@ int nilfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
45 return err; 46 return err;
46 mutex_lock(&inode->i_mutex); 47 mutex_lock(&inode->i_mutex);
47 48
48 if (!nilfs_inode_dirty(inode)) { 49 if (nilfs_inode_dirty(inode)) {
49 mutex_unlock(&inode->i_mutex); 50 if (datasync)
50 return 0; 51 err = nilfs_construct_dsync_segment(inode->i_sb, inode,
52 0, LLONG_MAX);
53 else
54 err = nilfs_construct_segment(inode->i_sb);
51 } 55 }
52
53 if (datasync)
54 err = nilfs_construct_dsync_segment(inode->i_sb, inode, 0,
55 LLONG_MAX);
56 else
57 err = nilfs_construct_segment(inode->i_sb);
58
59 mutex_unlock(&inode->i_mutex); 56 mutex_unlock(&inode->i_mutex);
57
58 nilfs = inode->i_sb->s_fs_info;
59 if (!err && nilfs_test_opt(nilfs, BARRIER)) {
60 err = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
61 if (err != -EIO)
62 err = 0;
63 }
60 return err; 64 return err;
61} 65}
62 66
diff --git a/fs/nilfs2/gcinode.c b/fs/nilfs2/gcinode.c
index 08a07a218d26..57ceaf33d177 100644
--- a/fs/nilfs2/gcinode.c
+++ b/fs/nilfs2/gcinode.c
@@ -191,6 +191,8 @@ void nilfs_remove_all_gcinodes(struct the_nilfs *nilfs)
191 while (!list_empty(head)) { 191 while (!list_empty(head)) {
192 ii = list_first_entry(head, struct nilfs_inode_info, i_dirty); 192 ii = list_first_entry(head, struct nilfs_inode_info, i_dirty);
193 list_del_init(&ii->i_dirty); 193 list_del_init(&ii->i_dirty);
194 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
195 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
194 iput(&ii->vfs_inode); 196 iput(&ii->vfs_inode);
195 } 197 }
196} 198}
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 2a70fce70c65..06658caa18bd 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -692,8 +692,14 @@ static int nilfs_ioctl_sync(struct inode *inode, struct file *filp,
692 if (ret < 0) 692 if (ret < 0)
693 return ret; 693 return ret;
694 694
695 nilfs = inode->i_sb->s_fs_info;
696 if (nilfs_test_opt(nilfs, BARRIER)) {
697 ret = blkdev_issue_flush(inode->i_sb->s_bdev, GFP_KERNEL, NULL);
698 if (ret == -EIO)
699 return ret;
700 }
701
695 if (argp != NULL) { 702 if (argp != NULL) {
696 nilfs = inode->i_sb->s_fs_info;
697 down_read(&nilfs->ns_segctor_sem); 703 down_read(&nilfs->ns_segctor_sem);
698 cno = nilfs->ns_cno - 1; 704 cno = nilfs->ns_cno - 1;
699 up_read(&nilfs->ns_segctor_sem); 705 up_read(&nilfs->ns_segctor_sem);
diff --git a/fs/nilfs2/namei.c b/fs/nilfs2/namei.c
index 0bb2c2010b95..b72847988b78 100644
--- a/fs/nilfs2/namei.c
+++ b/fs/nilfs2/namei.c
@@ -508,31 +508,29 @@ static struct dentry *nilfs_fh_to_parent(struct super_block *sb, struct fid *fh,
508 return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen); 508 return nilfs_get_dentry(sb, fid->cno, fid->parent_ino, fid->parent_gen);
509} 509}
510 510
511static int nilfs_encode_fh(struct dentry *dentry, __u32 *fh, int *lenp, 511static int nilfs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
512 int connectable) 512 struct inode *parent)
513{ 513{
514 struct nilfs_fid *fid = (struct nilfs_fid *)fh; 514 struct nilfs_fid *fid = (struct nilfs_fid *)fh;
515 struct inode *inode = dentry->d_inode;
516 struct nilfs_root *root = NILFS_I(inode)->i_root; 515 struct nilfs_root *root = NILFS_I(inode)->i_root;
517 int type; 516 int type;
518 517
519 if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE || 518 if (parent && *lenp < NILFS_FID_SIZE_CONNECTABLE) {
520 (connectable && *lenp < NILFS_FID_SIZE_CONNECTABLE)) 519 *lenp = NILFS_FID_SIZE_CONNECTABLE;
520 return 255;
521 }
522 if (*lenp < NILFS_FID_SIZE_NON_CONNECTABLE) {
523 *lenp = NILFS_FID_SIZE_NON_CONNECTABLE;
521 return 255; 524 return 255;
525 }
522 526
523 fid->cno = root->cno; 527 fid->cno = root->cno;
524 fid->ino = inode->i_ino; 528 fid->ino = inode->i_ino;
525 fid->gen = inode->i_generation; 529 fid->gen = inode->i_generation;
526 530
527 if (connectable && !S_ISDIR(inode->i_mode)) { 531 if (parent) {
528 struct inode *parent;
529
530 spin_lock(&dentry->d_lock);
531 parent = dentry->d_parent->d_inode;
532 fid->parent_ino = parent->i_ino; 532 fid->parent_ino = parent->i_ino;
533 fid->parent_gen = parent->i_generation; 533 fid->parent_gen = parent->i_generation;
534 spin_unlock(&dentry->d_lock);
535
536 type = FILEID_NILFS_WITH_PARENT; 534 type = FILEID_NILFS_WITH_PARENT;
537 *lenp = NILFS_FID_SIZE_CONNECTABLE; 535 *lenp = NILFS_FID_SIZE_CONNECTABLE;
538 } else { 536 } else {
diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c
index 0e72ad6f22aa..88e11fb346b6 100644
--- a/fs/nilfs2/segment.c
+++ b/fs/nilfs2/segment.c
@@ -2309,6 +2309,8 @@ nilfs_remove_written_gcinodes(struct the_nilfs *nilfs, struct list_head *head)
2309 if (!test_bit(NILFS_I_UPDATED, &ii->i_state)) 2309 if (!test_bit(NILFS_I_UPDATED, &ii->i_state))
2310 continue; 2310 continue;
2311 list_del_init(&ii->i_dirty); 2311 list_del_init(&ii->i_dirty);
2312 truncate_inode_pages(&ii->vfs_inode.i_data, 0);
2313 nilfs_btnode_cache_clear(&ii->i_btnode_cache);
2312 iput(&ii->vfs_inode); 2314 iput(&ii->vfs_inode);
2313 } 2315 }
2314} 2316}
diff --git a/fs/nls/Kconfig b/fs/nls/Kconfig
index a39edc41becc..e2ce79ef48c4 100644
--- a/fs/nls/Kconfig
+++ b/fs/nls/Kconfig
@@ -30,7 +30,7 @@ config NLS_DEFAULT
30 cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1, 30 cp949, cp950, cp1251, cp1255, euc-jp, euc-kr, gb2312, iso8859-1,
31 iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7, 31 iso8859-2, iso8859-3, iso8859-4, iso8859-5, iso8859-6, iso8859-7,
32 iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15, 32 iso8859-8, iso8859-9, iso8859-13, iso8859-14, iso8859-15,
33 koi8-r, koi8-ru, koi8-u, sjis, tis-620, utf8. 33 koi8-r, koi8-ru, koi8-u, sjis, tis-620, macroman, utf8.
34 If you specify a wrong value, it will use the built-in NLS; 34 If you specify a wrong value, it will use the built-in NLS;
35 compatible with iso8859-1. 35 compatible with iso8859-1.
36 36
@@ -452,6 +452,161 @@ config NLS_KOI8_U
452 input/output character sets. Say Y here for the preferred Ukrainian 452 input/output character sets. Say Y here for the preferred Ukrainian
453 (koi8-u) and Belarusian (koi8-ru) character sets. 453 (koi8-u) and Belarusian (koi8-ru) character sets.
454 454
455config NLS_MAC_ROMAN
456 tristate "Codepage macroman"
457 ---help---
458 The Apple HFS file system family can deal with filenames in
459 native language character sets. These character sets are stored in
460 so-called MAC codepages. You need to include the appropriate
461 codepage if you want to be able to read/write these filenames on
462 Mac partitions correctly. This does apply to the filenames
463 only, not to the file contents. You can include several codepages;
464 say Y here if you want to include the Mac codepage that is used for
465 much of Europe -- United Kingdom, Germany, Spain, Italy, and [add
466 more countries here].
467
468 If unsure, say Y.
469
470config NLS_MAC_CELTIC
471 tristate "Codepage macceltic"
472 ---help---
473 The Apple HFS file system family can deal with filenames in
474 native language character sets. These character sets are stored in
475 so-called MAC codepages. You need to include the appropriate
476 codepage if you want to be able to read/write these filenames on
477 Mac partitions correctly. This does apply to the filenames
478 only, not to the file contents. You can include several codepages;
479 say Y here if you want to include the Mac codepage that is used for
480 Celtic.
481
482 If unsure, say Y.
483
484config NLS_MAC_CENTEURO
485 tristate "Codepage maccenteuro"
486 ---help---
487 The Apple HFS file system family can deal with filenames in
488 native language character sets. These character sets are stored in
489 so-called MAC codepages. You need to include the appropriate
490 codepage if you want to be able to read/write these filenames on
491 Mac partitions correctly. This does apply to the filenames
492 only, not to the file contents. You can include several codepages;
493 say Y here if you want to include the Mac codepage that is used for
494 Central Europe.
495
496 If unsure, say Y.
497
498config NLS_MAC_CROATIAN
499 tristate "Codepage maccroatian"
500 ---help---
501 The Apple HFS file system family can deal with filenames in
502 native language character sets. These character sets are stored in
503 so-called MAC codepages. You need to include the appropriate
504 codepage if you want to be able to read/write these filenames on
505 Mac partitions correctly. This does apply to the filenames
506 only, not to the file contents. You can include several codepages;
507 say Y here if you want to include the Mac codepage that is used for
508 Croatian.
509
510 If unsure, say Y.
511
512config NLS_MAC_CYRILLIC
513 tristate "Codepage maccyrillic"
514 ---help---
515 The Apple HFS file system family can deal with filenames in
516 native language character sets. These character sets are stored in
517 so-called MAC codepages. You need to include the appropriate
518 codepage if you want to be able to read/write these filenames on
519 Mac partitions correctly. This does apply to the filenames
520 only, not to the file contents. You can include several codepages;
521 say Y here if you want to include the Mac codepage that is used for
522 Cyrillic.
523
524 If unsure, say Y.
525
526config NLS_MAC_GAELIC
527 tristate "Codepage macgaelic"
528 ---help---
529 The Apple HFS file system family can deal with filenames in
530 native language character sets. These character sets are stored in
531 so-called MAC codepages. You need to include the appropriate
532 codepage if you want to be able to read/write these filenames on
533 Mac partitions correctly. This does apply to the filenames
534 only, not to the file contents. You can include several codepages;
535 say Y here if you want to include the Mac codepage that is used for
536 Gaelic.
537
538 If unsure, say Y.
539
540config NLS_MAC_GREEK
541 tristate "Codepage macgreek"
542 ---help---
543 The Apple HFS file system family can deal with filenames in
544 native language character sets. These character sets are stored in
545 so-called MAC codepages. You need to include the appropriate
546 codepage if you want to be able to read/write these filenames on
547 Mac partitions correctly. This does apply to the filenames
548 only, not to the file contents. You can include several codepages;
549 say Y here if you want to include the Mac codepage that is used for
550 Greek.
551
552 If unsure, say Y.
553
554config NLS_MAC_ICELAND
555 tristate "Codepage maciceland"
556 ---help---
557 The Apple HFS file system family can deal with filenames in
558 native language character sets. These character sets are stored in
559 so-called MAC codepages. You need to include the appropriate
560 codepage if you want to be able to read/write these filenames on
561 Mac partitions correctly. This does apply to the filenames
562 only, not to the file contents. You can include several codepages;
563 say Y here if you want to include the Mac codepage that is used for
564 Iceland.
565
566 If unsure, say Y.
567
568config NLS_MAC_INUIT
569 tristate "Codepage macinuit"
570 ---help---
571 The Apple HFS file system family can deal with filenames in
572 native language character sets. These character sets are stored in
573 so-called MAC codepages. You need to include the appropriate
574 codepage if you want to be able to read/write these filenames on
575 Mac partitions correctly. This does apply to the filenames
576 only, not to the file contents. You can include several codepages;
577 say Y here if you want to include the Mac codepage that is used for
578 Inuit.
579
580 If unsure, say Y.
581
582config NLS_MAC_ROMANIAN
583 tristate "Codepage macromanian"
584 ---help---
585 The Apple HFS file system family can deal with filenames in
586 native language character sets. These character sets are stored in
587 so-called MAC codepages. You need to include the appropriate
588 codepage if you want to be able to read/write these filenames on
589 Mac partitions correctly. This does apply to the filenames
590 only, not to the file contents. You can include several codepages;
591 say Y here if you want to include the Mac codepage that is used for
592 Romanian.
593
594 If unsure, say Y.
595
596config NLS_MAC_TURKISH
597 tristate "Codepage macturkish"
598 ---help---
599 The Apple HFS file system family can deal with filenames in
600 native language character sets. These character sets are stored in
601 so-called MAC codepages. You need to include the appropriate
602 codepage if you want to be able to read/write these filenames on
603 Mac partitions correctly. This does apply to the filenames
604 only, not to the file contents. You can include several codepages;
605 say Y here if you want to include the Mac codepage that is used for
606 Turkish.
607
608 If unsure, say Y.
609
455config NLS_UTF8 610config NLS_UTF8
456 tristate "NLS UTF-8" 611 tristate "NLS UTF-8"
457 help 612 help
diff --git a/fs/nls/Makefile b/fs/nls/Makefile
index f499dd7c3905..8ae37c1b5249 100644
--- a/fs/nls/Makefile
+++ b/fs/nls/Makefile
@@ -42,3 +42,14 @@ obj-$(CONFIG_NLS_ISO8859_15) += nls_iso8859-15.o
42obj-$(CONFIG_NLS_KOI8_R) += nls_koi8-r.o 42obj-$(CONFIG_NLS_KOI8_R) += nls_koi8-r.o
43obj-$(CONFIG_NLS_KOI8_U) += nls_koi8-u.o nls_koi8-ru.o 43obj-$(CONFIG_NLS_KOI8_U) += nls_koi8-u.o nls_koi8-ru.o
44obj-$(CONFIG_NLS_UTF8) += nls_utf8.o 44obj-$(CONFIG_NLS_UTF8) += nls_utf8.o
45obj-$(CONFIG_NLS_MAC_CELTIC) += mac-celtic.o
46obj-$(CONFIG_NLS_MAC_CENTEURO) += mac-centeuro.o
47obj-$(CONFIG_NLS_MAC_CROATIAN) += mac-croatian.o
48obj-$(CONFIG_NLS_MAC_CYRILLIC) += mac-cyrillic.o
49obj-$(CONFIG_NLS_MAC_GAELIC) += mac-gaelic.o
50obj-$(CONFIG_NLS_MAC_GREEK) += mac-greek.o
51obj-$(CONFIG_NLS_MAC_ICELAND) += mac-iceland.o
52obj-$(CONFIG_NLS_MAC_INUIT) += mac-inuit.o
53obj-$(CONFIG_NLS_MAC_ROMANIAN) += mac-romanian.o
54obj-$(CONFIG_NLS_MAC_ROMAN) += mac-roman.o
55obj-$(CONFIG_NLS_MAC_TURKISH) += mac-turkish.o
diff --git a/fs/nls/mac-celtic.c b/fs/nls/mac-celtic.c
new file mode 100644
index 000000000000..634a8b717b02
--- /dev/null
+++ b/fs/nls/mac-celtic.c
@@ -0,0 +1,602 @@
1/*
2 * linux/fs/nls/mac-celtic.c
3 *
4 * Charset macceltic translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
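
The generated tables that follow form the usual NLS pair: charset2uni[] maps each macceltic byte directly to a Unicode code point, while page_uni2charset[] is a two-level reverse map indexed first by the high byte of the code point (the "page") and then by the low byte, where a NULL page or a 0x00 entry means there is no exact mapping. As an editorial illustration only (the module's own conversion helpers are defined further down the file and are not shown in this excerpt), a lookup against the tables defined below goes roughly like this:

/* Editorial sketch, not part of the generated file.  Assumes the
 * charset2uni[] and page_uni2charset[] arrays defined below.
 */
static wchar_t sketch_char2uni(unsigned char c)
{
	return charset2uni[c];			/* direct 256-entry lookup */
}

static int sketch_uni2char(wchar_t uni, unsigned char *out)
{
	const unsigned char *page = page_uni2charset[(uni >> 8) & 0xff];
	unsigned char c = page ? page[uni & 0xff] : 0;

	if (!c && uni)				/* 0x00 is a hit only for U+0000 itself */
		return -1;			/* the real helpers report -EINVAL here */
	*out = c;
	return 0;
}
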
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x00c6, 0x00d8,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x03c0, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x00e6, 0x00f8,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x00ff, 0x0178, 0x2044, 0x20ac,
122 0x2039, 0x203a, 0x0176, 0x0177,
123 /* 0xe0 */
124 0x2021, 0x00b7, 0x1ef2, 0x1ef3,
125 0x2030, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0x2663, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x00dd, 0x00fd,
131 0x0174, 0x0175, 0x1e84, 0x1e85,
132 0x1e80, 0x1e81, 0x1e82, 0x1e83,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0x00, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page03[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page1e[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
280 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char page26[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
448};
449
450static const unsigned char *const page_uni2charset[256] = {
451 page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL, NULL, NULL, page1e, NULL,
455 page20, page21, page22, NULL, NULL, page25, page26, NULL,
456 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
482 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
483};
484
485static const unsigned char charset2lower[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static const unsigned char charset2upper[256] = {
521 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x00-0x07 */
522 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x08-0x0f */
523 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x10-0x17 */
524 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x18-0x1f */
525 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x20-0x27 */
526 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x28-0x2f */
527 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x30-0x37 */
528 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x38-0x3f */
529 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x40-0x47 */
530 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x48-0x4f */
531 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x50-0x57 */
532 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x58-0x5f */
533 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x60-0x67 */
534 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x68-0x6f */
535 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x70-0x77 */
536 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x78-0x7f */
537 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x80-0x87 */
538 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x88-0x8f */
539 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x90-0x97 */
540 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0x98-0x9f */
541 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa0-0xa7 */
542 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xa8-0xaf */
543 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb0-0xb7 */
544 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xb8-0xbf */
545 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc0-0xc7 */
546 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xc8-0xcf */
547 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd0-0xd7 */
548 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xd8-0xdf */
549 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe0-0xe7 */
550 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xe8-0xef */
551 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf0-0xf7 */
552 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, 0xfe, /* 0xf8-0xff */
553};
554
555static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
556{
557 const unsigned char *uni2charset;
558 unsigned char cl = uni & 0x00ff;
559 unsigned char ch = (uni & 0xff00) >> 8;
560
561 if (boundlen <= 0)
562 return -ENAMETOOLONG;
563
564 uni2charset = page_uni2charset[ch];
565 if (uni2charset && uni2charset[cl])
566 out[0] = uni2charset[cl];
567 else
568 return -EINVAL;
569 return 1;
570}
571
572static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
573{
574 *uni = charset2uni[*rawstring];
575 if (*uni == 0x0000)
576 return -EINVAL;
577 return 1;
578}
579
580static struct nls_table table = {
581 .charset = "macceltic",
582 .uni2char = uni2char,
583 .char2uni = char2uni,
584 .charset2lower = charset2lower,
585 .charset2upper = charset2upper,
586 .owner = THIS_MODULE,
587};
588
589static int __init init_nls_macceltic(void)
590{
591 return register_nls(&table);
592}
593
594static void __exit exit_nls_macceltic(void)
595{
596 unregister_nls(&table);
597}
598
599module_init(init_nls_macceltic)
600module_exit(exit_nls_macceltic)
601
602MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-centeuro.c b/fs/nls/mac-centeuro.c
new file mode 100644
index 000000000000..979e6265ac5e
--- /dev/null
+++ b/fs/nls/mac-centeuro.c
@@ -0,0 +1,532 @@
1/*
2 * linux/fs/nls/mac-centeuro.c
3 *
4 * Charset maccenteuro translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x0100, 0x0101, 0x00c9,
95 0x0104, 0x00d6, 0x00dc, 0x00e1,
96 0x0105, 0x010c, 0x00e4, 0x010d,
97 0x0106, 0x0107, 0x00e9, 0x0179,
98 /* 0x90 */
99 0x017a, 0x010e, 0x00ed, 0x010f,
100 0x0112, 0x0113, 0x0116, 0x00f3,
101 0x0117, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x011a, 0x011b, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x0118, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x0119,
107 0x00a8, 0x2260, 0x0123, 0x012e,
108 /* 0xb0 */
109 0x012f, 0x012a, 0x2264, 0x2265,
110 0x012b, 0x0136, 0x2202, 0x2211,
111 0x0142, 0x013b, 0x013c, 0x013d,
112 0x013e, 0x0139, 0x013a, 0x0145,
113 /* 0xc0 */
114 0x0146, 0x0143, 0x00ac, 0x221a,
115 0x0144, 0x0147, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x0148,
117 0x0150, 0x00d5, 0x0151, 0x014c,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x014d, 0x0154, 0x0155, 0x0158,
122 0x2039, 0x203a, 0x0159, 0x0156,
123 /* 0xe0 */
124 0x0157, 0x0160, 0x201a, 0x201e,
125 0x0161, 0x015a, 0x015b, 0x00c1,
126 0x0164, 0x0165, 0x00cd, 0x017d,
127 0x017e, 0x016a, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0x016b, 0x016e, 0x00da, 0x016f,
130 0x0170, 0x0171, 0x0172, 0x0173,
131 0x00dd, 0x00fd, 0x0137, 0x017b,
132 0x0141, 0x017c, 0x0122, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
159 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
160 0x00, 0xe7, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
161 0x00, 0x83, 0x00, 0x00, 0x00, 0xea, 0x00, 0x00, /* 0xc8-0xcf */
162 0x00, 0x00, 0x00, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0x00, 0x00, 0xf2, 0x00, 0x86, 0xf8, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x00, 0x87, 0x00, 0x00, 0x8a, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
165 0x00, 0x8e, 0x00, 0x00, 0x00, 0x92, 0x00, 0x00, /* 0xe8-0xef */
166 0x00, 0x00, 0x00, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0x00, 0x00, 0x9c, 0x00, 0x9f, 0xf9, 0x00, 0x00, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x81, 0x82, 0x00, 0x00, 0x84, 0x88, 0x8c, 0x8d, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x89, 0x8b, 0x91, 0x93, /* 0x08-0x0f */
173 0x00, 0x00, 0x94, 0x95, 0x00, 0x00, 0x96, 0x98, /* 0x10-0x17 */
174 0xa2, 0xab, 0x9d, 0x9e, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0xfe, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0xb1, 0xb4, 0x00, 0x00, 0xaf, 0xb0, /* 0x28-0x2f */
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb5, 0xfa, /* 0x30-0x37 */
178 0x00, 0xbd, 0xbe, 0xb9, 0xba, 0xbb, 0xbc, 0x00, /* 0x38-0x3f */
179 0x00, 0xfc, 0xb8, 0xc1, 0xc4, 0xbf, 0xc0, 0xc5, /* 0x40-0x47 */
180 0xcb, 0x00, 0x00, 0x00, 0xcf, 0xd8, 0x00, 0x00, /* 0x48-0x4f */
181 0xcc, 0xce, 0x00, 0x00, 0xd9, 0xda, 0xdf, 0xe0, /* 0x50-0x57 */
182 0xdb, 0xde, 0xe5, 0xe6, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0xe1, 0xe4, 0x00, 0x00, 0xe8, 0xe9, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0xed, 0xf0, 0x00, 0x00, 0xf1, 0xf3, /* 0x68-0x6f */
185 0xf4, 0xf5, 0xf6, 0xf7, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0x00, 0x8f, 0x90, 0xfb, 0xfd, 0xeb, 0xec, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page20[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
245 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page21[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
280 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page22[256] = {
311 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page25[256] = {
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
348 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char *const page_uni2charset[256] = {
381 page00, page01, page02, NULL, NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
383 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
384 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
385 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
386 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
390 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
391 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
392 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
393 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
405 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
406 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
410 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
413};
414
415static const unsigned char charset2lower[256] = {
416 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
417 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
418 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
421 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
422 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
423 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
425 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
426 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
427 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
428 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
429 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
430 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
431 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
432 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
433 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
434 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
435 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
436 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
437 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
438 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
439 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
440 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
441 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
442 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
443 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
444 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
445 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
446 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
447 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
448};
449
450static const unsigned char charset2upper[256] = {
451 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
452 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
453 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
454 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
455 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
456 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
457 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
458 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
459 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
460 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
461 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
462 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
463 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
464 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
465 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
466 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
467 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
468 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
469 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
470 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
471 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
472 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
473 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
474 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
475 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
476 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
477 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
478 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
479 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
480 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
481 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
482 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
483};
484
485static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
486{
487 const unsigned char *uni2charset;
488 unsigned char cl = uni & 0x00ff;
489 unsigned char ch = (uni & 0xff00) >> 8;
490
491 if (boundlen <= 0)
492 return -ENAMETOOLONG;
493
494 uni2charset = page_uni2charset[ch];
495 if (uni2charset && uni2charset[cl])
496 out[0] = uni2charset[cl];
497 else
498 return -EINVAL;
499 return 1;
500}
501
502static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
503{
504 *uni = charset2uni[*rawstring];
505 if (*uni == 0x0000)
506 return -EINVAL;
507 return 1;
508}
509
510static struct nls_table table = {
511 .charset = "maccenteuro",
512 .uni2char = uni2char,
513 .char2uni = char2uni,
514 .charset2lower = charset2lower,
515 .charset2upper = charset2upper,
516 .owner = THIS_MODULE,
517};
518
519static int __init init_nls_maccenteuro(void)
520{
521 return register_nls(&table);
522}
523
524static void __exit exit_nls_maccenteuro(void)
525{
526 unregister_nls(&table);
527}
528
529module_init(init_nls_maccenteuro)
530module_exit(exit_nls_maccenteuro)
531
532MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-croatian.c b/fs/nls/mac-croatian.c
new file mode 100644
index 000000000000..dd3f675911ee
--- /dev/null
+++ b/fs/nls/mac-croatian.c
@@ -0,0 +1,602 @@
1/*
2 * linux/fs/nls/mac-croatian.c
3 *
4 * Charset maccroatian translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x0160, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x017d, 0x00d8,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x2206, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x0161, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x017e, 0x00f8,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x0106, 0x00ab,
116 0x010c, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x0110, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0xf8ff, 0x00a9, 0x2044, 0x20ac,
122 0x2039, 0x203a, 0x00c6, 0x00bb,
123 /* 0xe0 */
124 0x2013, 0x00b7, 0x201a, 0x201e,
125 0x2030, 0x00c2, 0x0107, 0x00c1,
126 0x010d, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0x0111, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x02c6, 0x02dc,
131 0x00af, 0x03c0, 0x00cb, 0x02da,
132 0x00b8, 0x00ca, 0x00e6, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xd9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0xfc, 0x00, 0xbc, 0xdf, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xde, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xfd, 0xfa, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xfe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0xe6, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0xc8, 0xe8, 0x00, 0x00, /* 0x08-0x0f */
173 0xd0, 0xf0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0xa9, 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0x00, 0x00, 0x00, 0x00, 0x00, 0xae, 0xbe, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0xfb, 0x00, 0xf7, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page03[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0xf9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xe0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
280 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xb4, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char pagef8[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
448};
449
450static const unsigned char *const page_uni2charset[256] = {
451 page00, page01, page02, page03, NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
455 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
456 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
482 pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
483};
484
485static const unsigned char charset2lower[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static const unsigned char charset2upper[256] = {
521 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
523 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
524 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
525 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
526 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
527 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
528 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
529 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
530 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
535 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
536 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
541 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
543 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
544 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
545 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
548 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
549 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
551 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
552 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
553};
554
555static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
556{
557 const unsigned char *uni2charset;
558 unsigned char cl = uni & 0x00ff;
559 unsigned char ch = (uni & 0xff00) >> 8;
560
561 if (boundlen <= 0)
562 return -ENAMETOOLONG;
563
564 uni2charset = page_uni2charset[ch];
565 if (uni2charset && uni2charset[cl])
566 out[0] = uni2charset[cl];
567 else
568 return -EINVAL;
569 return 1;
570}
571
572static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
573{
574 *uni = charset2uni[*rawstring];
575 if (*uni == 0x0000)
576 return -EINVAL;
577 return 1;
578}
579
580static struct nls_table table = {
581 .charset = "maccroatian",
582 .uni2char = uni2char,
583 .char2uni = char2uni,
584 .charset2lower = charset2lower,
585 .charset2upper = charset2upper,
586 .owner = THIS_MODULE,
587};
588
589static int __init init_nls_maccroatian(void)
590{
591 return register_nls(&table);
592}
593
594static void __exit exit_nls_maccroatian(void)
595{
596 unregister_nls(&table);
597}
598
599module_init(init_nls_maccroatian)
600module_exit(exit_nls_maccroatian)
601
602MODULE_LICENSE("Dual BSD/GPL");
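
For reference, the uni2char()/char2uni() pair in these generated tables implements a simple two-level lookup: the high byte of the Unicode code point selects one of 256 possible page tables in page_uni2charset[], the low byte indexes into the selected page, and a zero entry means the code point has no exact mapping, so -EINVAL is returned. The standalone sketch below mirrors that logic outside the kernel; the demo_page20 table, demo_pages array and main() harness are illustrative only and not part of this patch.

/* Illustrative userspace sketch of the NLS two-level lookup; not part of the patch. */
#include <stdio.h>

/* Hypothetical page for U+20xx: only U+2019 (right single quotation mark) maps, to 0xd5. */
static const unsigned char demo_page20[256] = { [0x19] = 0xd5 };
static const unsigned char *const demo_pages[256] = { [0x20] = demo_page20 };

static int demo_uni2char(unsigned int uni, unsigned char *out)
{
	const unsigned char *page = demo_pages[(uni >> 8) & 0xff];
	unsigned char cl = uni & 0xff;

	if (page && page[cl]) {
		*out = page[cl];	/* exact mapping found */
		return 1;
	}
	return -1;			/* the kernel code returns -EINVAL here */
}

int main(void)
{
	unsigned char c;

	if (demo_uni2char(0x2019, &c) == 1)
		printf("U+2019 -> 0x%02x\n", c);	/* prints 0xd5 */
	if (demo_uni2char(0x20ff, &c) < 0)
		printf("U+20FF has no exact mapping\n");
	return 0;
}
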
diff --git a/fs/nls/mac-cyrillic.c b/fs/nls/mac-cyrillic.c
new file mode 100644
index 000000000000..1112c84dd8bb
--- /dev/null
+++ b/fs/nls/mac-cyrillic.c
@@ -0,0 +1,497 @@
1/*
2 * linux/fs/nls/mac-cyrillic.c
3 *
4 * Charset maccyrillic translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x0410, 0x0411, 0x0412, 0x0413,
95 0x0414, 0x0415, 0x0416, 0x0417,
96 0x0418, 0x0419, 0x041a, 0x041b,
97 0x041c, 0x041d, 0x041e, 0x041f,
98 /* 0x90 */
99 0x0420, 0x0421, 0x0422, 0x0423,
100 0x0424, 0x0425, 0x0426, 0x0427,
101 0x0428, 0x0429, 0x042a, 0x042b,
102 0x042c, 0x042d, 0x042e, 0x042f,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x0490, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x0406,
106 0x00ae, 0x00a9, 0x2122, 0x0402,
107 0x0452, 0x2260, 0x0403, 0x0453,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x0456, 0x00b5, 0x0491, 0x0408,
111 0x0404, 0x0454, 0x0407, 0x0457,
112 0x0409, 0x0459, 0x040a, 0x045a,
113 /* 0xc0 */
114 0x0458, 0x0405, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x040b,
117 0x045b, 0x040c, 0x045c, 0x0455,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x201e,
121 0x040e, 0x045e, 0x040f, 0x045f,
122 0x2116, 0x0401, 0x0451, 0x044f,
123 /* 0xe0 */
124 0x0430, 0x0431, 0x0432, 0x0433,
125 0x0434, 0x0435, 0x0436, 0x0437,
126 0x0438, 0x0439, 0x043a, 0x043b,
127 0x043c, 0x043d, 0x043e, 0x043f,
128 /* 0xf0 */
129 0x0440, 0x0441, 0x0442, 0x0443,
130 0x0444, 0x0445, 0x0446, 0x0447,
131 0x0448, 0x0449, 0x044a, 0x044b,
132 0x044c, 0x044d, 0x044e, 0x20ac,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0x00, 0x00, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0x00, 0xa9, 0x00, 0xc7, 0xc2, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0x00, 0xb5, 0xa6, 0x00, /* 0xb0-0xb7 */
159 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
161 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
162 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
163 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
164 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
165 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
166 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xd6, /* 0xf0-0xf7 */
167 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page04[256] = {
206 0x00, 0xdd, 0xab, 0xae, 0xb8, 0xc1, 0xa7, 0xba, /* 0x00-0x07 */
207 0xb7, 0xbc, 0xbe, 0xcb, 0xcd, 0x00, 0xd8, 0xda, /* 0x08-0x0f */
208 0x80, 0x81, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, /* 0x10-0x17 */
209 0x88, 0x89, 0x8a, 0x8b, 0x8c, 0x8d, 0x8e, 0x8f, /* 0x18-0x1f */
210 0x90, 0x91, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, /* 0x20-0x27 */
211 0x98, 0x99, 0x9a, 0x9b, 0x9c, 0x9d, 0x9e, 0x9f, /* 0x28-0x2f */
212 0xe0, 0xe1, 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, /* 0x30-0x37 */
213 0xe8, 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xef, /* 0x38-0x3f */
214 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, /* 0x40-0x47 */
215 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0xfd, 0xfe, 0xdf, /* 0x48-0x4f */
216 0x00, 0xde, 0xac, 0xaf, 0xb9, 0xcf, 0xb4, 0xbb, /* 0x50-0x57 */
217 0xc0, 0xbd, 0xbf, 0xcc, 0xce, 0x00, 0xd9, 0xdb, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0xa2, 0xb6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page20[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0xd7, 0x00, /* 0x18-0x1f */
245 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page21[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xdc, 0x00, /* 0x10-0x17 */
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
280 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page22[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char *const page_uni2charset[256] = {
346 page00, page01, NULL, NULL, page04, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
350 page20, page21, page22, NULL, NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
352 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
354 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
356 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
358 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
360 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
361 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
362 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
366 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
374 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
375 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
376 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
377 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
378};
379
380static const unsigned char charset2lower[256] = {
381 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
382 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
383 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
384 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
385 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
386 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
387 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
388 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
389 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
390 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
391 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
392 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
393 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
394 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
395 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
396 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
397 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
398 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
399 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
400 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
401 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
402 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
403 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
404 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
405 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
406 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
407 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
408 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
409 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
410 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
411 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
412 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
413};
414
415static const unsigned char charset2upper[256] = {
416 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
417 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
418 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
421 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
422 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
423 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
425 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
426 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
427 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
428 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
429 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
430 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
431 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
432 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
433 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
434 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
435 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
436 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
437 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
438 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
439 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
440 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
441 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
442 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
443 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
444 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
445 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
446 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
447 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
448};
449
450static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
451{
452 const unsigned char *uni2charset;
453 unsigned char cl = uni & 0x00ff;
454 unsigned char ch = (uni & 0xff00) >> 8;
455
456 if (boundlen <= 0)
457 return -ENAMETOOLONG;
458
459 uni2charset = page_uni2charset[ch];
460 if (uni2charset && uni2charset[cl])
461 out[0] = uni2charset[cl];
462 else
463 return -EINVAL;
464 return 1;
465}
466
467static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
468{
469 *uni = charset2uni[*rawstring];
470 if (*uni == 0x0000)
471 return -EINVAL;
472 return 1;
473}
474
475static struct nls_table table = {
476 .charset = "maccyrillic",
477 .uni2char = uni2char,
478 .char2uni = char2uni,
479 .charset2lower = charset2lower,
480 .charset2upper = charset2upper,
481 .owner = THIS_MODULE,
482};
483
484static int __init init_nls_maccyrillic(void)
485{
486 return register_nls(&table);
487}
488
489static void __exit exit_nls_maccyrillic(void)
490{
491 unregister_nls(&table);
492}
493
494module_init(init_nls_maccyrillic)
495module_exit(exit_nls_maccyrillic)
496
497MODULE_LICENSE("Dual BSD/GPL");
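
The reverse direction in char2uni() above is a single 256-entry lookup: the raw byte indexes charset2uni[], and a 0x0000 result is rejected with -EINVAL, so the NUL byte is never translated. A minimal check of one entry of the maccyrillic table added here (illustrative harness only, not part of the patch):

/* Illustrative check of the maccyrillic charset2uni[] direction; not part of the patch. */
#include <stdio.h>

int main(void)
{
	/* First four entries of the 0x80 row, copied from the table above. */
	static const unsigned short row80[4] = { 0x0410, 0x0411, 0x0412, 0x0413 };
	unsigned char byte = 0x80;

	/* charset2uni[0x80] is 0x0410, CYRILLIC CAPITAL LETTER A. */
	printf("0x%02x -> U+%04X\n", byte, row80[byte - 0x80]);
	return 0;
}
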
diff --git a/fs/nls/mac-gaelic.c b/fs/nls/mac-gaelic.c
new file mode 100644
index 000000000000..2de9158409c8
--- /dev/null
+++ b/fs/nls/mac-gaelic.c
@@ -0,0 +1,567 @@
1/*
2 * linux/fs/nls/mac-gaelic.c
3 *
4 * Charset macgaelic translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x00c6, 0x00d8,
108 /* 0xb0 */
109 0x1e02, 0x00b1, 0x2264, 0x2265,
110 0x1e03, 0x010a, 0x010b, 0x1e0a,
111 0x1e0b, 0x1e1e, 0x1e1f, 0x0120,
112 0x0121, 0x1e40, 0x00e6, 0x00f8,
113 /* 0xc0 */
114 0x1e41, 0x1e56, 0x1e57, 0x027c,
115 0x0192, 0x017f, 0x1e60, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x1e61, 0x1e9b,
121 0x00ff, 0x0178, 0x1e6a, 0x20ac,
122 0x2039, 0x203a, 0x0176, 0x0177,
123 /* 0xe0 */
124 0x1e6b, 0x00b7, 0x1ef2, 0x1ef3,
125 0x204a, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0x2663, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x00dd, 0x00fd,
131 0x0174, 0x0175, 0x1e84, 0x1e85,
132 0x1e80, 0x1e81, 0x1e82, 0x1e83,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0x00, 0xa2, 0xa3, 0x00, 0x00, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0x00, 0xc7, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0x00, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0x00, 0x00, 0x00, 0xc8, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xf6, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0x00, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xf7, 0x00, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0xb5, 0xb6, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0xbb, 0xbc, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0xf8, 0xf9, 0xde, 0xdf, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc5, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page1e[256] = {
241 0x00, 0x00, 0xb0, 0xb4, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0xb7, 0xb8, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb9, 0xba, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0xbd, 0xc0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc1, 0xc2, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0xc6, 0xd6, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0xda, 0xe0, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0xfc, 0xfd, 0xfe, 0xff, 0xfa, 0xfb, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0xe2, 0xe3, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
280 0xa0, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
348 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page26[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0xf0, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char *const page_uni2charset[256] = {
416 page00, page01, page02, NULL, NULL, NULL, NULL, NULL,
417 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
418 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
419 NULL, NULL, NULL, NULL, NULL, NULL, page1e, NULL,
420 page20, page21, page22, NULL, NULL, NULL, page26, NULL,
421 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
422 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
423 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
424 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
425 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
426 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
427 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
428 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
429 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
430 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
431 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
432 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
433 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
434 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
435 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
436 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
437 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
438 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
439 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
440 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
441 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
442 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
443 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
444 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
445 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
446 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
447 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
448};
449
450static const unsigned char charset2lower[256] = {
451 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
452 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
453 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
454 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
455 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
456 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
457 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
458 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
459 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
460 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
461 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
462 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
463 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
464 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
465 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
466 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
467 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
468 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
469 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
470 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
471 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
472 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
473 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
474 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
475 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
476 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
477 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
478 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
479 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
480 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
481 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
482 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
483};
484
485static const unsigned char charset2upper[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
521{
522 const unsigned char *uni2charset;
523 unsigned char cl = uni & 0x00ff;
524 unsigned char ch = (uni & 0xff00) >> 8;
525
526 if (boundlen <= 0)
527 return -ENAMETOOLONG;
528
529 uni2charset = page_uni2charset[ch];
530 if (uni2charset && uni2charset[cl])
531 out[0] = uni2charset[cl];
532 else
533 return -EINVAL;
534 return 1;
535}
536
537static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
538{
539 *uni = charset2uni[*rawstring];
540 if (*uni == 0x0000)
541 return -EINVAL;
542 return 1;
543}
544
545static struct nls_table table = {
546 .charset = "macgaelic",
547 .uni2char = uni2char,
548 .char2uni = char2uni,
549 .charset2lower = charset2lower,
550 .charset2upper = charset2upper,
551 .owner = THIS_MODULE,
552};
553
554static int __init init_nls_macgaelic(void)
555{
556 return register_nls(&table);
557}
558
559static void __exit exit_nls_macgaelic(void)
560{
561 unregister_nls(&table);
562}
563
564module_init(init_nls_macgaelic)
565module_exit(exit_nls_macgaelic)
566
567MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-greek.c b/fs/nls/mac-greek.c
new file mode 100644
index 000000000000..a86310082802
--- /dev/null
+++ b/fs/nls/mac-greek.c
@@ -0,0 +1,497 @@
1/*
2 * linux/fs/nls/mac-greek.c
3 *
4 * Charset macgreek translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00b9, 0x00b2, 0x00c9,
95 0x00b3, 0x00d6, 0x00dc, 0x0385,
96 0x00e0, 0x00e2, 0x00e4, 0x0384,
97 0x00a8, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00a3, 0x2122,
100 0x00ee, 0x00ef, 0x2022, 0x00bd,
101 0x2030, 0x00f4, 0x00f6, 0x00a6,
102 0x20ac, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x0393, 0x0394, 0x0398,
105 0x039b, 0x039e, 0x03a0, 0x00df,
106 0x00ae, 0x00a9, 0x03a3, 0x03aa,
107 0x00a7, 0x2260, 0x00b0, 0x00b7,
108 /* 0xb0 */
109 0x0391, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x0392, 0x0395, 0x0396,
111 0x0397, 0x0399, 0x039a, 0x039c,
112 0x03a6, 0x03ab, 0x03a8, 0x03a9,
113 /* 0xc0 */
114 0x03ac, 0x039d, 0x00ac, 0x039f,
115 0x03a1, 0x2248, 0x03a4, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x03a5,
117 0x03a7, 0x0386, 0x0388, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2015, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x0389,
121 0x038a, 0x038c, 0x038e, 0x03ad,
122 0x03ae, 0x03af, 0x03cc, 0x038f,
123 /* 0xe0 */
124 0x03cd, 0x03b1, 0x03b2, 0x03c8,
125 0x03b4, 0x03b5, 0x03c6, 0x03b3,
126 0x03b7, 0x03b9, 0x03be, 0x03ba,
127 0x03bb, 0x03bc, 0x03bd, 0x03bf,
128 /* 0xf0 */
129 0x03c0, 0x03ce, 0x03c1, 0x03c3,
130 0x03c4, 0x03b8, 0x03c9, 0x03c2,
131 0x03c7, 0x03c5, 0x03b6, 0x03ca,
132 0x03cb, 0x0390, 0x03b0, 0x00ad,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0x00, 0x00, 0x92, 0x00, 0xb4, 0x9b, 0xac, /* 0xa0-0xa7 */
157 0x8c, 0xa9, 0x00, 0xc7, 0xc2, 0xff, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xae, 0xb1, 0x82, 0x84, 0x00, 0x00, 0x00, 0xaf, /* 0xb0-0xb7 */
159 0x00, 0x81, 0x00, 0xc8, 0x00, 0x97, 0x00, 0x00, /* 0xb8-0xbf */
160 0x00, 0x00, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
161 0x00, 0x83, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
162 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x85, 0x00, /* 0xd0-0xd7 */
163 0x00, 0x00, 0x00, 0x00, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x00, 0x89, 0x00, 0x8a, 0x00, 0x00, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x00, 0x00, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x00, 0x00, 0x00, 0x99, 0x00, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0x00, 0x9d, 0x00, 0x9e, 0x9f, 0x00, 0x00, 0x00, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0x00, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page03[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x8b, 0x87, 0xcd, 0x00, /* 0x80-0x87 */
223 0xce, 0xd7, 0xd8, 0x00, 0xd9, 0x00, 0xda, 0xdf, /* 0x88-0x8f */
224 0xfd, 0xb0, 0xb5, 0xa1, 0xa2, 0xb6, 0xb7, 0xb8, /* 0x90-0x97 */
225 0xa3, 0xb9, 0xba, 0xa4, 0xbb, 0xc1, 0xa5, 0xc3, /* 0x98-0x9f */
226 0xa6, 0xc4, 0x00, 0xaa, 0xc6, 0xcb, 0xbc, 0xcc, /* 0xa0-0xa7 */
227 0xbe, 0xbf, 0xab, 0xbd, 0xc0, 0xdb, 0xdc, 0xdd, /* 0xa8-0xaf */
228 0xfe, 0xe1, 0xe2, 0xe7, 0xe4, 0xe5, 0xfa, 0xe8, /* 0xb0-0xb7 */
229 0xf5, 0xe9, 0xeb, 0xec, 0xed, 0xee, 0xea, 0xef, /* 0xb8-0xbf */
230 0xf0, 0xf2, 0xf7, 0xf3, 0xf4, 0xf9, 0xe6, 0xf8, /* 0xc0-0xc7 */
231 0xe3, 0xf6, 0xfb, 0xfc, 0xde, 0xe0, 0xf1, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page20[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0xd0, 0x00, 0xd1, 0x00, 0x00, /* 0x10-0x17 */
244 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
245 0xa0, 0x00, 0x96, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x98, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0x9c, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page21[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
280 0x00, 0x00, 0x93, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page22[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char *const page_uni2charset[256] = {
346 page00, page01, NULL, page03, NULL, NULL, NULL, NULL,
347 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
348 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
349 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
350 page20, page21, page22, NULL, NULL, NULL, NULL, NULL,
351 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
352 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
353 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
354 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
355 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
356 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
357 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
358 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
359 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
360 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
361 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
362 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
363 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
364 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
365 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
366 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
367 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
368 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
369 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
370 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
371 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
372 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
373 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
374 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
375 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
376 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
377 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
378};
379
380static const unsigned char charset2lower[256] = {
381 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
382 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
383 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
384 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
385 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
386 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
387 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
388 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
389 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
390 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
391 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
392 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
393 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
394 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
395 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
396 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
397 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
398 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
399 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
400 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
401 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
402 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
403 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
404 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
405 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
406 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
407 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
408 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
409 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
410 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
411 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
412 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
413};
414
415static const unsigned char charset2upper[256] = {
416 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
417 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
418 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
421 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
422 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
423 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
425 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
426 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
427 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
428 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
429 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
430 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
431 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
432 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
433 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
434 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
435 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
436 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
437 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
438 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
439 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
440 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
441 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
442 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
443 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
444 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
445 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
446 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
447 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
448};
449
450static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
451{
452 const unsigned char *uni2charset;
453 unsigned char cl = uni & 0x00ff;
454 unsigned char ch = (uni & 0xff00) >> 8;
455
456 if (boundlen <= 0)
457 return -ENAMETOOLONG;
458
459 uni2charset = page_uni2charset[ch];
460 if (uni2charset && uni2charset[cl])
461 out[0] = uni2charset[cl];
462 else
463 return -EINVAL;
464 return 1;
465}
466
467static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
468{
469 *uni = charset2uni[*rawstring];
470 if (*uni == 0x0000)
471 return -EINVAL;
472 return 1;
473}
474
475static struct nls_table table = {
476 .charset = "macgreek",
477 .uni2char = uni2char,
478 .char2uni = char2uni,
479 .charset2lower = charset2lower,
480 .charset2upper = charset2upper,
481 .owner = THIS_MODULE,
482};
483
484static int __init init_nls_macgreek(void)
485{
486 return register_nls(&table);
487}
488
489static void __exit exit_nls_macgreek(void)
490{
491 unregister_nls(&table);
492}
493
494module_init(init_nls_macgreek)
495module_exit(exit_nls_macgreek)
496
497MODULE_LICENSE("Dual BSD/GPL");
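
These generated tables all share the same lookup shape: char2uni() is a direct index into the 256-entry charset2uni array, while uni2char() does a two-level lookup, picking a per-page table by the high byte of the code point and then indexing it by the low byte, where a 0x00 entry means "no exact mapping". As a rough illustration only (not part of this commit), the short user-space C sketch below reproduces that two-level scheme; the demo_* names are invented for the example, and the one populated entry, U+00E9 -> 0x8e, is borrowed from the macgreek page00 table above.

#include <stdio.h>

/*
 * Stand-alone sketch of the two-level Unicode -> 8-bit lookup that
 * uni2char() performs with the page_uni2charset/pageXX tables.  The
 * tables here are made up for illustration; only the single mapping
 * U+00E9 -> 0x8e is taken from the macgreek page00 table above.
 */
static const unsigned char demo_page00[256] = {
	[0xe9] = 0x8e,	/* U+00E9 LATIN SMALL LETTER E WITH ACUTE */
};

/* One slot per high byte of the code point; pages with no mappings stay NULL. */
static const unsigned char *const demo_uni2charset[256] = {
	[0x00] = demo_page00,
};

/* Returns the single-byte code, or -1 if the code point has no exact mapping. */
static int demo_uni2char(unsigned int uni)
{
	const unsigned char *page = demo_uni2charset[(uni >> 8) & 0xff];

	if (!page || !page[uni & 0xff])
		return -1;	/* NULL page or 0x00 entry: unmapped */
	return page[uni & 0xff];
}

int main(void)
{
	printf("U+00E9 -> 0x%02x\n", demo_uni2char(0x00e9));	/* prints 0x8e */
	printf("U+0391 -> %d\n", demo_uni2char(0x0391));	/* -1: page 0x03 not wired up in this demo */
	return 0;
}
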
diff --git a/fs/nls/mac-iceland.c b/fs/nls/mac-iceland.c
new file mode 100644
index 000000000000..babe2998d5ce
--- /dev/null
+++ b/fs/nls/mac-iceland.c
@@ -0,0 +1,602 @@
1/*
2 * linux/fs/nls/mac-iceland.c
3 *
4 * Charset maciceland translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x00dd, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x00c6, 0x00d8,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x03c0, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x00e6, 0x00f8,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x00ff, 0x0178, 0x2044, 0x20ac,
122 0x00d0, 0x00f0, 0x00de, 0x00fe,
123 /* 0xe0 */
124 0x00fd, 0x00b7, 0x201a, 0x201e,
125 0x2030, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0xf8ff, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x02c6, 0x02dc,
131 0x00af, 0x02d8, 0x02d9, 0x02da,
132 0x00b8, 0x02dd, 0x02db, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0xdc, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0xa0, 0xde, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0xdd, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0xe0, 0xdf, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page03[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
280 0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char pagef8[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
448};
449
450static const unsigned char *const page_uni2charset[256] = {
451 page00, page01, page02, page03, NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
455 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
456 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
482 pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
483};
484
485static const unsigned char charset2lower[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static const unsigned char charset2upper[256] = {
521 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
523 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
524 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
525 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
526 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
527 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
528 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
529 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
530 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
535 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
536 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
541 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
543 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
544 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
545 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
548 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
549 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
551 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
552 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
553};
554
555static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
556{
557 const unsigned char *uni2charset;
558 unsigned char cl = uni & 0x00ff;
559 unsigned char ch = (uni & 0xff00) >> 8;
560
561 if (boundlen <= 0)
562 return -ENAMETOOLONG;
563
564 uni2charset = page_uni2charset[ch];
565 if (uni2charset && uni2charset[cl])
566 out[0] = uni2charset[cl];
567 else
568 return -EINVAL;
569 return 1;
570}
571
572static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
573{
574 *uni = charset2uni[*rawstring];
575 if (*uni == 0x0000)
576 return -EINVAL;
577 return 1;
578}
579
580static struct nls_table table = {
581 .charset = "maciceland",
582 .uni2char = uni2char,
583 .char2uni = char2uni,
584 .charset2lower = charset2lower,
585 .charset2upper = charset2upper,
586 .owner = THIS_MODULE,
587};
588
589static int __init init_nls_maciceland(void)
590{
591 return register_nls(&table);
592}
593
594static void __exit exit_nls_maciceland(void)
595{
596 unregister_nls(&table);
597}
598
599module_init(init_nls_maciceland)
600module_exit(exit_nls_maciceland)
601
602MODULE_LICENSE("Dual BSD/GPL");
diff --git a/fs/nls/mac-inuit.c b/fs/nls/mac-inuit.c
new file mode 100644
index 000000000000..312364f010dc
--- /dev/null
+++ b/fs/nls/mac-inuit.c
@@ -0,0 +1,532 @@
1/*
2 * linux/fs/nls/mac-inuit.c
3 *
4 * Charset macinuit translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x1403, 0x1404, 0x1405, 0x1406,
95 0x140a, 0x140b, 0x1431, 0x1432,
96 0x1433, 0x1434, 0x1438, 0x1439,
97 0x1449, 0x144e, 0x144f, 0x1450,
98 /* 0x90 */
99 0x1451, 0x1455, 0x1456, 0x1466,
100 0x146d, 0x146e, 0x146f, 0x1470,
101 0x1472, 0x1473, 0x1483, 0x148b,
102 0x148c, 0x148d, 0x148e, 0x1490,
103 /* 0xa0 */
104 0x1491, 0x00b0, 0x14a1, 0x14a5,
105 0x14a6, 0x2022, 0x00b6, 0x14a7,
106 0x00ae, 0x00a9, 0x2122, 0x14a8,
107 0x14aa, 0x14ab, 0x14bb, 0x14c2,
108 /* 0xb0 */
109 0x14c3, 0x14c4, 0x14c5, 0x14c7,
110 0x14c8, 0x14d0, 0x14ef, 0x14f0,
111 0x14f1, 0x14f2, 0x14f4, 0x14f5,
112 0x1505, 0x14d5, 0x14d6, 0x14d7,
113 /* 0xc0 */
114 0x14d8, 0x14da, 0x14db, 0x14ea,
115 0x1528, 0x1529, 0x152a, 0x152b,
116 0x152d, 0x2026, 0x00a0, 0x152e,
117 0x153e, 0x1555, 0x1556, 0x1557,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x1558, 0x1559,
121 0x155a, 0x155d, 0x1546, 0x1547,
122 0x1548, 0x1549, 0x154b, 0x154c,
123 /* 0xe0 */
124 0x1550, 0x157f, 0x1580, 0x1581,
125 0x1582, 0x1583, 0x1584, 0x1585,
126 0x158f, 0x1590, 0x1591, 0x1592,
127 0x1593, 0x1594, 0x1595, 0x1671,
128 /* 0xf0 */
129 0x1672, 0x1673, 0x1674, 0x1675,
130 0x1676, 0x1596, 0x15a0, 0x15a1,
131 0x15a2, 0x15a3, 0x15a4, 0x15a5,
132 0x15a6, 0x157c, 0x0141, 0x0142,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
157 0x00, 0xa9, 0x00, 0x00, 0x00, 0x00, 0xa8, 0x00, /* 0xa8-0xaf */
158 0xa1, 0x00, 0x00, 0x00, 0x00, 0x00, 0xa6, 0x00, /* 0xb0-0xb7 */
159 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
160 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
161 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
162 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
163 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
164 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
165 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
166 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
167 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0xfe, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page14[256] = {
206 0x00, 0x00, 0x00, 0x80, 0x81, 0x82, 0x83, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x84, 0x85, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x86, 0x87, 0x88, 0x89, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x8a, 0x8b, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x8c, 0x00, 0x00, 0x00, 0x00, 0x8d, 0x8e, /* 0x48-0x4f */
216 0x8f, 0x90, 0x00, 0x00, 0x00, 0x91, 0x92, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x93, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x94, 0x95, 0x96, /* 0x68-0x6f */
220 0x97, 0x00, 0x98, 0x99, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x9a, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x9b, 0x9c, 0x9d, 0x9e, 0x00, /* 0x88-0x8f */
224 0x9f, 0xa0, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0xa2, 0x00, 0x00, 0x00, 0xa3, 0xa4, 0xa7, /* 0xa0-0xa7 */
227 0xab, 0x00, 0xac, 0xad, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0xae, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0xaf, 0xb0, 0xb1, 0xb2, 0x00, 0xb3, /* 0xc0-0xc7 */
231 0xb4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0xb5, 0x00, 0x00, 0x00, 0x00, 0xbd, 0xbe, 0xbf, /* 0xd0-0xd7 */
233 0xc0, 0x00, 0xc1, 0xc2, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0x00, 0xb6, /* 0xe8-0xef */
236 0xb7, 0xb8, 0xb9, 0x00, 0xba, 0xbb, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page15[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0xbc, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0xc4, 0xc5, 0xc6, 0xc7, 0x00, 0xc8, 0xcb, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x40-0x47 */
250 0xdc, 0xdd, 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0xe0, 0x00, 0x00, 0x00, 0x00, 0xcd, 0xce, 0xcf, /* 0x50-0x57 */
252 0xd6, 0xd7, 0xd8, 0x00, 0x00, 0xd9, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0xfd, 0x00, 0x00, 0xe1, /* 0x78-0x7f */
257 0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xe8, /* 0x88-0x8f */
259 0xe9, 0xea, 0xeb, 0xec, 0xed, 0xee, 0xf5, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0xf6, 0xf7, 0xf8, 0xf9, 0xfa, 0xfb, 0xfc, 0x00, /* 0xa0-0xa7 */
262 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page16[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0xef, 0xf0, 0xf1, 0xf2, 0xf3, 0xf4, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page20[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0xd4, 0xd5, 0x00, 0x00, 0xd2, 0xd3, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page21[256] = {
346 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
348 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char *const page_uni2charset[256] = {
381 page00, page01, NULL, NULL, NULL, NULL, NULL, NULL,
382 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
383 NULL, NULL, NULL, NULL, page14, page15, page16, NULL,
384 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
385 page20, page21, NULL, NULL, NULL, NULL, NULL, NULL,
386 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
387 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
388 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
389 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
390 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
391 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
392 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
393 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
394 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
395 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
396 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
397 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
398 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
399 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
400 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
401 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
402 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
403 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
404 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
405 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
406 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
407 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
408 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
409 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
410 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
411 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
412 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
413};
414
415static const unsigned char charset2lower[256] = {
416 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
417 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
418 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
419 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
420 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
421 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
422 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
423 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
424 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
425 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
426 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
427 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
428 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
429 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
430 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
431 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
432 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
433 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
434 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
435 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
436 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
437 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
438 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
439 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
440 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
441 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
442 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
443 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
444 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
445 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
446 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
447 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
448};
449
450static const unsigned char charset2upper[256] = {
451	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
452	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
453	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
454	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
455	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
456	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
457	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
458	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
459	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
460	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
461	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
462	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
463	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
464	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
465	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
466	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
467	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
468	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
469	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
470	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
471	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
472	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
473	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
474	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
475	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
476	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
477	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
478	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
479	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
480	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
481	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
482	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
483};
484
485static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
486{
487 const unsigned char *uni2charset;
488 unsigned char cl = uni & 0x00ff;
489 unsigned char ch = (uni & 0xff00) >> 8;
490
491 if (boundlen <= 0)
492 return -ENAMETOOLONG;
493
494 uni2charset = page_uni2charset[ch];
495 if (uni2charset && uni2charset[cl])
496 out[0] = uni2charset[cl];
497 else
498 return -EINVAL;
499 return 1;
500}
501
502static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
503{
504 *uni = charset2uni[*rawstring];
505 if (*uni == 0x0000)
506 return -EINVAL;
507 return 1;
508}
509
510static struct nls_table table = {
511 .charset = "macinuit",
512 .uni2char = uni2char,
513 .char2uni = char2uni,
514 .charset2lower = charset2lower,
515 .charset2upper = charset2upper,
516 .owner = THIS_MODULE,
517};
518
519static int __init init_nls_macinuit(void)
520{
521 return register_nls(&table);
522}
523
524static void __exit exit_nls_macinuit(void)
525{
526 unregister_nls(&table);
527}
528
529module_init(init_nls_macinuit)
530module_exit(exit_nls_macinuit)
531
532MODULE_LICENSE("Dual BSD/GPL");
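
The macinuit file above, like the other Apple NLS tables added in this series, stores both directions of the conversion as flat arrays: charset2uni maps a charset byte straight to a Unicode code point, and the reverse mapping is split into 256-entry pages (page00, page14, ...) selected by the high byte of the code point, which is exactly what uni2char() indexes. The following stand-alone sketch is only an illustration and is not part of the commit: the uni2byte() helper is an invented name, and the tables are abridged to the two macinuit mappings visible above (0xa1 <-> U+00B0 DEGREE SIGN, 0xa5 <-> U+2022 BULLET), with every other cell left at zero to mean "no exact mapping".

/*
 * Stand-alone sketch of the two-level lookup used by uni2char() above.
 * Not part of this commit: uni2byte() is an invented name and the tables
 * are abridged to two macinuit entries; 0x00 cells mean "no exact mapping".
 */
#include <stdio.h>

typedef unsigned short u16;

/* charset byte -> Unicode code point (abridged) */
static const u16 charset2uni[256] = {
	[0xa1] = 0x00b0,	/* DEGREE SIGN */
	[0xa5] = 0x2022,	/* BULLET */
};

/* Unicode -> charset byte, split into 256-entry pages by the high byte */
static const unsigned char page00[256] = { [0xb0] = 0xa1 };
static const unsigned char page20[256] = { [0x22] = 0xa5 };

static const unsigned char *const page_uni2charset[256] = {
	[0x00] = page00,
	[0x20] = page20,
};

/* same shape as uni2char(): pick the page, then the cell inside it */
static int uni2byte(u16 uni, unsigned char *out)
{
	const unsigned char *page = page_uni2charset[(uni >> 8) & 0xff];
	unsigned char cell = page ? page[uni & 0xff] : 0;

	if (!cell)
		return -1;	/* stands in for -EINVAL */
	*out = cell;
	return 1;
}

int main(void)
{
	unsigned char b;

	if (uni2byte(0x2022, &b) == 1)
		printf("U+2022 -> 0x%02x\n", (unsigned)b);	/* prints 0xa5 */
	if (uni2byte(0x20ac, &b) < 0)
		printf("U+20AC: no mapping in this sketch\n");
	printf("0xa5 -> U+%04x\n", (unsigned)charset2uni[0xa5]);	/* reverse direction */
	return 0;
}
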
diff --git a/fs/nls/mac-roman.c b/fs/nls/mac-roman.c
new file mode 100644
index 000000000000..53ce0809cbd2
--- /dev/null
+++ b/fs/nls/mac-roman.c
@@ -0,0 +1,637 @@
1/*
2 * linux/fs/nls/mac-roman.c
3 *
4 * Charset macroman translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x00c6, 0x00d8,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x03c0, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x00e6, 0x00f8,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x00ff, 0x0178, 0x2044, 0x20ac,
122 0x2039, 0x203a, 0xfb01, 0xfb02,
123 /* 0xe0 */
124 0x2021, 0x00b7, 0x201a, 0x201e,
125 0x2030, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0xf8ff, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x02c6, 0x02dc,
131 0x00af, 0x02d8, 0x02d9, 0x02da,
132 0x00b8, 0x02dd, 0x02db, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page03[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
280 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char pagef8[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
448};
449
450static const unsigned char pagefb[256] = {
451 0x00, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
452 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
453 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
454 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
455 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
456 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
457 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
458 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
459 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
460 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
461 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
462 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
463 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
464 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
465 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
466 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
467 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
468 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
469 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
470 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
471 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
472 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
473 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
474 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
475 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
476 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
477 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
478 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
479 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
480 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
481 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
482 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
483};
484
485static const unsigned char *const page_uni2charset[256] = {
486 page00, page01, page02, page03, NULL, NULL, NULL, NULL,
487 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
488 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
489 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
490 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
491 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
492 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
493 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
494 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
495 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
496 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
497 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
498 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
499 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
500 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
501 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
502 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
503 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
504 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
505 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
506 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
507 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
508 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
509 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
510 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
511 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
512 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
513 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
514 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
515 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
516 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
517 pagef8, NULL, NULL, pagefb, NULL, NULL, NULL, NULL,
518};
519
520static const unsigned char charset2lower[256] = {
521 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
523 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
524 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
525 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
526 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
527 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
528 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
529 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
530 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
535 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
536 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
541 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
543 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
544 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
545 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
548 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
549 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
551 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
552 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
553};
554
555static const unsigned char charset2upper[256] = {
556 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
557 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
558 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
559 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
560 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
561 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
562 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
563 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
564 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
565 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
566 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
567 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
568 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
569 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
570 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
571 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
572 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
573 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
574 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
575 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
576 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
577 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
578 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
579 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
580 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
581 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
582 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
583 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
584 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
585 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
586 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
587 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
588};
589
590static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
591{
592 const unsigned char *uni2charset;
593 unsigned char cl = uni & 0x00ff;
594 unsigned char ch = (uni & 0xff00) >> 8;
595
596 if (boundlen <= 0)
597 return -ENAMETOOLONG;
598
599 uni2charset = page_uni2charset[ch];
600 if (uni2charset && uni2charset[cl])
601 out[0] = uni2charset[cl];
602 else
603 return -EINVAL;
604 return 1;
605}
606
607static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
608{
609 *uni = charset2uni[*rawstring];
610 if (*uni == 0x0000)
611 return -EINVAL;
612 return 1;
613}
614
615static struct nls_table table = {
616 .charset = "macroman",
617 .uni2char = uni2char,
618 .char2uni = char2uni,
619 .charset2lower = charset2lower,
620 .charset2upper = charset2upper,
621 .owner = THIS_MODULE,
622};
623
624static int __init init_nls_macroman(void)
625{
626 return register_nls(&table);
627}
628
629static void __exit exit_nls_macroman(void)
630{
631 unregister_nls(&table);
632}
633
634module_init(init_nls_macroman)
635module_exit(exit_nls_macroman)
636
637MODULE_LICENSE("Dual BSD/GPL");
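The table above registers the "macroman" charset with the NLS layer; conversion in both directions goes through the char2uni()/uni2char() hooks shown earlier. The snippet below is an illustrative sketch only, not part of the patch: it assumes the standard load_nls()/unload_nls() helpers from <linux/nls.h>, the function name is hypothetical, and error handling is trimmed. It shows one round trip for MacRoman byte 0xa5 (the bullet), whose reverse mapping is visible in the page20 table above (page20[0x22] == 0xa5).

/*
 * Illustrative usage sketch -- not part of the patch.  Assumes the
 * standard load_nls()/unload_nls() helpers; "macroman" matches the
 * .charset name registered above.
 */
#include <linux/nls.h>

static int macroman_roundtrip_example(void)
{
	struct nls_table *nls;
	unsigned char in = 0xa5;	/* MacRoman bullet */
	unsigned char out;
	wchar_t uni;
	int ret;

	nls = load_nls("macroman");
	if (!nls)
		return -EINVAL;

	/* byte -> Unicode via charset2uni[] */
	ret = nls->char2uni(&in, 1, &uni);
	if (ret > 0)
		/* Unicode -> byte via the page_uni2charset[] pages */
		ret = nls->uni2char(uni, &out, 1);

	unload_nls(nls);
	return ret < 0 ? ret : 0;
}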
diff --git a/fs/nls/mac-romanian.c b/fs/nls/mac-romanian.c
new file mode 100644
index 000000000000..add6f7a0c666
--- /dev/null
+++ b/fs/nls/mac-romanian.c
@@ -0,0 +1,602 @@
1/*
2 * linux/fs/nls/mac-romanian.c
3 *
4 * Charset macromanian translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x0102, 0x0218,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x03c0, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x0103, 0x0219,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x00ff, 0x0178, 0x2044, 0x20ac,
122 0x2039, 0x203a, 0x021a, 0x021b,
123 /* 0xe0 */
124 0x2021, 0x00b7, 0x201a, 0x201e,
125 0x2030, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0xf8ff, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0x0131, 0x02c6, 0x02dc,
131 0x00af, 0x02d8, 0x02d9, 0x02da,
132 0x00b8, 0x02dd, 0x02db, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0x00, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0x00, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0x00, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0x00, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0xae, 0xbe, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0x00, 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0xaf, 0xbf, 0xde, 0xdf, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page03[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
280 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0xda, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0xdb, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char pagef8[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
448};
449
450static const unsigned char *const page_uni2charset[256] = {
451 page00, page01, page02, page03, NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
455 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
456 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
482 pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
483};
484
485static const unsigned char charset2lower[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static const unsigned char charset2upper[256] = {
521 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
523 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
524 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
525 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
526 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
527 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
528 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
529 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
530 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
535 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
536 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
541 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
543 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
544 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
545 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
548 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
549 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
551 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
552 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
553};
554
555static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
556{
557 const unsigned char *uni2charset;
558 unsigned char cl = uni & 0x00ff;
559 unsigned char ch = (uni & 0xff00) >> 8;
560
561 if (boundlen <= 0)
562 return -ENAMETOOLONG;
563
564 uni2charset = page_uni2charset[ch];
565 if (uni2charset && uni2charset[cl])
566 out[0] = uni2charset[cl];
567 else
568 return -EINVAL;
569 return 1;
570}
571
572static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
573{
574 *uni = charset2uni[*rawstring];
575 if (*uni == 0x0000)
576 return -EINVAL;
577 return 1;
578}
579
580static struct nls_table table = {
581 .charset = "macromanian",
582 .uni2char = uni2char,
583 .char2uni = char2uni,
584 .charset2lower = charset2lower,
585 .charset2upper = charset2upper,
586 .owner = THIS_MODULE,
587};
588
589static int __init init_nls_macromanian(void)
590{
591 return register_nls(&table);
592}
593
594static void __exit exit_nls_macromanian(void)
595{
596 unregister_nls(&table);
597}
598
599module_init(init_nls_macromanian)
600module_exit(exit_nls_macromanian)
601
602MODULE_LICENSE("Dual BSD/GPL");
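For mac-romanian the entries of interest are the Romanian letters occupying slots that plain MacRoman uses for ligatures and Nordic characters: charset2uni above maps 0xae/0xaf to U+0102/U+0218 and 0xde/0xdf to U+021A/U+021B, with page01 and page02 providing the reverse direction. The following sketch, purely illustrative and not part of the patch (the function name is hypothetical and it refers to the static arrays defined in this file), traces the two-level lookup that uni2char() performs for U+0102 (A with breve).

/*
 * Illustrative walk-through -- not part of the patch.  Traces the
 * lookup uni2char() above does for U+0102, using the
 * page_uni2charset[]/page01[] arrays defined in this file.
 */
static unsigned char macromanian_lookup_example(void)
{
	wchar_t uni = 0x0102;
	unsigned char ch = (uni & 0xff00) >> 8;	/* 0x01: selects page01 */
	unsigned char cl = uni & 0x00ff;	/* 0x02: index into that page */
	const unsigned char *page = page_uni2charset[ch];

	/*
	 * page01[0x02] is 0xae, the Mac Romanian code for U+0102; a 0x00
	 * entry would make uni2char() return -EINVAL instead.
	 */
	return page ? page[cl] : 0x00;
}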
diff --git a/fs/nls/mac-turkish.c b/fs/nls/mac-turkish.c
new file mode 100644
index 000000000000..dffa96d5de00
--- /dev/null
+++ b/fs/nls/mac-turkish.c
@@ -0,0 +1,602 @@
1/*
2 * linux/fs/nls/mac-turkish.c
3 *
4 * Charset macturkish translation tables.
5 * Generated automatically from the Unicode and charset
6 * tables from the Unicode Organization (www.unicode.org).
7 * The Unicode to charset table has only exact mappings.
8 */
9
10/*
11 * COPYRIGHT AND PERMISSION NOTICE
12 *
13 * Copyright 1991-2012 Unicode, Inc. All rights reserved. Distributed under
14 * the Terms of Use in http://www.unicode.org/copyright.html.
15 *
16 * Permission is hereby granted, free of charge, to any person obtaining a
17 * copy of the Unicode data files and any associated documentation (the "Data
18 * Files") or Unicode software and any associated documentation (the
19 * "Software") to deal in the Data Files or Software without restriction,
20 * including without limitation the rights to use, copy, modify, merge,
21 * publish, distribute, and/or sell copies of the Data Files or Software, and
22 * to permit persons to whom the Data Files or Software are furnished to do
23 * so, provided that (a) the above copyright notice(s) and this permission
24 * notice appear with all copies of the Data Files or Software, (b) both the
25 * above copyright notice(s) and this permission notice appear in associated
26 * documentation, and (c) there is clear notice in each modified Data File or
27 * in the Software as well as in the documentation associated with the Data
28 * File(s) or Software that the data or software has been modified.
29 *
30 * THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY
31 * KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
32 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF
33 * THIRD PARTY RIGHTS. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS
34 * INCLUDED IN THIS NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT
35 * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
36 * USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
37 * OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
38 * PERFORMANCE OF THE DATA FILES OR SOFTWARE.
39 *
40 * Except as contained in this notice, the name of a copyright holder shall
41 * not be used in advertising or otherwise to promote the sale, use or other
42 * dealings in these Data Files or Software without prior written
43 * authorization of the copyright holder.
44 */
45
46#include <linux/module.h>
47#include <linux/kernel.h>
48#include <linux/string.h>
49#include <linux/nls.h>
50#include <linux/errno.h>
51
52static const wchar_t charset2uni[256] = {
53 /* 0x00 */
54 0x0000, 0x0001, 0x0002, 0x0003,
55 0x0004, 0x0005, 0x0006, 0x0007,
56 0x0008, 0x0009, 0x000a, 0x000b,
57 0x000c, 0x000d, 0x000e, 0x000f,
58 /* 0x10 */
59 0x0010, 0x0011, 0x0012, 0x0013,
60 0x0014, 0x0015, 0x0016, 0x0017,
61 0x0018, 0x0019, 0x001a, 0x001b,
62 0x001c, 0x001d, 0x001e, 0x001f,
63 /* 0x20 */
64 0x0020, 0x0021, 0x0022, 0x0023,
65 0x0024, 0x0025, 0x0026, 0x0027,
66 0x0028, 0x0029, 0x002a, 0x002b,
67 0x002c, 0x002d, 0x002e, 0x002f,
68 /* 0x30 */
69 0x0030, 0x0031, 0x0032, 0x0033,
70 0x0034, 0x0035, 0x0036, 0x0037,
71 0x0038, 0x0039, 0x003a, 0x003b,
72 0x003c, 0x003d, 0x003e, 0x003f,
73 /* 0x40 */
74 0x0040, 0x0041, 0x0042, 0x0043,
75 0x0044, 0x0045, 0x0046, 0x0047,
76 0x0048, 0x0049, 0x004a, 0x004b,
77 0x004c, 0x004d, 0x004e, 0x004f,
78 /* 0x50 */
79 0x0050, 0x0051, 0x0052, 0x0053,
80 0x0054, 0x0055, 0x0056, 0x0057,
81 0x0058, 0x0059, 0x005a, 0x005b,
82 0x005c, 0x005d, 0x005e, 0x005f,
83 /* 0x60 */
84 0x0060, 0x0061, 0x0062, 0x0063,
85 0x0064, 0x0065, 0x0066, 0x0067,
86 0x0068, 0x0069, 0x006a, 0x006b,
87 0x006c, 0x006d, 0x006e, 0x006f,
88 /* 0x70 */
89 0x0070, 0x0071, 0x0072, 0x0073,
90 0x0074, 0x0075, 0x0076, 0x0077,
91 0x0078, 0x0079, 0x007a, 0x007b,
92 0x007c, 0x007d, 0x007e, 0x007f,
93 /* 0x80 */
94 0x00c4, 0x00c5, 0x00c7, 0x00c9,
95 0x00d1, 0x00d6, 0x00dc, 0x00e1,
96 0x00e0, 0x00e2, 0x00e4, 0x00e3,
97 0x00e5, 0x00e7, 0x00e9, 0x00e8,
98 /* 0x90 */
99 0x00ea, 0x00eb, 0x00ed, 0x00ec,
100 0x00ee, 0x00ef, 0x00f1, 0x00f3,
101 0x00f2, 0x00f4, 0x00f6, 0x00f5,
102 0x00fa, 0x00f9, 0x00fb, 0x00fc,
103 /* 0xa0 */
104 0x2020, 0x00b0, 0x00a2, 0x00a3,
105 0x00a7, 0x2022, 0x00b6, 0x00df,
106 0x00ae, 0x00a9, 0x2122, 0x00b4,
107 0x00a8, 0x2260, 0x00c6, 0x00d8,
108 /* 0xb0 */
109 0x221e, 0x00b1, 0x2264, 0x2265,
110 0x00a5, 0x00b5, 0x2202, 0x2211,
111 0x220f, 0x03c0, 0x222b, 0x00aa,
112 0x00ba, 0x03a9, 0x00e6, 0x00f8,
113 /* 0xc0 */
114 0x00bf, 0x00a1, 0x00ac, 0x221a,
115 0x0192, 0x2248, 0x2206, 0x00ab,
116 0x00bb, 0x2026, 0x00a0, 0x00c0,
117 0x00c3, 0x00d5, 0x0152, 0x0153,
118 /* 0xd0 */
119 0x2013, 0x2014, 0x201c, 0x201d,
120 0x2018, 0x2019, 0x00f7, 0x25ca,
121 0x00ff, 0x0178, 0x011e, 0x011f,
122 0x0130, 0x0131, 0x015e, 0x015f,
123 /* 0xe0 */
124 0x2021, 0x00b7, 0x201a, 0x201e,
125 0x2030, 0x00c2, 0x00ca, 0x00c1,
126 0x00cb, 0x00c8, 0x00cd, 0x00ce,
127 0x00cf, 0x00cc, 0x00d3, 0x00d4,
128 /* 0xf0 */
129 0xf8ff, 0x00d2, 0x00da, 0x00db,
130 0x00d9, 0xf8a0, 0x02c6, 0x02dc,
131 0x00af, 0x02d8, 0x02d9, 0x02da,
132 0x00b8, 0x02dd, 0x02db, 0x02c7,
133};
134
135static const unsigned char page00[256] = {
136 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, /* 0x00-0x07 */
137 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, /* 0x08-0x0f */
138 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, /* 0x10-0x17 */
139 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, /* 0x18-0x1f */
140 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, /* 0x20-0x27 */
141 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, /* 0x28-0x2f */
142 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, /* 0x30-0x37 */
143 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f, /* 0x38-0x3f */
144 0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, /* 0x40-0x47 */
145 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f, /* 0x48-0x4f */
146 0x50, 0x51, 0x52, 0x53, 0x54, 0x55, 0x56, 0x57, /* 0x50-0x57 */
147 0x58, 0x59, 0x5a, 0x5b, 0x5c, 0x5d, 0x5e, 0x5f, /* 0x58-0x5f */
148 0x60, 0x61, 0x62, 0x63, 0x64, 0x65, 0x66, 0x67, /* 0x60-0x67 */
149 0x68, 0x69, 0x6a, 0x6b, 0x6c, 0x6d, 0x6e, 0x6f, /* 0x68-0x6f */
150 0x70, 0x71, 0x72, 0x73, 0x74, 0x75, 0x76, 0x77, /* 0x70-0x77 */
151 0x78, 0x79, 0x7a, 0x7b, 0x7c, 0x7d, 0x7e, 0x7f, /* 0x78-0x7f */
152 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
153 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
154 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
155 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
156 0xca, 0xc1, 0xa2, 0xa3, 0x00, 0xb4, 0x00, 0xa4, /* 0xa0-0xa7 */
157 0xac, 0xa9, 0xbb, 0xc7, 0xc2, 0x00, 0xa8, 0xf8, /* 0xa8-0xaf */
158 0xa1, 0xb1, 0x00, 0x00, 0xab, 0xb5, 0xa6, 0xe1, /* 0xb0-0xb7 */
159 0xfc, 0x00, 0xbc, 0xc8, 0x00, 0x00, 0x00, 0xc0, /* 0xb8-0xbf */
160 0xcb, 0xe7, 0xe5, 0xcc, 0x80, 0x81, 0xae, 0x82, /* 0xc0-0xc7 */
161 0xe9, 0x83, 0xe6, 0xe8, 0xed, 0xea, 0xeb, 0xec, /* 0xc8-0xcf */
162 0x00, 0x84, 0xf1, 0xee, 0xef, 0xcd, 0x85, 0x00, /* 0xd0-0xd7 */
163 0xaf, 0xf4, 0xf2, 0xf3, 0x86, 0x00, 0x00, 0xa7, /* 0xd8-0xdf */
164 0x88, 0x87, 0x89, 0x8b, 0x8a, 0x8c, 0xbe, 0x8d, /* 0xe0-0xe7 */
165 0x8f, 0x8e, 0x90, 0x91, 0x93, 0x92, 0x94, 0x95, /* 0xe8-0xef */
166 0x00, 0x96, 0x98, 0x97, 0x99, 0x9b, 0x9a, 0xd6, /* 0xf0-0xf7 */
167 0xbf, 0x9d, 0x9c, 0x9e, 0x9f, 0x00, 0x00, 0xd8, /* 0xf8-0xff */
168};
169
170static const unsigned char page01[256] = {
171 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
172 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
173 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
174 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xda, 0xdb, /* 0x18-0x1f */
175 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
176 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
177 0xdc, 0xdd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
178 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
179 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
180 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
181 0x00, 0x00, 0xce, 0xcf, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
182 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xdf, /* 0x58-0x5f */
183 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
184 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
185 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
186 0xd9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
187 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
188 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
189 0x00, 0x00, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
190 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
191 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
192 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
193 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
194 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
195 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
196 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
197 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
198 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
199 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
200 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
201 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
202 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
203};
204
205static const unsigned char page02[256] = {
206 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
207 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
208 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
209 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
210 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
211 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
212 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
213 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
214 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
215 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
216 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
217 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
218 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
219 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
220 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
221 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
222 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
223 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
224 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
225 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
226 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
227 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
228 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
229 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
230 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xff, /* 0xc0-0xc7 */
231 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
232 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
233 0xf9, 0xfa, 0xfb, 0xfe, 0xf7, 0xfd, 0x00, 0x00, /* 0xd8-0xdf */
234 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
235 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
236 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
237 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
238};
239
240static const unsigned char page03[256] = {
241 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
242 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
243 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
244 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
245 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
246 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
247 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
248 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
249 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
250 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
251 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
252 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
253 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
254 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
255 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
256 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
257 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
258 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
259 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
260 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
261 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
262 0x00, 0xbd, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
263 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
264 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
265 0xb9, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
266 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
267 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
268 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
269 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
270 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
271 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
272 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
273};
274
275static const unsigned char page20[256] = {
276 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
277 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
278 0x00, 0x00, 0x00, 0xd0, 0xd1, 0x00, 0x00, 0x00, /* 0x10-0x17 */
279 0xd4, 0xd5, 0xe2, 0x00, 0xd2, 0xd3, 0xe3, 0x00, /* 0x18-0x1f */
280 0xa0, 0xe0, 0xa5, 0x00, 0x00, 0x00, 0xc9, 0x00, /* 0x20-0x27 */
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
282 0xe4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
283 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
284 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
285 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
286 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
287 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
288 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
289 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
290 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
291 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
292 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
293 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
294 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
295 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
296 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
297 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
298 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
300 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
301 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
302 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
303 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
304 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
305 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
306 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
307 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
308};
309
310static const unsigned char page21[256] = {
311 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
312 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
313 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
314 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
315 0x00, 0x00, 0xaa, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
316 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
317 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
318 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
319 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
320 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
321 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
322 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
323 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
324 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
325 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
326 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
327 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
328 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
329 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
330 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
331 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
332 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
333 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
334 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
335 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
336 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
337 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
338 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
339 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
340 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
341 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
342 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
343};
344
345static const unsigned char page22[256] = {
346 0x00, 0x00, 0xb6, 0x00, 0x00, 0x00, 0xc6, 0x00, /* 0x00-0x07 */
347 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xb8, /* 0x08-0x0f */
348 0x00, 0xb7, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
349 0x00, 0x00, 0xc3, 0x00, 0x00, 0x00, 0xb0, 0x00, /* 0x18-0x1f */
350 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
351 0x00, 0x00, 0x00, 0xba, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
352 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
353 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
354 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
355 0xc5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
356 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
357 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
358 0xad, 0x00, 0x00, 0x00, 0xb2, 0xb3, 0x00, 0x00, /* 0x60-0x67 */
359 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
360 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
361 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
362 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
363 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
364 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
365 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
366 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
367 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
368 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
369 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
370 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
371 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
372 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
373 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
374 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
375 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
376 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
377 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
378};
379
380static const unsigned char page25[256] = {
381 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
382 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
383 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
384 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
385 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
386 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
387 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
388 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
389 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
390 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
391 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
392 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
393 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
394 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
395 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
396 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
397 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
398 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
399 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
400 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
401 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
402 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
403 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
404 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
405 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
406 0x00, 0x00, 0xd7, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
407 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
408 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
409 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
410 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
411 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
412 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf8-0xff */
413};
414
415static const unsigned char pagef8[256] = {
416 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x00-0x07 */
417 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x08-0x0f */
418 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x10-0x17 */
419 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x18-0x1f */
420 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x20-0x27 */
421 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x28-0x2f */
422 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x30-0x37 */
423 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x38-0x3f */
424 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x40-0x47 */
425 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x48-0x4f */
426 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x50-0x57 */
427 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x58-0x5f */
428 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x60-0x67 */
429 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x68-0x6f */
430 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x70-0x77 */
431 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x78-0x7f */
432 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x80-0x87 */
433 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x88-0x8f */
434 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x90-0x97 */
435 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0x98-0x9f */
436 0xf5, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa0-0xa7 */
437 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xa8-0xaf */
438 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb0-0xb7 */
439 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xb8-0xbf */
440 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc0-0xc7 */
441 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xc8-0xcf */
442 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd0-0xd7 */
443 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xd8-0xdf */
444 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe0-0xe7 */
445 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xe8-0xef */
446 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 0xf0-0xf7 */
447 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf0, /* 0xf8-0xff */
448};
449
450static const unsigned char *const page_uni2charset[256] = {
451 page00, page01, page02, page03, NULL, NULL, NULL, NULL,
452 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
453 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
454 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
455 page20, page21, page22, NULL, NULL, page25, NULL, NULL,
456 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
457 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
458 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
459 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
460 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
461 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
462 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
463 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
464 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
465 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
466 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
467 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
468 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
469 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
470 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
471 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
472 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
473 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
474 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
475 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
476 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
477 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
478 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
479 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
480 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
481 NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
482 pagef8, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
483};
484
485static const unsigned char charset2lower[256] = {
486 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
487 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
488 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
489 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
490 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
491 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
492 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
493 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
494 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
495 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
496 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
497 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
498 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
499 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
500 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
501 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
502 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
503 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
504 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
505 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
506 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
507 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
508 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
509 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
510 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
511 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
512 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
513 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
514 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
515 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
516 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
517 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
518};
519
520static const unsigned char charset2upper[256] = {
521 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x00-0x07 */
522 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x08-0x0f */
523 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x10-0x17 */
524 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x18-0x1f */
525 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x20-0x27 */
526 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x28-0x2f */
527 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x30-0x37 */
528 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x38-0x3f */
529 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x40-0x47 */
530 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x48-0x4f */
531 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x50-0x57 */
532 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x58-0x5f */
533 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x60-0x67 */
534 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x68-0x6f */
535 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x70-0x77 */
536 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x78-0x7f */
537 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x80-0x87 */
538 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x88-0x8f */
539 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x90-0x97 */
540 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0x98-0x9f */
541 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa0-0xa7 */
542 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xa8-0xaf */
543 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb0-0xb7 */
544 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xb8-0xbf */
545 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc0-0xc7 */
546 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xc8-0xcf */
547 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd0-0xd7 */
548 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xd8-0xdf */
549 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe0-0xe7 */
550 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xe8-0xef */
551 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf0-0xf7 */
552 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, /* 0xf8-0xff */
553};
554
555static int uni2char(wchar_t uni, unsigned char *out, int boundlen)
556{
557 const unsigned char *uni2charset;
558 unsigned char cl = uni & 0x00ff;
559 unsigned char ch = (uni & 0xff00) >> 8;
560
561 if (boundlen <= 0)
562 return -ENAMETOOLONG;
563
564 uni2charset = page_uni2charset[ch];
565 if (uni2charset && uni2charset[cl])
566 out[0] = uni2charset[cl];
567 else
568 return -EINVAL;
569 return 1;
570}
571
572static int char2uni(const unsigned char *rawstring, int boundlen, wchar_t *uni)
573{
574 *uni = charset2uni[*rawstring];
575 if (*uni == 0x0000)
576 return -EINVAL;
577 return 1;
578}
579
580static struct nls_table table = {
581 .charset = "macturkish",
582 .uni2char = uni2char,
583 .char2uni = char2uni,
584 .charset2lower = charset2lower,
585 .charset2upper = charset2upper,
586 .owner = THIS_MODULE,
587};
588
589static int __init init_nls_macturkish(void)
590{
591 return register_nls(&table);
592}
593
594static void __exit exit_nls_macturkish(void)
595{
596 unregister_nls(&table);
597}
598
599module_init(init_nls_macturkish)
600module_exit(exit_nls_macturkish)
601
602MODULE_LICENSE("Dual BSD/GPL");
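
The NLS "macturkish" table code above follows the standard two-level scheme used by the other fs/nls tables: charset2uni[] maps each 8-bit Mac Turkish byte directly to a Unicode code point, while page_uni2charset[] splits a code point into a high byte (page) and a low byte (index), so only the sparse pages actually used (0x00-0x03, 0x20-0x22, 0x25 and 0xf8) need a 256-entry table; a zero entry means "no mapping" and makes uni2char() return -EINVAL. A minimal userspace sketch of that lookup, reproducing just two mappings from the tables above (U+00C4 -> 0x80 and U+0130 -> 0xDC), might look like this; it is illustrative only, not kernel code:

/*
 * Toy userspace illustration of the two-level NLS lookup above.
 * Only two sample mappings are reproduced.
 */
#include <stdio.h>

static const unsigned short sample_charset2uni[256] = {
	[0x80] = 0x00c4,	/* A with diaeresis */
	[0xdc] = 0x0130,	/* capital I with dot above */
};

static const unsigned char sample_page00[256] = { [0xc4] = 0x80 };
static const unsigned char sample_page01[256] = { [0x30] = 0xdc };

static const unsigned char *const sample_uni2charset[256] = {
	[0x00] = sample_page00,
	[0x01] = sample_page01,
};

static int sample_uni2char(unsigned short uni, unsigned char *out)
{
	const unsigned char *page = sample_uni2charset[(uni >> 8) & 0xff];
	unsigned char lo = uni & 0xff;

	if (page && page[lo]) {		/* a zero entry means "no mapping" */
		*out = page[lo];
		return 1;
	}
	return -1;			/* the kernel returns -EINVAL here */
}

int main(void)
{
	unsigned char c;

	if (sample_uni2char(0x0130, &c) == 1)
		printf("U+0130 -> 0x%02x\n", c);		/* 0xdc */
	printf("0x80   -> U+%04X\n", sample_charset2uni[0x80]);	/* U+00C4 */
	return 0;
}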
diff --git a/fs/notify/fsnotify.c b/fs/notify/fsnotify.c
index ccb14d3fc0de..b39c5c161adb 100644
--- a/fs/notify/fsnotify.c
+++ b/fs/notify/fsnotify.c
@@ -123,7 +123,7 @@ int __fsnotify_parent(struct path *path, struct dentry *dentry, __u32 mask)
123} 123}
124EXPORT_SYMBOL_GPL(__fsnotify_parent); 124EXPORT_SYMBOL_GPL(__fsnotify_parent);
125 125
126static int send_to_group(struct inode *to_tell, struct vfsmount *mnt, 126static int send_to_group(struct inode *to_tell,
127 struct fsnotify_mark *inode_mark, 127 struct fsnotify_mark *inode_mark,
128 struct fsnotify_mark *vfsmount_mark, 128 struct fsnotify_mark *vfsmount_mark,
129 __u32 mask, void *data, 129 __u32 mask, void *data,
@@ -168,10 +168,10 @@ static int send_to_group(struct inode *to_tell, struct vfsmount *mnt,
168 vfsmount_test_mask &= ~inode_mark->ignored_mask; 168 vfsmount_test_mask &= ~inode_mark->ignored_mask;
169 } 169 }
170 170
171 pr_debug("%s: group=%p to_tell=%p mnt=%p mask=%x inode_mark=%p" 171 pr_debug("%s: group=%p to_tell=%p mask=%x inode_mark=%p"
172 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x" 172 " inode_test_mask=%x vfsmount_mark=%p vfsmount_test_mask=%x"
173 " data=%p data_is=%d cookie=%d event=%p\n", 173 " data=%p data_is=%d cookie=%d event=%p\n",
174 __func__, group, to_tell, mnt, mask, inode_mark, 174 __func__, group, to_tell, mask, inode_mark,
175 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data, 175 inode_test_mask, vfsmount_mark, vfsmount_test_mask, data,
176 data_is, cookie, *event); 176 data_is, cookie, *event);
177 177
@@ -258,16 +258,16 @@ int fsnotify(struct inode *to_tell, __u32 mask, void *data, int data_is,
258 258
259 if (inode_group > vfsmount_group) { 259 if (inode_group > vfsmount_group) {
260 /* handle inode */ 260 /* handle inode */
261 ret = send_to_group(to_tell, NULL, inode_mark, NULL, mask, data, 261 ret = send_to_group(to_tell, inode_mark, NULL, mask, data,
262 data_is, cookie, file_name, &event); 262 data_is, cookie, file_name, &event);
263 /* we didn't use the vfsmount_mark */ 263 /* we didn't use the vfsmount_mark */
264 vfsmount_group = NULL; 264 vfsmount_group = NULL;
265 } else if (vfsmount_group > inode_group) { 265 } else if (vfsmount_group > inode_group) {
266 ret = send_to_group(to_tell, &mnt->mnt, NULL, vfsmount_mark, mask, data, 266 ret = send_to_group(to_tell, NULL, vfsmount_mark, mask, data,
267 data_is, cookie, file_name, &event); 267 data_is, cookie, file_name, &event);
268 inode_group = NULL; 268 inode_group = NULL;
269 } else { 269 } else {
270 ret = send_to_group(to_tell, &mnt->mnt, inode_mark, vfsmount_mark, 270 ret = send_to_group(to_tell, inode_mark, vfsmount_mark,
271 mask, data, data_is, cookie, file_name, 271 mask, data, data_is, cookie, file_name,
272 &event); 272 &event);
273 } 273 }
diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
index 8639169221c7..7389d2d5e51d 100644
--- a/fs/ntfs/file.c
+++ b/fs/ntfs/file.c
@@ -2096,7 +2096,9 @@ static ssize_t ntfs_file_aio_write_nolock(struct kiocb *iocb,
2096 err = file_remove_suid(file); 2096 err = file_remove_suid(file);
2097 if (err) 2097 if (err)
2098 goto out; 2098 goto out;
2099 file_update_time(file); 2099 err = file_update_time(file);
2100 if (err)
2101 goto out;
2100 written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos, 2102 written = ntfs_file_buffered_write(iocb, iov, nr_segs, pos, ppos,
2101 count); 2103 count);
2102out: 2104out:
diff --git a/fs/ocfs2/blockcheck.c b/fs/ocfs2/blockcheck.c
index c7ee03c22226..0725e6054650 100644
--- a/fs/ocfs2/blockcheck.c
+++ b/fs/ocfs2/blockcheck.c
@@ -422,45 +422,46 @@ int ocfs2_block_check_validate(void *data, size_t blocksize,
422 struct ocfs2_blockcheck_stats *stats) 422 struct ocfs2_blockcheck_stats *stats)
423{ 423{
424 int rc = 0; 424 int rc = 0;
425 struct ocfs2_block_check check; 425 u32 bc_crc32e;
426 u16 bc_ecc;
426 u32 crc, ecc; 427 u32 crc, ecc;
427 428
428 ocfs2_blockcheck_inc_check(stats); 429 ocfs2_blockcheck_inc_check(stats);
429 430
430 check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); 431 bc_crc32e = le32_to_cpu(bc->bc_crc32e);
431 check.bc_ecc = le16_to_cpu(bc->bc_ecc); 432 bc_ecc = le16_to_cpu(bc->bc_ecc);
432 433
433 memset(bc, 0, sizeof(struct ocfs2_block_check)); 434 memset(bc, 0, sizeof(struct ocfs2_block_check));
434 435
435 /* Fast path - if the crc32 validates, we're good to go */ 436 /* Fast path - if the crc32 validates, we're good to go */
436 crc = crc32_le(~0, data, blocksize); 437 crc = crc32_le(~0, data, blocksize);
437 if (crc == check.bc_crc32e) 438 if (crc == bc_crc32e)
438 goto out; 439 goto out;
439 440
440 ocfs2_blockcheck_inc_failure(stats); 441 ocfs2_blockcheck_inc_failure(stats);
441 mlog(ML_ERROR, 442 mlog(ML_ERROR,
442 "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n", 443 "CRC32 failed: stored: 0x%x, computed 0x%x. Applying ECC.\n",
443 (unsigned int)check.bc_crc32e, (unsigned int)crc); 444 (unsigned int)bc_crc32e, (unsigned int)crc);
444 445
445 /* Ok, try ECC fixups */ 446 /* Ok, try ECC fixups */
446 ecc = ocfs2_hamming_encode_block(data, blocksize); 447 ecc = ocfs2_hamming_encode_block(data, blocksize);
447 ocfs2_hamming_fix_block(data, blocksize, ecc ^ check.bc_ecc); 448 ocfs2_hamming_fix_block(data, blocksize, ecc ^ bc_ecc);
448 449
449 /* And check the crc32 again */ 450 /* And check the crc32 again */
450 crc = crc32_le(~0, data, blocksize); 451 crc = crc32_le(~0, data, blocksize);
451 if (crc == check.bc_crc32e) { 452 if (crc == bc_crc32e) {
452 ocfs2_blockcheck_inc_recover(stats); 453 ocfs2_blockcheck_inc_recover(stats);
453 goto out; 454 goto out;
454 } 455 }
455 456
456 mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n", 457 mlog(ML_ERROR, "Fixed CRC32 failed: stored: 0x%x, computed 0x%x\n",
457 (unsigned int)check.bc_crc32e, (unsigned int)crc); 458 (unsigned int)bc_crc32e, (unsigned int)crc);
458 459
459 rc = -EIO; 460 rc = -EIO;
460 461
461out: 462out:
462 bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); 463 bc->bc_crc32e = cpu_to_le32(bc_crc32e);
463 bc->bc_ecc = cpu_to_le16(check.bc_ecc); 464 bc->bc_ecc = cpu_to_le16(bc_ecc);
464 465
465 return rc; 466 return rc;
466} 467}
@@ -528,7 +529,8 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
528 struct ocfs2_blockcheck_stats *stats) 529 struct ocfs2_blockcheck_stats *stats)
529{ 530{
530 int i, rc = 0; 531 int i, rc = 0;
531 struct ocfs2_block_check check; 532 u32 bc_crc32e;
533 u16 bc_ecc;
532 u32 crc, ecc, fix; 534 u32 crc, ecc, fix;
533 535
534 BUG_ON(nr < 0); 536 BUG_ON(nr < 0);
@@ -538,21 +540,21 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
538 540
539 ocfs2_blockcheck_inc_check(stats); 541 ocfs2_blockcheck_inc_check(stats);
540 542
541 check.bc_crc32e = le32_to_cpu(bc->bc_crc32e); 543 bc_crc32e = le32_to_cpu(bc->bc_crc32e);
542 check.bc_ecc = le16_to_cpu(bc->bc_ecc); 544 bc_ecc = le16_to_cpu(bc->bc_ecc);
543 545
544 memset(bc, 0, sizeof(struct ocfs2_block_check)); 546 memset(bc, 0, sizeof(struct ocfs2_block_check));
545 547
546 /* Fast path - if the crc32 validates, we're good to go */ 548 /* Fast path - if the crc32 validates, we're good to go */
547 for (i = 0, crc = ~0; i < nr; i++) 549 for (i = 0, crc = ~0; i < nr; i++)
548 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); 550 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
549 if (crc == check.bc_crc32e) 551 if (crc == bc_crc32e)
550 goto out; 552 goto out;
551 553
552 ocfs2_blockcheck_inc_failure(stats); 554 ocfs2_blockcheck_inc_failure(stats);
553 mlog(ML_ERROR, 555 mlog(ML_ERROR,
554 "CRC32 failed: stored: %u, computed %u. Applying ECC.\n", 556 "CRC32 failed: stored: %u, computed %u. Applying ECC.\n",
555 (unsigned int)check.bc_crc32e, (unsigned int)crc); 557 (unsigned int)bc_crc32e, (unsigned int)crc);
556 558
557 /* Ok, try ECC fixups */ 559 /* Ok, try ECC fixups */
558 for (i = 0, ecc = 0; i < nr; i++) { 560 for (i = 0, ecc = 0; i < nr; i++) {
@@ -565,7 +567,7 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
565 bhs[i]->b_size * 8, 567 bhs[i]->b_size * 8,
566 bhs[i]->b_size * 8 * i); 568 bhs[i]->b_size * 8 * i);
567 } 569 }
568 fix = ecc ^ check.bc_ecc; 570 fix = ecc ^ bc_ecc;
569 for (i = 0; i < nr; i++) { 571 for (i = 0; i < nr; i++) {
570 /* 572 /*
571 * Try the fix against each buffer. It will only affect 573 * Try the fix against each buffer. It will only affect
@@ -578,19 +580,19 @@ int ocfs2_block_check_validate_bhs(struct buffer_head **bhs, int nr,
578 /* And check the crc32 again */ 580 /* And check the crc32 again */
579 for (i = 0, crc = ~0; i < nr; i++) 581 for (i = 0, crc = ~0; i < nr; i++)
580 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size); 582 crc = crc32_le(crc, bhs[i]->b_data, bhs[i]->b_size);
581 if (crc == check.bc_crc32e) { 583 if (crc == bc_crc32e) {
582 ocfs2_blockcheck_inc_recover(stats); 584 ocfs2_blockcheck_inc_recover(stats);
583 goto out; 585 goto out;
584 } 586 }
585 587
586 mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n", 588 mlog(ML_ERROR, "Fixed CRC32 failed: stored: %u, computed %u\n",
587 (unsigned int)check.bc_crc32e, (unsigned int)crc); 589 (unsigned int)bc_crc32e, (unsigned int)crc);
588 590
589 rc = -EIO; 591 rc = -EIO;
590 592
591out: 593out:
592 bc->bc_crc32e = cpu_to_le32(check.bc_crc32e); 594 bc->bc_crc32e = cpu_to_le32(bc_crc32e);
593 bc->bc_ecc = cpu_to_le16(check.bc_ecc); 595 bc->bc_ecc = cpu_to_le16(bc_ecc);
594 596
595 return rc; 597 return rc;
596} 598}
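
The blockcheck change above is purely an endianness cleanup for sparse: the on-stack struct ocfs2_block_check (whose fields are little-endian on disk) is replaced with plain host-endian locals, while the validation flow itself is unchanged: decode the stored CRC32 and ECC, zero the check field, recompute the CRC over the block, and only on a mismatch apply the Hamming fix-up and re-check. A rough standalone sketch of that flow follows; crc32_le() here is a plain bitwise reflected CRC-32 with a caller-supplied seed, and the two Hamming helpers are stubs standing in for ocfs2_hamming_encode_block()/ocfs2_hamming_fix_block():

#include <errno.h>
#include <stddef.h>
#include <string.h>

struct sample_block_check {
	unsigned int   bc_crc32e;	/* __le32 on disk */
	unsigned short bc_ecc;		/* __le16 on disk */
};

/* Bitwise reflected CRC-32 (poly 0xedb88320), seed supplied by caller. */
static unsigned int crc32_le(unsigned int crc, const unsigned char *p, size_t len)
{
	size_t i;
	int bit;

	for (i = 0; i < len; i++) {
		crc ^= p[i];
		for (bit = 0; bit < 8; bit++)
			crc = (crc >> 1) ^ ((crc & 1) ? 0xedb88320u : 0);
	}
	return crc;
}

/* Stubs: the real helpers compute a Hamming code and flip the bad bit. */
static unsigned int hamming_encode(const void *d, size_t len) { (void)d; (void)len; return 0; }
static void hamming_fix(void *d, size_t len, unsigned int fix) { (void)d; (void)len; (void)fix; }

static int validate_block(unsigned char *data, size_t len, struct sample_block_check *bc)
{
	unsigned int   bc_crc32e = bc->bc_crc32e;  /* le32_to_cpu() in the kernel */
	unsigned short bc_ecc    = bc->bc_ecc;     /* le16_to_cpu() in the kernel */
	unsigned int crc, ecc;
	int rc = 0;

	/* The check field is zeroed so it does not feed into its own CRC. */
	memset(bc, 0, sizeof(*bc));

	crc = crc32_le(~0u, data, len);
	if (crc == bc_crc32e)
		goto out;			/* fast path: block is clean */

	/* Mismatch: try the single-bit ECC repair, then check the CRC again. */
	ecc = hamming_encode(data, len);
	hamming_fix(data, len, ecc ^ bc_ecc);

	crc = crc32_le(~0u, data, len);
	if (crc != bc_crc32e)
		rc = -EIO;
out:
	bc->bc_crc32e = bc_crc32e;		/* cpu_to_le32() in the kernel */
	bc->bc_ecc = bc_ecc;
	return rc;
}

int main(void)
{
	unsigned char blk[16] = "sample payload";
	struct sample_block_check bc = { 0, 0 };

	/* What the writer side would store before the block goes to disk. */
	bc.bc_crc32e = crc32_le(~0u, blk, sizeof(blk));
	return validate_block(blk, sizeof(blk), &bc);	/* 0: CRC matches */
}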
diff --git a/fs/ocfs2/dlm/dlmast.c b/fs/ocfs2/dlm/dlmast.c
index 3a3ed4bb794b..fbec0be62326 100644
--- a/fs/ocfs2/dlm/dlmast.c
+++ b/fs/ocfs2/dlm/dlmast.c
@@ -293,7 +293,7 @@ int dlm_proxy_ast_handler(struct o2net_msg *msg, u32 len, void *data,
293 struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf; 293 struct dlm_proxy_ast *past = (struct dlm_proxy_ast *) msg->buf;
294 char *name; 294 char *name;
295 struct list_head *iter, *head=NULL; 295 struct list_head *iter, *head=NULL;
296 u64 cookie; 296 __be64 cookie;
297 u32 flags; 297 u32 flags;
298 u8 node; 298 u8 node;
299 299
diff --git a/fs/ocfs2/dlm/dlmcommon.h b/fs/ocfs2/dlm/dlmcommon.h
index a5952ceecba5..de854cca12a2 100644
--- a/fs/ocfs2/dlm/dlmcommon.h
+++ b/fs/ocfs2/dlm/dlmcommon.h
@@ -679,7 +679,7 @@ struct dlm_query_join_packet {
679}; 679};
680 680
681union dlm_query_join_response { 681union dlm_query_join_response {
682 u32 intval; 682 __be32 intval;
683 struct dlm_query_join_packet packet; 683 struct dlm_query_join_packet packet;
684}; 684};
685 685
@@ -755,8 +755,8 @@ struct dlm_query_region {
755struct dlm_node_info { 755struct dlm_node_info {
756 u8 ni_nodenum; 756 u8 ni_nodenum;
757 u8 pad1; 757 u8 pad1;
758 u16 ni_ipv4_port; 758 __be16 ni_ipv4_port;
759 u32 ni_ipv4_address; 759 __be32 ni_ipv4_address;
760}; 760};
761 761
762struct dlm_query_nodeinfo { 762struct dlm_query_nodeinfo {
diff --git a/fs/ocfs2/dlm/dlmdomain.c b/fs/ocfs2/dlm/dlmdomain.c
index 92f2ead0fab6..9e89d70df337 100644
--- a/fs/ocfs2/dlm/dlmdomain.c
+++ b/fs/ocfs2/dlm/dlmdomain.c
@@ -818,7 +818,7 @@ static void dlm_query_join_packet_to_wire(struct dlm_query_join_packet *packet,
818 union dlm_query_join_response response; 818 union dlm_query_join_response response;
819 819
820 response.packet = *packet; 820 response.packet = *packet;
821 *wire = cpu_to_be32(response.intval); 821 *wire = be32_to_cpu(response.intval);
822} 822}
823 823
824static void dlm_query_join_wire_to_packet(u32 wire, 824static void dlm_query_join_wire_to_packet(u32 wire,
diff --git a/fs/ocfs2/export.c b/fs/ocfs2/export.c
index 745db42528d5..322216a5f0dd 100644
--- a/fs/ocfs2/export.c
+++ b/fs/ocfs2/export.c
@@ -177,21 +177,23 @@ bail:
177 return parent; 177 return parent;
178} 178}
179 179
180static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len, 180static int ocfs2_encode_fh(struct inode *inode, u32 *fh_in, int *max_len,
181 int connectable) 181 struct inode *parent)
182{ 182{
183 struct inode *inode = dentry->d_inode;
184 int len = *max_len; 183 int len = *max_len;
185 int type = 1; 184 int type = 1;
186 u64 blkno; 185 u64 blkno;
187 u32 generation; 186 u32 generation;
188 __le32 *fh = (__force __le32 *) fh_in; 187 __le32 *fh = (__force __le32 *) fh_in;
189 188
189#ifdef TRACE_HOOKS_ARE_NOT_BRAINDEAD_IN_YOUR_OPINION
190#error "You go ahead and fix that mess, then. Somehow"
190 trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len, 191 trace_ocfs2_encode_fh_begin(dentry, dentry->d_name.len,
191 dentry->d_name.name, 192 dentry->d_name.name,
192 fh, len, connectable); 193 fh, len, connectable);
194#endif
193 195
194 if (connectable && (len < 6)) { 196 if (parent && (len < 6)) {
195 *max_len = 6; 197 *max_len = 6;
196 type = 255; 198 type = 255;
197 goto bail; 199 goto bail;
@@ -211,12 +213,7 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
211 fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff)); 213 fh[1] = cpu_to_le32((u32)(blkno & 0xffffffff));
212 fh[2] = cpu_to_le32(generation); 214 fh[2] = cpu_to_le32(generation);
213 215
214 if (connectable && !S_ISDIR(inode->i_mode)) { 216 if (parent) {
215 struct inode *parent;
216
217 spin_lock(&dentry->d_lock);
218
219 parent = dentry->d_parent->d_inode;
220 blkno = OCFS2_I(parent)->ip_blkno; 217 blkno = OCFS2_I(parent)->ip_blkno;
221 generation = parent->i_generation; 218 generation = parent->i_generation;
222 219
@@ -224,8 +221,6 @@ static int ocfs2_encode_fh(struct dentry *dentry, u32 *fh_in, int *max_len,
224 fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff)); 221 fh[4] = cpu_to_le32((u32)(blkno & 0xffffffff));
225 fh[5] = cpu_to_le32(generation); 222 fh[5] = cpu_to_le32(generation);
226 223
227 spin_unlock(&dentry->d_lock);
228
229 len = 6; 224 len = 6;
230 type = 2; 225 type = 2;
231 226
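
The export.c hunk swaps the old dentry/connectable encode_fh interface for one that is handed the inode and (optionally) the parent inode directly, but the handle layout is untouched: three 32-bit little-endian words per inode, the 64-bit block number split across two words plus the generation, six words (type 2) when a parent is included, and type 255 when the caller's buffer is too small. A hypothetical userspace sketch of that packing follows; fh[0]/fh[3] holding the high halves of the block numbers is inferred, since only fh[1]/fh[2]/fh[4]/fh[5] appear in the hunk, and the kernel additionally stores every word via cpu_to_le32():

#include <stdint.h>
#include <stdio.h>

/* Toy encoder mirroring the handle layout described above. */
static int sample_encode_fh(uint32_t *fh, int *max_len,
			    uint64_t blkno, uint32_t generation,
			    const uint64_t *parent_blkno, uint32_t parent_gen)
{
	int len = parent_blkno ? 6 : 3;

	if (*max_len < len) {
		*max_len = len;
		return 255;		/* "buffer too small", as in the patch */
	}

	fh[0] = (uint32_t)(blkno >> 32);
	fh[1] = (uint32_t)(blkno & 0xffffffff);
	fh[2] = generation;

	if (parent_blkno) {
		fh[3] = (uint32_t)(*parent_blkno >> 32);
		fh[4] = (uint32_t)(*parent_blkno & 0xffffffff);
		fh[5] = parent_gen;
	}

	*max_len = len;
	return parent_blkno ? 2 : 1;	/* handle type, as in the patch */
}

int main(void)
{
	uint32_t fh[6];
	int max_len = 6;
	uint64_t parent_blkno = 0x2000;
	int type = sample_encode_fh(fh, &max_len, 0x100000abcULL, 7,
				    &parent_blkno, 3);

	printf("type=%d len=%d blkno=0x%x%08x gen=%u\n",
	       type, max_len, fh[0], fh[1], fh[2]);
	return 0;
}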
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 735514ca400f..d89e08a81eda 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -273,11 +273,13 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
273 inode->i_gid = le32_to_cpu(fe->i_gid); 273 inode->i_gid = le32_to_cpu(fe->i_gid);
274 274
275 /* Fast symlinks will have i_size but no allocated clusters. */ 275 /* Fast symlinks will have i_size but no allocated clusters. */
276 if (S_ISLNK(inode->i_mode) && !fe->i_clusters) 276 if (S_ISLNK(inode->i_mode) && !fe->i_clusters) {
277 inode->i_blocks = 0; 277 inode->i_blocks = 0;
278 else 278 inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
279 } else {
279 inode->i_blocks = ocfs2_inode_sector_count(inode); 280 inode->i_blocks = ocfs2_inode_sector_count(inode);
280 inode->i_mapping->a_ops = &ocfs2_aops; 281 inode->i_mapping->a_ops = &ocfs2_aops;
282 }
281 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime); 283 inode->i_atime.tv_sec = le64_to_cpu(fe->i_atime);
282 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec); 284 inode->i_atime.tv_nsec = le32_to_cpu(fe->i_atime_nsec);
283 inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime); 285 inode->i_mtime.tv_sec = le64_to_cpu(fe->i_mtime);
@@ -331,10 +333,7 @@ void ocfs2_populate_inode(struct inode *inode, struct ocfs2_dinode *fe,
331 OCFS2_I(inode)->ip_dir_lock_gen = 1; 333 OCFS2_I(inode)->ip_dir_lock_gen = 1;
332 break; 334 break;
333 case S_IFLNK: 335 case S_IFLNK:
334 if (ocfs2_inode_is_fast_symlink(inode)) 336 inode->i_op = &ocfs2_symlink_inode_operations;
335 inode->i_op = &ocfs2_fast_symlink_inode_operations;
336 else
337 inode->i_op = &ocfs2_symlink_inode_operations;
338 i_size_write(inode, le64_to_cpu(fe->i_size)); 337 i_size_write(inode, le64_to_cpu(fe->i_size));
339 break; 338 break;
340 default: 339 default:
diff --git a/fs/ocfs2/ioctl.c b/fs/ocfs2/ioctl.c
index a1a1bfd652c9..d96f7f81d8dd 100644
--- a/fs/ocfs2/ioctl.c
+++ b/fs/ocfs2/ioctl.c
@@ -864,7 +864,7 @@ int ocfs2_info_handle(struct inode *inode, struct ocfs2_info *info,
864 if (status) 864 if (status)
865 break; 865 break;
866 866
867 reqp = (struct ocfs2_info_request *)(unsigned long)req_addr; 867 reqp = (struct ocfs2_info_request __user *)(unsigned long)req_addr;
868 if (!reqp) { 868 if (!reqp) {
869 status = -EINVAL; 869 status = -EINVAL;
870 goto bail; 870 goto bail;
@@ -888,9 +888,11 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
888 struct ocfs2_space_resv sr; 888 struct ocfs2_space_resv sr;
889 struct ocfs2_new_group_input input; 889 struct ocfs2_new_group_input input;
890 struct reflink_arguments args; 890 struct reflink_arguments args;
891 const char *old_path, *new_path; 891 const char __user *old_path;
892 const char __user *new_path;
892 bool preserve; 893 bool preserve;
893 struct ocfs2_info info; 894 struct ocfs2_info info;
895 void __user *argp = (void __user *)arg;
894 896
895 switch (cmd) { 897 switch (cmd) {
896 case OCFS2_IOC_GETFLAGS: 898 case OCFS2_IOC_GETFLAGS:
@@ -937,17 +939,15 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
937 939
938 return ocfs2_group_add(inode, &input); 940 return ocfs2_group_add(inode, &input);
939 case OCFS2_IOC_REFLINK: 941 case OCFS2_IOC_REFLINK:
940 if (copy_from_user(&args, (struct reflink_arguments *)arg, 942 if (copy_from_user(&args, argp, sizeof(args)))
941 sizeof(args)))
942 return -EFAULT; 943 return -EFAULT;
943 old_path = (const char *)(unsigned long)args.old_path; 944 old_path = (const char __user *)(unsigned long)args.old_path;
944 new_path = (const char *)(unsigned long)args.new_path; 945 new_path = (const char __user *)(unsigned long)args.new_path;
945 preserve = (args.preserve != 0); 946 preserve = (args.preserve != 0);
946 947
947 return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve); 948 return ocfs2_reflink_ioctl(inode, old_path, new_path, preserve);
948 case OCFS2_IOC_INFO: 949 case OCFS2_IOC_INFO:
949 if (copy_from_user(&info, (struct ocfs2_info __user *)arg, 950 if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
950 sizeof(struct ocfs2_info)))
951 return -EFAULT; 951 return -EFAULT;
952 952
953 return ocfs2_info_handle(inode, &info, 0); 953 return ocfs2_info_handle(inode, &info, 0);
@@ -960,22 +960,20 @@ long ocfs2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
960 if (!capable(CAP_SYS_ADMIN)) 960 if (!capable(CAP_SYS_ADMIN))
961 return -EPERM; 961 return -EPERM;
962 962
963 if (copy_from_user(&range, (struct fstrim_range *)arg, 963 if (copy_from_user(&range, argp, sizeof(range)))
964 sizeof(range)))
965 return -EFAULT; 964 return -EFAULT;
966 965
967 ret = ocfs2_trim_fs(sb, &range); 966 ret = ocfs2_trim_fs(sb, &range);
968 if (ret < 0) 967 if (ret < 0)
969 return ret; 968 return ret;
970 969
971 if (copy_to_user((struct fstrim_range *)arg, &range, 970 if (copy_to_user(argp, &range, sizeof(range)))
972 sizeof(range)))
973 return -EFAULT; 971 return -EFAULT;
974 972
975 return 0; 973 return 0;
976 } 974 }
977 case OCFS2_IOC_MOVE_EXT: 975 case OCFS2_IOC_MOVE_EXT:
978 return ocfs2_ioctl_move_extents(filp, (void __user *)arg); 976 return ocfs2_ioctl_move_extents(filp, argp);
979 default: 977 default:
980 return -ENOTTY; 978 return -ENOTTY;
981 } 979 }
@@ -988,6 +986,7 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
988 struct reflink_arguments args; 986 struct reflink_arguments args;
989 struct inode *inode = file->f_path.dentry->d_inode; 987 struct inode *inode = file->f_path.dentry->d_inode;
990 struct ocfs2_info info; 988 struct ocfs2_info info;
989 void __user *argp = (void __user *)arg;
991 990
992 switch (cmd) { 991 switch (cmd) {
993 case OCFS2_IOC32_GETFLAGS: 992 case OCFS2_IOC32_GETFLAGS:
@@ -1006,16 +1005,14 @@ long ocfs2_compat_ioctl(struct file *file, unsigned cmd, unsigned long arg)
1006 case FITRIM: 1005 case FITRIM:
1007 break; 1006 break;
1008 case OCFS2_IOC_REFLINK: 1007 case OCFS2_IOC_REFLINK:
1009 if (copy_from_user(&args, (struct reflink_arguments *)arg, 1008 if (copy_from_user(&args, argp, sizeof(args)))
1010 sizeof(args)))
1011 return -EFAULT; 1009 return -EFAULT;
1012 preserve = (args.preserve != 0); 1010 preserve = (args.preserve != 0);
1013 1011
1014 return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path), 1012 return ocfs2_reflink_ioctl(inode, compat_ptr(args.old_path),
1015 compat_ptr(args.new_path), preserve); 1013 compat_ptr(args.new_path), preserve);
1016 case OCFS2_IOC_INFO: 1014 case OCFS2_IOC_INFO:
1017 if (copy_from_user(&info, (struct ocfs2_info __user *)arg, 1015 if (copy_from_user(&info, argp, sizeof(struct ocfs2_info)))
1018 sizeof(struct ocfs2_info)))
1019 return -EFAULT; 1016 return -EFAULT;
1020 1017
1021 return ocfs2_info_handle(inode, &info, 1); 1018 return ocfs2_info_handle(inode, &info, 1);
diff --git a/fs/ocfs2/move_extents.c b/fs/ocfs2/move_extents.c
index b1e3fce72ea4..6083432f667e 100644
--- a/fs/ocfs2/move_extents.c
+++ b/fs/ocfs2/move_extents.c
@@ -1082,8 +1082,7 @@ int ocfs2_ioctl_move_extents(struct file *filp, void __user *argp)
1082 context->file = filp; 1082 context->file = filp;
1083 1083
1084 if (argp) { 1084 if (argp) {
1085 if (copy_from_user(&range, (struct ocfs2_move_extents *)argp, 1085 if (copy_from_user(&range, argp, sizeof(range))) {
1086 sizeof(range))) {
1087 status = -EFAULT; 1086 status = -EFAULT;
1088 goto out; 1087 goto out;
1089 } 1088 }
@@ -1138,8 +1137,7 @@ out:
1138 * length and new_offset even if failure happens somewhere. 1137 * length and new_offset even if failure happens somewhere.
1139 */ 1138 */
1140 if (argp) { 1139 if (argp) {
1141 if (copy_to_user((struct ocfs2_move_extents *)argp, &range, 1140 if (copy_to_user(argp, &range, sizeof(range)))
1142 sizeof(range)))
1143 status = -EFAULT; 1141 status = -EFAULT;
1144 } 1142 }
1145 1143
diff --git a/fs/ocfs2/namei.c b/fs/ocfs2/namei.c
index a9856e3eaaf0..9f39c640cddf 100644
--- a/fs/ocfs2/namei.c
+++ b/fs/ocfs2/namei.c
@@ -1724,15 +1724,16 @@ static int ocfs2_symlink(struct inode *dir,
1724 fe = (struct ocfs2_dinode *) new_fe_bh->b_data; 1724 fe = (struct ocfs2_dinode *) new_fe_bh->b_data;
1725 inode->i_rdev = 0; 1725 inode->i_rdev = 0;
1726 newsize = l - 1; 1726 newsize = l - 1;
1727 inode->i_op = &ocfs2_symlink_inode_operations;
1727 if (l > ocfs2_fast_symlink_chars(sb)) { 1728 if (l > ocfs2_fast_symlink_chars(sb)) {
1728 u32 offset = 0; 1729 u32 offset = 0;
1729 1730
1730 inode->i_op = &ocfs2_symlink_inode_operations;
1731 status = dquot_alloc_space_nodirty(inode, 1731 status = dquot_alloc_space_nodirty(inode,
1732 ocfs2_clusters_to_bytes(osb->sb, 1)); 1732 ocfs2_clusters_to_bytes(osb->sb, 1));
1733 if (status) 1733 if (status)
1734 goto bail; 1734 goto bail;
1735 did_quota = 1; 1735 did_quota = 1;
1736 inode->i_mapping->a_ops = &ocfs2_aops;
1736 status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0, 1737 status = ocfs2_add_inode_data(osb, inode, &offset, 1, 0,
1737 new_fe_bh, 1738 new_fe_bh,
1738 handle, data_ac, NULL, 1739 handle, data_ac, NULL,
@@ -1750,7 +1751,7 @@ static int ocfs2_symlink(struct inode *dir,
1750 i_size_write(inode, newsize); 1751 i_size_write(inode, newsize);
1751 inode->i_blocks = ocfs2_inode_sector_count(inode); 1752 inode->i_blocks = ocfs2_inode_sector_count(inode);
1752 } else { 1753 } else {
1753 inode->i_op = &ocfs2_fast_symlink_inode_operations; 1754 inode->i_mapping->a_ops = &ocfs2_fast_symlink_aops;
1754 memcpy((char *) fe->id2.i_symlink, symname, l); 1755 memcpy((char *) fe->id2.i_symlink, symname, l);
1755 i_size_write(inode, newsize); 1756 i_size_write(inode, newsize);
1756 inode->i_blocks = 0; 1757 inode->i_blocks = 0;
diff --git a/fs/ocfs2/symlink.c b/fs/ocfs2/symlink.c
index 5d22872e2bb3..f1fbb4b552ad 100644
--- a/fs/ocfs2/symlink.c
+++ b/fs/ocfs2/symlink.c
@@ -54,101 +54,40 @@
54#include "buffer_head_io.h" 54#include "buffer_head_io.h"
55 55
56 56
57static char *ocfs2_fast_symlink_getlink(struct inode *inode, 57static int ocfs2_fast_symlink_readpage(struct file *unused, struct page *page)
58 struct buffer_head **bh)
59{ 58{
60 int status; 59 struct inode *inode = page->mapping->host;
61 char *link = NULL; 60 struct buffer_head *bh;
61 int status = ocfs2_read_inode_block(inode, &bh);
62 struct ocfs2_dinode *fe; 62 struct ocfs2_dinode *fe;
63 const char *link;
64 void *kaddr;
65 size_t len;
63 66
64 status = ocfs2_read_inode_block(inode, bh);
65 if (status < 0) { 67 if (status < 0) {
66 mlog_errno(status); 68 mlog_errno(status);
67 link = ERR_PTR(status); 69 return status;
68 goto bail;
69 } 70 }
70 71
71 fe = (struct ocfs2_dinode *) (*bh)->b_data; 72 fe = (struct ocfs2_dinode *) bh->b_data;
72 link = (char *) fe->id2.i_symlink; 73 link = (char *) fe->id2.i_symlink;
73bail: 74 /* will be less than a page size */
74 75 len = strnlen(link, ocfs2_fast_symlink_chars(inode->i_sb));
75 return link; 76 kaddr = kmap_atomic(page);
76} 77 memcpy(kaddr, link, len + 1);
77 78 kunmap_atomic(kaddr);
78static int ocfs2_readlink(struct dentry *dentry, 79 SetPageUptodate(page);
79 char __user *buffer, 80 unlock_page(page);
80 int buflen)
81{
82 int ret;
83 char *link;
84 struct buffer_head *bh = NULL;
85 struct inode *inode = dentry->d_inode;
86
87 link = ocfs2_fast_symlink_getlink(inode, &bh);
88 if (IS_ERR(link)) {
89 ret = PTR_ERR(link);
90 goto out;
91 }
92
93 /*
94 * Without vfsmount we can't update atime now,
95 * but we will update atime here ultimately.
96 */
97 ret = vfs_readlink(dentry, buffer, buflen, link);
98
99 brelse(bh); 81 brelse(bh);
100out: 82 return 0;
101 if (ret < 0)
102 mlog_errno(ret);
103 return ret;
104} 83}
105 84
106static void *ocfs2_fast_follow_link(struct dentry *dentry, 85const struct address_space_operations ocfs2_fast_symlink_aops = {
107 struct nameidata *nd) 86 .readpage = ocfs2_fast_symlink_readpage,
108{ 87};
109 int status = 0;
110 int len;
111 char *target, *link = ERR_PTR(-ENOMEM);
112 struct inode *inode = dentry->d_inode;
113 struct buffer_head *bh = NULL;
114
115 BUG_ON(!ocfs2_inode_is_fast_symlink(inode));
116 target = ocfs2_fast_symlink_getlink(inode, &bh);
117 if (IS_ERR(target)) {
118 status = PTR_ERR(target);
119 mlog_errno(status);
120 goto bail;
121 }
122
123 /* Fast symlinks can't be large */
124 len = strnlen(target, ocfs2_fast_symlink_chars(inode->i_sb));
125 link = kzalloc(len + 1, GFP_NOFS);
126 if (!link) {
127 status = -ENOMEM;
128 mlog_errno(status);
129 goto bail;
130 }
131
132 memcpy(link, target, len);
133
134bail:
135 nd_set_link(nd, status ? ERR_PTR(status) : link);
136 brelse(bh);
137
138 if (status)
139 mlog_errno(status);
140 return NULL;
141}
142
143static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
144{
145 char *link = nd_get_link(nd);
146 if (!IS_ERR(link))
147 kfree(link);
148}
149 88
150const struct inode_operations ocfs2_symlink_inode_operations = { 89const struct inode_operations ocfs2_symlink_inode_operations = {
151 .readlink = page_readlink, 90 .readlink = generic_readlink,
152 .follow_link = page_follow_link_light, 91 .follow_link = page_follow_link_light,
153 .put_link = page_put_link, 92 .put_link = page_put_link,
154 .getattr = ocfs2_getattr, 93 .getattr = ocfs2_getattr,
@@ -159,15 +98,3 @@ const struct inode_operations ocfs2_symlink_inode_operations = {
159 .removexattr = generic_removexattr, 98 .removexattr = generic_removexattr,
160 .fiemap = ocfs2_fiemap, 99 .fiemap = ocfs2_fiemap,
161}; 100};
162const struct inode_operations ocfs2_fast_symlink_inode_operations = {
163 .readlink = ocfs2_readlink,
164 .follow_link = ocfs2_fast_follow_link,
165 .put_link = ocfs2_fast_put_link,
166 .getattr = ocfs2_getattr,
167 .setattr = ocfs2_setattr,
168 .setxattr = generic_setxattr,
169 .getxattr = generic_getxattr,
170 .listxattr = ocfs2_listxattr,
171 .removexattr = generic_removexattr,
172 .fiemap = ocfs2_fiemap,
173};
diff --git a/fs/ocfs2/symlink.h b/fs/ocfs2/symlink.h
index 65a6c9c6ad51..71ee4245e919 100644
--- a/fs/ocfs2/symlink.h
+++ b/fs/ocfs2/symlink.h
@@ -27,7 +27,7 @@
27#define OCFS2_SYMLINK_H 27#define OCFS2_SYMLINK_H
28 28
29extern const struct inode_operations ocfs2_symlink_inode_operations; 29extern const struct inode_operations ocfs2_symlink_inode_operations;
30extern const struct inode_operations ocfs2_fast_symlink_inode_operations; 30extern const struct address_space_operations ocfs2_fast_symlink_aops;
31 31
32/* 32/*
33 * Test whether an inode is a fast symlink. 33 * Test whether an inode is a fast symlink.
diff --git a/fs/open.c b/fs/open.c
index d54301219d04..d6c79a0dffc7 100644
--- a/fs/open.c
+++ b/fs/open.c
@@ -654,10 +654,23 @@ static inline int __get_file_write_access(struct inode *inode,
654 return error; 654 return error;
655} 655}
656 656
657static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt, 657int open_check_o_direct(struct file *f)
658 struct file *f, 658{
659 int (*open)(struct inode *, struct file *), 659 /* NB: we're sure to have correct a_ops only after f_op->open */
660 const struct cred *cred) 660 if (f->f_flags & O_DIRECT) {
661 if (!f->f_mapping->a_ops ||
662 ((!f->f_mapping->a_ops->direct_IO) &&
663 (!f->f_mapping->a_ops->get_xip_mem))) {
664 return -EINVAL;
665 }
666 }
667 return 0;
668}
669
670static struct file *do_dentry_open(struct dentry *dentry, struct vfsmount *mnt,
671 struct file *f,
672 int (*open)(struct inode *, struct file *),
673 const struct cred *cred)
661{ 674{
662 static const struct file_operations empty_fops = {}; 675 static const struct file_operations empty_fops = {};
663 struct inode *inode; 676 struct inode *inode;
@@ -713,16 +726,6 @@ static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
713 726
714 file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping); 727 file_ra_state_init(&f->f_ra, f->f_mapping->host->i_mapping);
715 728
716 /* NB: we're sure to have correct a_ops only after f_op->open */
717 if (f->f_flags & O_DIRECT) {
718 if (!f->f_mapping->a_ops ||
719 ((!f->f_mapping->a_ops->direct_IO) &&
720 (!f->f_mapping->a_ops->get_xip_mem))) {
721 fput(f);
722 f = ERR_PTR(-EINVAL);
723 }
724 }
725
726 return f; 729 return f;
727 730
728cleanup_all: 731cleanup_all:
@@ -744,12 +747,29 @@ cleanup_all:
744 f->f_path.dentry = NULL; 747 f->f_path.dentry = NULL;
745 f->f_path.mnt = NULL; 748 f->f_path.mnt = NULL;
746cleanup_file: 749cleanup_file:
747 put_filp(f);
748 dput(dentry); 750 dput(dentry);
749 mntput(mnt); 751 mntput(mnt);
750 return ERR_PTR(error); 752 return ERR_PTR(error);
751} 753}
752 754
755static struct file *__dentry_open(struct dentry *dentry, struct vfsmount *mnt,
756 struct file *f,
757 int (*open)(struct inode *, struct file *),
758 const struct cred *cred)
759{
760 struct file *res = do_dentry_open(dentry, mnt, f, open, cred);
761 if (!IS_ERR(res)) {
762 int error = open_check_o_direct(f);
763 if (error) {
764 fput(res);
765 res = ERR_PTR(error);
766 }
767 } else {
768 put_filp(f);
769 }
770 return res;
771}
772
753/** 773/**
754 * lookup_instantiate_filp - instantiates the open intent filp 774 * lookup_instantiate_filp - instantiates the open intent filp
755 * @nd: pointer to nameidata 775 * @nd: pointer to nameidata
@@ -804,13 +824,31 @@ struct file *nameidata_to_filp(struct nameidata *nd)
804 824
805 /* Pick up the filp from the open intent */ 825 /* Pick up the filp from the open intent */
806 filp = nd->intent.open.file; 826 filp = nd->intent.open.file;
807 nd->intent.open.file = NULL;
808 827
809 /* Has the filesystem initialised the file for us? */ 828 /* Has the filesystem initialised the file for us? */
810 if (filp->f_path.dentry == NULL) { 829 if (filp->f_path.dentry != NULL) {
830 nd->intent.open.file = NULL;
831 } else {
832 struct file *res;
833
811 path_get(&nd->path); 834 path_get(&nd->path);
812 filp = __dentry_open(nd->path.dentry, nd->path.mnt, filp, 835 res = do_dentry_open(nd->path.dentry, nd->path.mnt,
813 NULL, cred); 836 filp, NULL, cred);
837 if (!IS_ERR(res)) {
838 int error;
839
840 nd->intent.open.file = NULL;
841 BUG_ON(res != filp);
842
843 error = open_check_o_direct(filp);
844 if (error) {
845 fput(filp);
846 filp = ERR_PTR(error);
847 }
848 } else {
849 /* Allow nd->intent.open.file to be recycled */
850 filp = res;
851 }
814 } 852 }
815 return filp; 853 return filp;
816} 854}
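
fs/open.c factors the O_DIRECT capability check out of __dentry_open() into open_check_o_direct() and splits the rest into do_dentry_open(), so nameidata_to_filp() can run the check itself and choose between fput() and recycling the intent file on failure. The user-visible rule is unchanged: an O_DIRECT open fails with EINVAL when the file's address_space_operations provide neither direct_IO nor get_xip_mem. A small userspace probe of that rule, assuming /dev/shm is a tmpfs mount (tmpfs is used here only as a common example of a filesystem without direct_IO):

#define _GNU_SOURCE
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/shm/o_direct_probe",
		      O_CREAT | O_RDWR | O_DIRECT, 0600);

	if (fd < 0)
		printf("open(O_DIRECT) failed: %s\n", strerror(errno));
	else {
		printf("O_DIRECT accepted on this filesystem\n");
		close(fd);
	}
	unlink("/dev/shm/o_direct_probe");
	return 0;
}
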
diff --git a/fs/pipe.c b/fs/pipe.c
index fec5e4ad071a..49c1065256fd 100644
--- a/fs/pipe.c
+++ b/fs/pipe.c
@@ -654,8 +654,11 @@ out:
654 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM); 654 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLRDNORM);
655 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); 655 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
656 } 656 }
657 if (ret > 0) 657 if (ret > 0) {
658 file_update_time(filp); 658 int err = file_update_time(filp);
659 if (err)
660 ret = err;
661 }
659 return ret; 662 return ret;
660} 663}
661 664
@@ -693,7 +696,7 @@ static long pipe_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
693 696
694 return put_user(count, (int __user *)arg); 697 return put_user(count, (int __user *)arg);
695 default: 698 default:
696 return -EINVAL; 699 return -ENOIOCTLCMD;
697 } 700 }
698} 701}
699 702
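
Two behavioural tweaks in fs/pipe.c: a failed file_update_time() now turns an otherwise successful pipe write into an error return, and pipe_ioctl() answers unknown commands with ENOIOCTLCMD instead of EINVAL, which the VFS ioctl dispatcher is expected to translate to ENOTTY before it reaches userspace. A quick userspace check of the supported and the unsupported case (TIOCGWINSZ is just an arbitrary ioctl a pipe does not implement):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>

int main(void)
{
	int fds[2], queued = 0;
	struct winsize ws;

	if (pipe(fds))
		return 1;
	if (write(fds[1], "abc", 3) != 3)
		return 1;

	ioctl(fds[0], FIONREAD, &queued);
	printf("FIONREAD: %d bytes queued\n", queued);	/* expect 3 */

	if (ioctl(fds[0], TIOCGWINSZ, &ws) < 0)		/* pipes don't implement it */
		printf("TIOCGWINSZ on a pipe: %s\n", strerror(errno));
	return 0;
}
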
diff --git a/fs/pnode.c b/fs/pnode.c
index ab5fa9e1a79a..bed378db0758 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -257,12 +257,12 @@ int propagate_mnt(struct mount *dest_mnt, struct dentry *dest_dentry,
257 prev_src_mnt = child; 257 prev_src_mnt = child;
258 } 258 }
259out: 259out:
260 br_write_lock(vfsmount_lock); 260 br_write_lock(&vfsmount_lock);
261 while (!list_empty(&tmp_list)) { 261 while (!list_empty(&tmp_list)) {
262 child = list_first_entry(&tmp_list, struct mount, mnt_hash); 262 child = list_first_entry(&tmp_list, struct mount, mnt_hash);
263 umount_tree(child, 0, &umount_list); 263 umount_tree(child, 0, &umount_list);
264 } 264 }
265 br_write_unlock(vfsmount_lock); 265 br_write_unlock(&vfsmount_lock);
266 release_mounts(&umount_list); 266 release_mounts(&umount_list);
267 return ret; 267 return ret;
268} 268}
diff --git a/fs/proc/array.c b/fs/proc/array.c
index dc4c5a7b9ece..c1c207c36cae 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -370,7 +370,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
370 struct pid *pid, struct task_struct *task, int whole) 370 struct pid *pid, struct task_struct *task, int whole)
371{ 371{
372 unsigned long vsize, eip, esp, wchan = ~0UL; 372 unsigned long vsize, eip, esp, wchan = ~0UL;
373 long priority, nice; 373 int priority, nice;
374 int tty_pgrp = -1, tty_nr = 0; 374 int tty_pgrp = -1, tty_nr = 0;
375 sigset_t sigign, sigcatch; 375 sigset_t sigign, sigcatch;
376 char state; 376 char state;
@@ -492,7 +492,7 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
492 seq_put_decimal_ull(m, ' ', 0); 492 seq_put_decimal_ull(m, ' ', 0);
493 seq_put_decimal_ull(m, ' ', start_time); 493 seq_put_decimal_ull(m, ' ', start_time);
494 seq_put_decimal_ull(m, ' ', vsize); 494 seq_put_decimal_ull(m, ' ', vsize);
495 seq_put_decimal_ll(m, ' ', mm ? get_mm_rss(mm) : 0); 495 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
496 seq_put_decimal_ull(m, ' ', rsslim); 496 seq_put_decimal_ull(m, ' ', rsslim);
497 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0); 497 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
498 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0); 498 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
@@ -517,9 +517,23 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
517 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task)); 517 seq_put_decimal_ull(m, ' ', delayacct_blkio_ticks(task));
518 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime)); 518 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
519 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime)); 519 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
520 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_data : 0); 520
521 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->end_data : 0); 521 if (mm && permitted) {
522 seq_put_decimal_ull(m, ' ', (mm && permitted) ? mm->start_brk : 0); 522 seq_put_decimal_ull(m, ' ', mm->start_data);
523 seq_put_decimal_ull(m, ' ', mm->end_data);
524 seq_put_decimal_ull(m, ' ', mm->start_brk);
525 seq_put_decimal_ull(m, ' ', mm->arg_start);
526 seq_put_decimal_ull(m, ' ', mm->arg_end);
527 seq_put_decimal_ull(m, ' ', mm->env_start);
528 seq_put_decimal_ull(m, ' ', mm->env_end);
529 } else
530 seq_printf(m, " 0 0 0 0 0 0 0");
531
532 if (permitted)
533 seq_put_decimal_ll(m, ' ', task->exit_code);
534 else
535 seq_put_decimal_ll(m, ' ', 0);
536
523 seq_putc(m, '\n'); 537 seq_putc(m, '\n');
524 if (mm) 538 if (mm)
525 mmput(mm); 539 mmput(mm);
@@ -565,3 +579,126 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
565 579
566 return 0; 580 return 0;
567} 581}
582
583#ifdef CONFIG_CHECKPOINT_RESTORE
584static struct pid *
585get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
586{
587 struct task_struct *start, *task;
588 struct pid *pid = NULL;
589
590 read_lock(&tasklist_lock);
591
592 start = pid_task(proc_pid(inode), PIDTYPE_PID);
593 if (!start)
594 goto out;
595
596 /*
597 * Lets try to continue searching first, this gives
598 * us significant speedup on children-rich processes.
599 */
600 if (pid_prev) {
601 task = pid_task(pid_prev, PIDTYPE_PID);
602 if (task && task->real_parent == start &&
603 !(list_empty(&task->sibling))) {
604 if (list_is_last(&task->sibling, &start->children))
605 goto out;
606 task = list_first_entry(&task->sibling,
607 struct task_struct, sibling);
608 pid = get_pid(task_pid(task));
609 goto out;
610 }
611 }
612
613 /*
614 * Slow search case.
615 *
616 * We might miss some children here if children
617 * are exited while we were not holding the lock,
618 * but it was never promised to be accurate that
619 * much.
620 *
621 * "Just suppose that the parent sleeps, but N children
622 * exit after we printed their tids. Now the slow paths
623 * skips N extra children, we miss N tasks." (c)
624 *
625 * So one need to stop or freeze the leader and all
626 * its children to get a precise result.
627 */
628 list_for_each_entry(task, &start->children, sibling) {
629 if (pos-- == 0) {
630 pid = get_pid(task_pid(task));
631 break;
632 }
633 }
634
635out:
636 read_unlock(&tasklist_lock);
637 return pid;
638}
639
640static int children_seq_show(struct seq_file *seq, void *v)
641{
642 struct inode *inode = seq->private;
643 pid_t pid;
644
645 pid = pid_nr_ns(v, inode->i_sb->s_fs_info);
646 return seq_printf(seq, "%d ", pid);
647}
648
649static void *children_seq_start(struct seq_file *seq, loff_t *pos)
650{
651 return get_children_pid(seq->private, NULL, *pos);
652}
653
654static void *children_seq_next(struct seq_file *seq, void *v, loff_t *pos)
655{
656 struct pid *pid;
657
658 pid = get_children_pid(seq->private, v, *pos + 1);
659 put_pid(v);
660
661 ++*pos;
662 return pid;
663}
664
665static void children_seq_stop(struct seq_file *seq, void *v)
666{
667 put_pid(v);
668}
669
670static const struct seq_operations children_seq_ops = {
671 .start = children_seq_start,
672 .next = children_seq_next,
673 .stop = children_seq_stop,
674 .show = children_seq_show,
675};
676
677static int children_seq_open(struct inode *inode, struct file *file)
678{
679 struct seq_file *m;
680 int ret;
681
682 ret = seq_open(file, &children_seq_ops);
683 if (ret)
684 return ret;
685
686 m = file->private_data;
687 m->private = inode;
688
689 return ret;
690}
691
692int children_seq_release(struct inode *inode, struct file *file)
693{
694 seq_release(inode, file);
695 return 0;
696}
697
698const struct file_operations proc_tid_children_operations = {
699 .open = children_seq_open,
700 .read = seq_read,
701 .llseek = seq_lseek,
702 .release = children_seq_release,
703};
704#endif /* CONFIG_CHECKPOINT_RESTORE */
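
do_task_stat() now appends arg_start, arg_end, env_start, env_end and the exit code to /proc/<pid>/stat (zeroes when the reader lacks permission), and, under CONFIG_CHECKPOINT_RESTORE, each task gains a /proc/<pid>/task/<tid>/children file whose seq_file emits the direct children as one line of space-separated PIDs. As the comment in get_children_pid() notes, the listing is only guaranteed accurate if the parent and its children are stopped or frozen. A minimal userspace reader:

/* Print the children of a task as listed in /proc/<pid>/task/<tid>/children.
 * Requires a kernel built with CONFIG_CHECKPOINT_RESTORE. */
#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64];
	FILE *f;
	int pid, tid, child;

	if (argc != 3 || sscanf(argv[1], "%d", &pid) != 1 ||
	    sscanf(argv[2], "%d", &tid) != 1) {
		fprintf(stderr, "usage: %s <pid> <tid>\n", argv[0]);
		return 1;
	}

	snprintf(path, sizeof(path), "/proc/%d/task/%d/children", pid, tid);
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	while (fscanf(f, "%d", &child) == 1)
		printf("child: %d\n", child);
	fclose(f);
	return 0;
}
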
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d7d711876b6a..437195f204e1 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -199,11 +199,6 @@ static int proc_root_link(struct dentry *dentry, struct path *path)
199 return result; 199 return result;
200} 200}
201 201
202struct mm_struct *mm_for_maps(struct task_struct *task)
203{
204 return mm_access(task, PTRACE_MODE_READ);
205}
206
207static int proc_pid_cmdline(struct task_struct *task, char * buffer) 202static int proc_pid_cmdline(struct task_struct *task, char * buffer)
208{ 203{
209 int res = 0; 204 int res = 0;
@@ -243,7 +238,7 @@ out:
243 238
244static int proc_pid_auxv(struct task_struct *task, char *buffer) 239static int proc_pid_auxv(struct task_struct *task, char *buffer)
245{ 240{
246 struct mm_struct *mm = mm_for_maps(task); 241 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
247 int res = PTR_ERR(mm); 242 int res = PTR_ERR(mm);
248 if (mm && !IS_ERR(mm)) { 243 if (mm && !IS_ERR(mm)) {
249 unsigned int nwords = 0; 244 unsigned int nwords = 0;
@@ -679,7 +674,7 @@ static const struct file_operations proc_single_file_operations = {
679 .release = single_release, 674 .release = single_release,
680}; 675};
681 676
682static int mem_open(struct inode* inode, struct file* file) 677static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
683{ 678{
684 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode); 679 struct task_struct *task = get_proc_task(file->f_path.dentry->d_inode);
685 struct mm_struct *mm; 680 struct mm_struct *mm;
@@ -687,7 +682,7 @@ static int mem_open(struct inode* inode, struct file* file)
687 if (!task) 682 if (!task)
688 return -ESRCH; 683 return -ESRCH;
689 684
690 mm = mm_access(task, PTRACE_MODE_ATTACH); 685 mm = mm_access(task, mode);
691 put_task_struct(task); 686 put_task_struct(task);
692 687
693 if (IS_ERR(mm)) 688 if (IS_ERR(mm))
@@ -707,6 +702,11 @@ static int mem_open(struct inode* inode, struct file* file)
707 return 0; 702 return 0;
708} 703}
709 704
705static int mem_open(struct inode *inode, struct file *file)
706{
707 return __mem_open(inode, file, PTRACE_MODE_ATTACH);
708}
709
710static ssize_t mem_rw(struct file *file, char __user *buf, 710static ssize_t mem_rw(struct file *file, char __user *buf,
711 size_t count, loff_t *ppos, int write) 711 size_t count, loff_t *ppos, int write)
712{ 712{
@@ -803,30 +803,29 @@ static const struct file_operations proc_mem_operations = {
803 .release = mem_release, 803 .release = mem_release,
804}; 804};
805 805
806static int environ_open(struct inode *inode, struct file *file)
807{
808 return __mem_open(inode, file, PTRACE_MODE_READ);
809}
810
806static ssize_t environ_read(struct file *file, char __user *buf, 811static ssize_t environ_read(struct file *file, char __user *buf,
807 size_t count, loff_t *ppos) 812 size_t count, loff_t *ppos)
808{ 813{
809 struct task_struct *task = get_proc_task(file->f_dentry->d_inode);
810 char *page; 814 char *page;
811 unsigned long src = *ppos; 815 unsigned long src = *ppos;
812 int ret = -ESRCH; 816 int ret = 0;
813 struct mm_struct *mm; 817 struct mm_struct *mm = file->private_data;
814 818
815 if (!task) 819 if (!mm)
816 goto out_no_task; 820 return 0;
817 821
818 ret = -ENOMEM;
819 page = (char *)__get_free_page(GFP_TEMPORARY); 822 page = (char *)__get_free_page(GFP_TEMPORARY);
820 if (!page) 823 if (!page)
821 goto out; 824 return -ENOMEM;
822
823
824 mm = mm_for_maps(task);
825 ret = PTR_ERR(mm);
826 if (!mm || IS_ERR(mm))
827 goto out_free;
828 825
829 ret = 0; 826 ret = 0;
827 if (!atomic_inc_not_zero(&mm->mm_users))
828 goto free;
830 while (count > 0) { 829 while (count > 0) {
831 int this_len, retval, max_len; 830 int this_len, retval, max_len;
832 831
@@ -838,7 +837,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
838 max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count; 837 max_len = (count > PAGE_SIZE) ? PAGE_SIZE : count;
839 this_len = (this_len > max_len) ? max_len : this_len; 838 this_len = (this_len > max_len) ? max_len : this_len;
840 839
841 retval = access_process_vm(task, (mm->env_start + src), 840 retval = access_remote_vm(mm, (mm->env_start + src),
842 page, this_len, 0); 841 page, this_len, 0);
843 842
844 if (retval <= 0) { 843 if (retval <= 0) {
@@ -857,19 +856,18 @@ static ssize_t environ_read(struct file *file, char __user *buf,
857 count -= retval; 856 count -= retval;
858 } 857 }
859 *ppos = src; 858 *ppos = src;
860
861 mmput(mm); 859 mmput(mm);
862out_free: 860
861free:
863 free_page((unsigned long) page); 862 free_page((unsigned long) page);
864out:
865 put_task_struct(task);
866out_no_task:
867 return ret; 863 return ret;
868} 864}
869 865
870static const struct file_operations proc_environ_operations = { 866static const struct file_operations proc_environ_operations = {
867 .open = environ_open,
871 .read = environ_read, 868 .read = environ_read,
872 .llseek = generic_file_llseek, 869 .llseek = generic_file_llseek,
870 .release = mem_release,
873}; 871};
874 872
875static ssize_t oom_adjust_read(struct file *file, char __user *buf, 873static ssize_t oom_adjust_read(struct file *file, char __user *buf,
@@ -1805,7 +1803,7 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1805 rcu_read_lock(); 1803 rcu_read_lock();
1806 file = fcheck_files(files, fd); 1804 file = fcheck_files(files, fd);
1807 if (file) { 1805 if (file) {
1808 unsigned i_mode, f_mode = file->f_mode; 1806 unsigned f_mode = file->f_mode;
1809 1807
1810 rcu_read_unlock(); 1808 rcu_read_unlock();
1811 put_files_struct(files); 1809 put_files_struct(files);
@@ -1821,12 +1819,14 @@ static int tid_fd_revalidate(struct dentry *dentry, struct nameidata *nd)
1821 inode->i_gid = GLOBAL_ROOT_GID; 1819 inode->i_gid = GLOBAL_ROOT_GID;
1822 } 1820 }
1823 1821
1824 i_mode = S_IFLNK; 1822 if (S_ISLNK(inode->i_mode)) {
1825 if (f_mode & FMODE_READ) 1823 unsigned i_mode = S_IFLNK;
1826 i_mode |= S_IRUSR | S_IXUSR; 1824 if (f_mode & FMODE_READ)
1827 if (f_mode & FMODE_WRITE) 1825 i_mode |= S_IRUSR | S_IXUSR;
1828 i_mode |= S_IWUSR | S_IXUSR; 1826 if (f_mode & FMODE_WRITE)
1829 inode->i_mode = i_mode; 1827 i_mode |= S_IWUSR | S_IXUSR;
1828 inode->i_mode = i_mode;
1829 }
1830 1830
1831 security_task_to_inode(task, inode); 1831 security_task_to_inode(task, inode);
1832 put_task_struct(task); 1832 put_task_struct(task);
@@ -1850,7 +1850,7 @@ static const struct dentry_operations tid_fd_dentry_operations =
1850static struct dentry *proc_fd_instantiate(struct inode *dir, 1850static struct dentry *proc_fd_instantiate(struct inode *dir,
1851 struct dentry *dentry, struct task_struct *task, const void *ptr) 1851 struct dentry *dentry, struct task_struct *task, const void *ptr)
1852{ 1852{
1853 unsigned fd = *(const unsigned *)ptr; 1853 unsigned fd = (unsigned long)ptr;
1854 struct inode *inode; 1854 struct inode *inode;
1855 struct proc_inode *ei; 1855 struct proc_inode *ei;
1856 struct dentry *error = ERR_PTR(-ENOENT); 1856 struct dentry *error = ERR_PTR(-ENOENT);
@@ -1861,6 +1861,7 @@ static struct dentry *proc_fd_instantiate(struct inode *dir,
1861 ei = PROC_I(inode); 1861 ei = PROC_I(inode);
1862 ei->fd = fd; 1862 ei->fd = fd;
1863 1863
1864 inode->i_mode = S_IFLNK;
1864 inode->i_op = &proc_pid_link_inode_operations; 1865 inode->i_op = &proc_pid_link_inode_operations;
1865 inode->i_size = 64; 1866 inode->i_size = 64;
1866 ei->op.proc_get_link = proc_fd_link; 1867 ei->op.proc_get_link = proc_fd_link;
@@ -1887,7 +1888,7 @@ static struct dentry *proc_lookupfd_common(struct inode *dir,
1887 if (fd == ~0U) 1888 if (fd == ~0U)
1888 goto out; 1889 goto out;
1889 1890
1890 result = instantiate(dir, dentry, task, &fd); 1891 result = instantiate(dir, dentry, task, (void *)(unsigned long)fd);
1891out: 1892out:
1892 put_task_struct(task); 1893 put_task_struct(task);
1893out_no_task: 1894out_no_task:
@@ -1930,21 +1931,22 @@ static int proc_readfd_common(struct file * filp, void * dirent,
1930 fd++, filp->f_pos++) { 1931 fd++, filp->f_pos++) {
1931 char name[PROC_NUMBUF]; 1932 char name[PROC_NUMBUF];
1932 int len; 1933 int len;
1934 int rv;
1933 1935
1934 if (!fcheck_files(files, fd)) 1936 if (!fcheck_files(files, fd))
1935 continue; 1937 continue;
1936 rcu_read_unlock(); 1938 rcu_read_unlock();
1937 1939
1938 len = snprintf(name, sizeof(name), "%d", fd); 1940 len = snprintf(name, sizeof(name), "%d", fd);
1939 if (proc_fill_cache(filp, dirent, filldir, 1941 rv = proc_fill_cache(filp, dirent, filldir,
1940 name, len, instantiate, 1942 name, len, instantiate, p,
1941 p, &fd) < 0) { 1943 (void *)(unsigned long)fd);
1942 rcu_read_lock(); 1944 if (rv < 0)
1943 break; 1945 goto out_fd_loop;
1944 }
1945 rcu_read_lock(); 1946 rcu_read_lock();
1946 } 1947 }
1947 rcu_read_unlock(); 1948 rcu_read_unlock();
1949out_fd_loop:
1948 put_files_struct(files); 1950 put_files_struct(files);
1949 } 1951 }
1950out: 1952out:
@@ -2024,11 +2026,8 @@ static int map_files_d_revalidate(struct dentry *dentry, struct nameidata *nd)
2024 if (!task) 2026 if (!task)
2025 goto out_notask; 2027 goto out_notask;
2026 2028
2027 if (!ptrace_may_access(task, PTRACE_MODE_READ)) 2029 mm = mm_access(task, PTRACE_MODE_READ);
2028 goto out; 2030 if (IS_ERR_OR_NULL(mm))
2029
2030 mm = get_task_mm(task);
2031 if (!mm)
2032 goto out; 2031 goto out;
2033 2032
2034 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) { 2033 if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2357,7 +2356,7 @@ static const struct inode_operations proc_fd_inode_operations = {
2357static struct dentry *proc_fdinfo_instantiate(struct inode *dir, 2356static struct dentry *proc_fdinfo_instantiate(struct inode *dir,
2358 struct dentry *dentry, struct task_struct *task, const void *ptr) 2357 struct dentry *dentry, struct task_struct *task, const void *ptr)
2359{ 2358{
2360 unsigned fd = *(unsigned *)ptr; 2359 unsigned fd = (unsigned long)ptr;
2361 struct inode *inode; 2360 struct inode *inode;
2362 struct proc_inode *ei; 2361 struct proc_inode *ei;
2363 struct dentry *error = ERR_PTR(-ENOENT); 2362 struct dentry *error = ERR_PTR(-ENOENT);
@@ -3404,6 +3403,9 @@ static const struct pid_entry tid_base_stuff[] = {
3404 ONE("stat", S_IRUGO, proc_tid_stat), 3403 ONE("stat", S_IRUGO, proc_tid_stat),
3405 ONE("statm", S_IRUGO, proc_pid_statm), 3404 ONE("statm", S_IRUGO, proc_pid_statm),
3406 REG("maps", S_IRUGO, proc_tid_maps_operations), 3405 REG("maps", S_IRUGO, proc_tid_maps_operations),
3406#ifdef CONFIG_CHECKPOINT_RESTORE
3407 REG("children", S_IRUGO, proc_tid_children_operations),
3408#endif
3407#ifdef CONFIG_NUMA 3409#ifdef CONFIG_NUMA
3408 REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations), 3410 REG("numa_maps", S_IRUGO, proc_tid_numa_maps_operations),
3409#endif 3411#endif
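
The /proc/<pid>/environ changes move the permission check to open time: environ_open() takes an mm reference via __mem_open(..., PTRACE_MODE_READ), environ_read() then walks it with access_remote_vm(), and mem_release() drops it, so the reader no longer needs the task itself between reads. The file format is unchanged: NUL-separated KEY=value strings. A small dumper, assuming the caller is allowed to ptrace-read the target:

/* Dump /proc/<pid>/environ, turning the NUL separators into newlines. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	char path[64], buf[4096];
	ssize_t n;
	int fd;

	snprintf(path, sizeof(path), "/proc/%s/environ",
		 argc > 1 ? argv[1] : "self");
	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror(path);
		return 1;
	}
	while ((n = read(fd, buf, sizeof(buf))) > 0) {
		ssize_t i;
		for (i = 0; i < n; i++)
			putchar(buf[i] ? buf[i] : '\n');
	}
	close(fd);
	return 0;
}
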
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 5f79bb8b4c60..eca4aca5b6e2 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -31,8 +31,6 @@ struct vmalloc_info {
31 unsigned long largest_chunk; 31 unsigned long largest_chunk;
32}; 32};
33 33
34extern struct mm_struct *mm_for_maps(struct task_struct *);
35
36#ifdef CONFIG_MMU 34#ifdef CONFIG_MMU
37#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START) 35#define VMALLOC_TOTAL (VMALLOC_END - VMALLOC_START)
38extern void get_vmalloc_info(struct vmalloc_info *vmi); 36extern void get_vmalloc_info(struct vmalloc_info *vmi);
@@ -56,6 +54,7 @@ extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
56 struct pid *pid, struct task_struct *task); 54 struct pid *pid, struct task_struct *task);
57extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); 55extern loff_t mem_lseek(struct file *file, loff_t offset, int orig);
58 56
57extern const struct file_operations proc_tid_children_operations;
59extern const struct file_operations proc_pid_maps_operations; 58extern const struct file_operations proc_pid_maps_operations;
60extern const struct file_operations proc_tid_maps_operations; 59extern const struct file_operations proc_tid_maps_operations;
61extern const struct file_operations proc_pid_numa_maps_operations; 60extern const struct file_operations proc_pid_numa_maps_operations;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 7faaf2acc570..4540b8f76f16 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -125,7 +125,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
125 if (!priv->task) 125 if (!priv->task)
126 return ERR_PTR(-ESRCH); 126 return ERR_PTR(-ESRCH);
127 127
128 mm = mm_for_maps(priv->task); 128 mm = mm_access(priv->task, PTRACE_MODE_READ);
129 if (!mm || IS_ERR(mm)) 129 if (!mm || IS_ERR(mm))
130 return mm; 130 return mm;
131 down_read(&mm->mmap_sem); 131 down_read(&mm->mmap_sem);
@@ -393,6 +393,7 @@ struct mem_size_stats {
393 unsigned long anonymous; 393 unsigned long anonymous;
394 unsigned long anonymous_thp; 394 unsigned long anonymous_thp;
395 unsigned long swap; 395 unsigned long swap;
396 unsigned long nonlinear;
396 u64 pss; 397 u64 pss;
397}; 398};
398 399
@@ -402,24 +403,33 @@ static void smaps_pte_entry(pte_t ptent, unsigned long addr,
402{ 403{
403 struct mem_size_stats *mss = walk->private; 404 struct mem_size_stats *mss = walk->private;
404 struct vm_area_struct *vma = mss->vma; 405 struct vm_area_struct *vma = mss->vma;
405 struct page *page; 406 pgoff_t pgoff = linear_page_index(vma, addr);
407 struct page *page = NULL;
406 int mapcount; 408 int mapcount;
407 409
408 if (is_swap_pte(ptent)) { 410 if (pte_present(ptent)) {
409 mss->swap += ptent_size; 411 page = vm_normal_page(vma, addr, ptent);
410 return; 412 } else if (is_swap_pte(ptent)) {
413 swp_entry_t swpent = pte_to_swp_entry(ptent);
414
415 if (!non_swap_entry(swpent))
416 mss->swap += ptent_size;
417 else if (is_migration_entry(swpent))
418 page = migration_entry_to_page(swpent);
419 } else if (pte_file(ptent)) {
420 if (pte_to_pgoff(ptent) != pgoff)
421 mss->nonlinear += ptent_size;
411 } 422 }
412 423
413 if (!pte_present(ptent))
414 return;
415
416 page = vm_normal_page(vma, addr, ptent);
417 if (!page) 424 if (!page)
418 return; 425 return;
419 426
420 if (PageAnon(page)) 427 if (PageAnon(page))
421 mss->anonymous += ptent_size; 428 mss->anonymous += ptent_size;
422 429
430 if (page->index != pgoff)
431 mss->nonlinear += ptent_size;
432
423 mss->resident += ptent_size; 433 mss->resident += ptent_size;
424 /* Accumulate the size in pages that have been accessed. */ 434 /* Accumulate the size in pages that have been accessed. */
425 if (pte_young(ptent) || PageReferenced(page)) 435 if (pte_young(ptent) || PageReferenced(page))
@@ -521,6 +531,10 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
521 (vma->vm_flags & VM_LOCKED) ? 531 (vma->vm_flags & VM_LOCKED) ?
522 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0); 532 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)) : 0);
523 533
534 if (vma->vm_flags & VM_NONLINEAR)
535 seq_printf(m, "Nonlinear: %8lu kB\n",
536 mss.nonlinear >> 10);
537
524 if (m->count < m->size) /* vma is copied successfully */ 538 if (m->count < m->size) /* vma is copied successfully */
525 m->version = (vma != get_gate_vma(task->mm)) 539 m->version = (vma != get_gate_vma(task->mm))
526 ? vma->vm_start : 0; 540 ? vma->vm_start : 0;
@@ -700,6 +714,7 @@ struct pagemapread {
700 714
701#define PM_PRESENT PM_STATUS(4LL) 715#define PM_PRESENT PM_STATUS(4LL)
702#define PM_SWAP PM_STATUS(2LL) 716#define PM_SWAP PM_STATUS(2LL)
717#define PM_FILE PM_STATUS(1LL)
703#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT) 718#define PM_NOT_PRESENT PM_PSHIFT(PAGE_SHIFT)
704#define PM_END_OF_BUFFER 1 719#define PM_END_OF_BUFFER 1
705 720
@@ -733,22 +748,33 @@ static int pagemap_pte_hole(unsigned long start, unsigned long end,
733 return err; 748 return err;
734} 749}
735 750
736static u64 swap_pte_to_pagemap_entry(pte_t pte) 751static void pte_to_pagemap_entry(pagemap_entry_t *pme,
752 struct vm_area_struct *vma, unsigned long addr, pte_t pte)
737{ 753{
738 swp_entry_t e = pte_to_swp_entry(pte); 754 u64 frame, flags;
739 return swp_type(e) | (swp_offset(e) << MAX_SWAPFILES_SHIFT); 755 struct page *page = NULL;
740} 756
741 757 if (pte_present(pte)) {
742static void pte_to_pagemap_entry(pagemap_entry_t *pme, pte_t pte) 758 frame = pte_pfn(pte);
743{ 759 flags = PM_PRESENT;
744 if (is_swap_pte(pte)) 760 page = vm_normal_page(vma, addr, pte);
745 *pme = make_pme(PM_PFRAME(swap_pte_to_pagemap_entry(pte)) 761 } else if (is_swap_pte(pte)) {
746 | PM_PSHIFT(PAGE_SHIFT) | PM_SWAP); 762 swp_entry_t entry = pte_to_swp_entry(pte);
747 else if (pte_present(pte)) 763
748 *pme = make_pme(PM_PFRAME(pte_pfn(pte)) 764 frame = swp_type(entry) |
749 | PM_PSHIFT(PAGE_SHIFT) | PM_PRESENT); 765 (swp_offset(entry) << MAX_SWAPFILES_SHIFT);
750 else 766 flags = PM_SWAP;
767 if (is_migration_entry(entry))
768 page = migration_entry_to_page(entry);
769 } else {
751 *pme = make_pme(PM_NOT_PRESENT); 770 *pme = make_pme(PM_NOT_PRESENT);
771 return;
772 }
773
774 if (page && !PageAnon(page))
775 flags |= PM_FILE;
776
777 *pme = make_pme(PM_PFRAME(frame) | PM_PSHIFT(PAGE_SHIFT) | flags);
752} 778}
753 779
754#ifdef CONFIG_TRANSPARENT_HUGEPAGE 780#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -815,7 +841,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
815 if (vma && (vma->vm_start <= addr) && 841 if (vma && (vma->vm_start <= addr) &&
816 !is_vm_hugetlb_page(vma)) { 842 !is_vm_hugetlb_page(vma)) {
817 pte = pte_offset_map(pmd, addr); 843 pte = pte_offset_map(pmd, addr);
818 pte_to_pagemap_entry(&pme, *pte); 844 pte_to_pagemap_entry(&pme, vma, addr, *pte);
819 /* unmap before userspace copy */ 845 /* unmap before userspace copy */
820 pte_unmap(pte); 846 pte_unmap(pte);
821 } 847 }
@@ -869,11 +895,11 @@ static int pagemap_hugetlb_range(pte_t *pte, unsigned long hmask,
869 * For each page in the address space, this file contains one 64-bit entry 895 * For each page in the address space, this file contains one 64-bit entry
870 * consisting of the following: 896 * consisting of the following:
871 * 897 *
872 * Bits 0-55 page frame number (PFN) if present 898 * Bits 0-54 page frame number (PFN) if present
873 * Bits 0-4 swap type if swapped 899 * Bits 0-4 swap type if swapped
874 * Bits 5-55 swap offset if swapped 900 * Bits 5-54 swap offset if swapped
875 * Bits 55-60 page shift (page size = 1<<page shift) 901 * Bits 55-60 page shift (page size = 1<<page shift)
876 * Bit 61 reserved for future use 902 * Bit 61 page is file-page or shared-anon
877 * Bit 62 page swapped 903 * Bit 62 page swapped
878 * Bit 63 page present 904 * Bit 63 page present
879 * 905 *
@@ -919,7 +945,7 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
919 if (!pm.buffer) 945 if (!pm.buffer)
920 goto out_task; 946 goto out_task;
921 947
922 mm = mm_for_maps(task); 948 mm = mm_access(task, PTRACE_MODE_READ);
923 ret = PTR_ERR(mm); 949 ret = PTR_ERR(mm);
924 if (!mm || IS_ERR(mm)) 950 if (!mm || IS_ERR(mm))
925 goto out_free; 951 goto out_free;
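
smaps gains a Nonlinear: line for VM_NONLINEAR vmas, and pagemap entries gain PM_FILE in bit 61, set when the backing page is a file page or shared anon (the PFN field shrinks to bits 0-54 accordingly). A userspace sketch that decodes one entry of /proc/self/pagemap using the bit layout documented in the hunk above:

/* Decode the pagemap entry for one virtual address: bit 63 present,
 * bit 62 swapped, bit 61 file-backed or shared-anon, PFN in bits 0-54. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint64_t entry;
	long pagesize = sysconf(_SC_PAGESIZE);
	uintptr_t addr = (uintptr_t)&entry;	/* probe a stack page */
	int fd = open("/proc/self/pagemap", O_RDONLY);

	if (fd < 0)
		return 1;
	if (pread(fd, &entry, sizeof(entry),
		  (addr / pagesize) * sizeof(entry)) != sizeof(entry))
		return 1;

	printf("present=%llu swapped=%llu file/shared-anon=%llu pfn=0x%llx\n",
	       (unsigned long long)(entry >> 63) & 1,
	       (unsigned long long)(entry >> 62) & 1,
	       (unsigned long long)(entry >> 61) & 1,
	       (unsigned long long)(entry & ((1ULL << 55) - 1)));
	close(fd);
	return 0;
}
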
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 74fe164d1b23..1ccfa537f5f5 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -223,7 +223,7 @@ static void *m_start(struct seq_file *m, loff_t *pos)
223 if (!priv->task) 223 if (!priv->task)
224 return ERR_PTR(-ESRCH); 224 return ERR_PTR(-ESRCH);
225 225
226 mm = mm_for_maps(priv->task); 226 mm = mm_access(priv->task, PTRACE_MODE_READ);
227 if (!mm || IS_ERR(mm)) { 227 if (!mm || IS_ERR(mm)) {
228 put_task_struct(priv->task); 228 put_task_struct(priv->task);
229 priv->task = NULL; 229 priv->task = NULL;
diff --git a/fs/proc_namespace.c b/fs/proc_namespace.c
index 12412852d88a..5e289a7cbad1 100644
--- a/fs/proc_namespace.c
+++ b/fs/proc_namespace.c
@@ -23,12 +23,12 @@ static unsigned mounts_poll(struct file *file, poll_table *wait)
23 23
24 poll_wait(file, &p->ns->poll, wait); 24 poll_wait(file, &p->ns->poll, wait);
25 25
26 br_read_lock(vfsmount_lock); 26 br_read_lock(&vfsmount_lock);
27 if (p->m.poll_event != ns->event) { 27 if (p->m.poll_event != ns->event) {
28 p->m.poll_event = ns->event; 28 p->m.poll_event = ns->event;
29 res |= POLLERR | POLLPRI; 29 res |= POLLERR | POLLPRI;
30 } 30 }
31 br_read_unlock(vfsmount_lock); 31 br_read_unlock(&vfsmount_lock);
32 32
33 return res; 33 return res;
34} 34}
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index aeb19e68e086..11a2aa2a56c4 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -258,7 +258,7 @@ fail:
258 return rc; 258 return rc;
259} 259}
260 260
261int pstore_fill_super(struct super_block *sb, void *data, int silent) 261static int pstore_fill_super(struct super_block *sb, void *data, int silent)
262{ 262{
263 struct inode *inode; 263 struct inode *inode;
264 264
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index 82c585f715e3..03ce7a9b81cc 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -94,20 +94,15 @@ static const char *get_reason_str(enum kmsg_dump_reason reason)
94 * as we can from the end of the buffer. 94 * as we can from the end of the buffer.
95 */ 95 */
96static void pstore_dump(struct kmsg_dumper *dumper, 96static void pstore_dump(struct kmsg_dumper *dumper,
97 enum kmsg_dump_reason reason, 97 enum kmsg_dump_reason reason)
98 const char *s1, unsigned long l1,
99 const char *s2, unsigned long l2)
100{ 98{
101 unsigned long s1_start, s2_start; 99 unsigned long total = 0;
102 unsigned long l1_cpy, l2_cpy;
103 unsigned long size, total = 0;
104 char *dst;
105 const char *why; 100 const char *why;
106 u64 id; 101 u64 id;
107 int hsize, ret;
108 unsigned int part = 1; 102 unsigned int part = 1;
109 unsigned long flags = 0; 103 unsigned long flags = 0;
110 int is_locked = 0; 104 int is_locked = 0;
105 int ret;
111 106
112 why = get_reason_str(reason); 107 why = get_reason_str(reason);
113 108
@@ -119,30 +114,25 @@ static void pstore_dump(struct kmsg_dumper *dumper,
119 spin_lock_irqsave(&psinfo->buf_lock, flags); 114 spin_lock_irqsave(&psinfo->buf_lock, flags);
120 oopscount++; 115 oopscount++;
121 while (total < kmsg_bytes) { 116 while (total < kmsg_bytes) {
117 char *dst;
118 unsigned long size;
119 int hsize;
120 size_t len;
121
122 dst = psinfo->buf; 122 dst = psinfo->buf;
123 hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part); 123 hsize = sprintf(dst, "%s#%d Part%d\n", why, oopscount, part);
124 size = psinfo->bufsize - hsize; 124 size = psinfo->bufsize - hsize;
125 dst += hsize; 125 dst += hsize;
126 126
127 l2_cpy = min(l2, size); 127 if (!kmsg_dump_get_buffer(dumper, true, dst, size, &len))
128 l1_cpy = min(l1, size - l2_cpy);
129
130 if (l1_cpy + l2_cpy == 0)
131 break; 128 break;
132 129
133 s2_start = l2 - l2_cpy;
134 s1_start = l1 - l1_cpy;
135
136 memcpy(dst, s1 + s1_start, l1_cpy);
137 memcpy(dst + l1_cpy, s2 + s2_start, l2_cpy);
138
139 ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part, 130 ret = psinfo->write(PSTORE_TYPE_DMESG, reason, &id, part,
140 hsize + l1_cpy + l2_cpy, psinfo); 131 hsize + len, psinfo);
141 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted()) 132 if (ret == 0 && reason == KMSG_DUMP_OOPS && pstore_is_mounted())
142 pstore_new_entry = 1; 133 pstore_new_entry = 1;
143 l1 -= l1_cpy; 134
144 l2 -= l2_cpy; 135 total += hsize + len;
145 total += l1_cpy + l2_cpy;
146 part++; 136 part++;
147 } 137 }
148 if (in_nmi()) { 138 if (in_nmi()) {
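
pstore_dump() no longer receives the two raw (s1,l1)/(s2,l2) log ranges; it pulls the tail of the kernel log through the kmsg_dump_get_buffer() iterator one record at a time, writing a "reason#count PartN" header in front of each chunk. A stripped-down sketch of another dumper built on the same helper; example_device_write() is hypothetical and stands in for whatever persists the buffer:

#include <linux/kmsg_dump.h>
#include <linux/module.h>

/* (hypothetical) whatever actually persists the bytes */
extern void example_device_write(const void *buf, size_t len);

static char example_buf[4096];

static void example_do_dump(struct kmsg_dumper *dumper,
			    enum kmsg_dump_reason reason)
{
	size_t len;

	if (reason != KMSG_DUMP_OOPS && reason != KMSG_DUMP_PANIC)
		return;

	/* Grab as much of the end of the kernel log as fits in the buffer. */
	if (!kmsg_dump_get_buffer(dumper, true, example_buf,
				  sizeof(example_buf), &len))
		return;

	example_device_write(example_buf, len);
}

static struct kmsg_dumper example_dumper = {
	.dump = example_do_dump,
};

static int __init example_init(void)
{
	return kmsg_dump_register(&example_dumper);
}
module_init(example_init);
MODULE_LICENSE("GPL");
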
diff --git a/fs/pstore/ram.c b/fs/pstore/ram.c
index 9123cce28c1e..453030f9c5bc 100644
--- a/fs/pstore/ram.c
+++ b/fs/pstore/ram.c
@@ -106,6 +106,8 @@ static ssize_t ramoops_pstore_read(u64 *id, enum pstore_type_id *type,
106 time->tv_sec = 0; 106 time->tv_sec = 0;
107 time->tv_nsec = 0; 107 time->tv_nsec = 0;
108 108
109 /* Update old/shadowed buffer. */
110 persistent_ram_save_old(prz);
109 size = persistent_ram_old_size(prz); 111 size = persistent_ram_old_size(prz);
110 *buf = kmalloc(size, GFP_KERNEL); 112 *buf = kmalloc(size, GFP_KERNEL);
111 if (*buf == NULL) 113 if (*buf == NULL)
@@ -184,6 +186,7 @@ static int ramoops_pstore_erase(enum pstore_type_id type, u64 id,
184 return -EINVAL; 186 return -EINVAL;
185 187
186 persistent_ram_free_old(cxt->przs[id]); 188 persistent_ram_free_old(cxt->przs[id]);
189 persistent_ram_zap(cxt->przs[id]);
187 190
188 return 0; 191 return 0;
189} 192}
diff --git a/fs/pstore/ram_core.c b/fs/pstore/ram_core.c
index 31f8d184f3a0..c5fbdbbf81ac 100644
--- a/fs/pstore/ram_core.c
+++ b/fs/pstore/ram_core.c
@@ -250,23 +250,24 @@ static void notrace persistent_ram_update(struct persistent_ram_zone *prz,
250 persistent_ram_update_ecc(prz, start, count); 250 persistent_ram_update_ecc(prz, start, count);
251} 251}
252 252
253static void __init 253void persistent_ram_save_old(struct persistent_ram_zone *prz)
254persistent_ram_save_old(struct persistent_ram_zone *prz)
255{ 254{
256 struct persistent_ram_buffer *buffer = prz->buffer; 255 struct persistent_ram_buffer *buffer = prz->buffer;
257 size_t size = buffer_size(prz); 256 size_t size = buffer_size(prz);
258 size_t start = buffer_start(prz); 257 size_t start = buffer_start(prz);
259 char *dest;
260 258
261 persistent_ram_ecc_old(prz); 259 if (!size)
260 return;
262 261
263 dest = kmalloc(size, GFP_KERNEL); 262 if (!prz->old_log) {
264 if (dest == NULL) { 263 persistent_ram_ecc_old(prz);
264 prz->old_log = kmalloc(size, GFP_KERNEL);
265 }
266 if (!prz->old_log) {
265 pr_err("persistent_ram: failed to allocate buffer\n"); 267 pr_err("persistent_ram: failed to allocate buffer\n");
266 return; 268 return;
267 } 269 }
268 270
269 prz->old_log = dest;
270 prz->old_log_size = size; 271 prz->old_log_size = size;
271 memcpy(prz->old_log, &buffer->data[start], size - start); 272 memcpy(prz->old_log, &buffer->data[start], size - start);
272 memcpy(prz->old_log + size - start, &buffer->data[0], start); 273 memcpy(prz->old_log + size - start, &buffer->data[0], start);
@@ -319,6 +320,13 @@ void persistent_ram_free_old(struct persistent_ram_zone *prz)
319 prz->old_log_size = 0; 320 prz->old_log_size = 0;
320} 321}
321 322
323void persistent_ram_zap(struct persistent_ram_zone *prz)
324{
325 atomic_set(&prz->buffer->start, 0);
326 atomic_set(&prz->buffer->size, 0);
327 persistent_ram_update_header_ecc(prz);
328}
329
322static void *persistent_ram_vmap(phys_addr_t start, size_t size) 330static void *persistent_ram_vmap(phys_addr_t start, size_t size)
323{ 331{
324 struct page **pages; 332 struct page **pages;
@@ -405,6 +413,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
405 " size %zu, start %zu\n", 413 " size %zu, start %zu\n",
406 buffer_size(prz), buffer_start(prz)); 414 buffer_size(prz), buffer_start(prz));
407 persistent_ram_save_old(prz); 415 persistent_ram_save_old(prz);
416 return 0;
408 } 417 }
409 } else { 418 } else {
410 pr_info("persistent_ram: no valid data in buffer" 419 pr_info("persistent_ram: no valid data in buffer"
@@ -412,8 +421,7 @@ static int __init persistent_ram_post_init(struct persistent_ram_zone *prz, bool
412 } 421 }
413 422
414 prz->buffer->sig = PERSISTENT_RAM_SIG; 423 prz->buffer->sig = PERSISTENT_RAM_SIG;
415 atomic_set(&prz->buffer->start, 0); 424 persistent_ram_zap(prz);
416 atomic_set(&prz->buffer->size, 0);
417 425
418 return 0; 426 return 0;
419} 427}
@@ -448,7 +456,6 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
448 goto err; 456 goto err;
449 457
450 persistent_ram_post_init(prz, ecc); 458 persistent_ram_post_init(prz, ecc);
451 persistent_ram_update_header_ecc(prz);
452 459
453 return prz; 460 return prz;
454err: 461err:
diff --git a/fs/read_write.c b/fs/read_write.c
index ffc99d22e0a3..c20614f86c01 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -633,8 +633,7 @@ ssize_t do_loop_readv_writev(struct file *filp, struct iovec *iov,
633ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, 633ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
634 unsigned long nr_segs, unsigned long fast_segs, 634 unsigned long nr_segs, unsigned long fast_segs,
635 struct iovec *fast_pointer, 635 struct iovec *fast_pointer,
636 struct iovec **ret_pointer, 636 struct iovec **ret_pointer)
637 int check_access)
638{ 637{
639 unsigned long seg; 638 unsigned long seg;
640 ssize_t ret; 639 ssize_t ret;
@@ -690,7 +689,7 @@ ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
690 ret = -EINVAL; 689 ret = -EINVAL;
691 goto out; 690 goto out;
692 } 691 }
693 if (check_access 692 if (type >= 0
694 && unlikely(!access_ok(vrfy_dir(type), buf, len))) { 693 && unlikely(!access_ok(vrfy_dir(type), buf, len))) {
695 ret = -EFAULT; 694 ret = -EFAULT;
696 goto out; 695 goto out;
@@ -723,7 +722,7 @@ static ssize_t do_readv_writev(int type, struct file *file,
723 } 722 }
724 723
725 ret = rw_copy_check_uvector(type, uvector, nr_segs, 724 ret = rw_copy_check_uvector(type, uvector, nr_segs,
726 ARRAY_SIZE(iovstack), iovstack, &iov, 1); 725 ARRAY_SIZE(iovstack), iovstack, &iov);
727 if (ret <= 0) 726 if (ret <= 0)
728 goto out; 727 goto out;
729 728
diff --git a/fs/readdir.c b/fs/readdir.c
index cc0a8227cddf..39e3370d79cf 100644
--- a/fs/readdir.c
+++ b/fs/readdir.c
@@ -108,11 +108,11 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
108 int error; 108 int error;
109 struct file * file; 109 struct file * file;
110 struct readdir_callback buf; 110 struct readdir_callback buf;
111 int fput_needed;
111 112
112 error = -EBADF; 113 file = fget_light(fd, &fput_needed);
113 file = fget(fd);
114 if (!file) 114 if (!file)
115 goto out; 115 return -EBADF;
116 116
117 buf.result = 0; 117 buf.result = 0;
118 buf.dirent = dirent; 118 buf.dirent = dirent;
@@ -121,8 +121,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
121 if (buf.result) 121 if (buf.result)
122 error = buf.result; 122 error = buf.result;
123 123
124 fput(file); 124 fput_light(file, fput_needed);
125out:
126 return error; 125 return error;
127} 126}
128 127
@@ -195,16 +194,15 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
195 struct file * file; 194 struct file * file;
196 struct linux_dirent __user * lastdirent; 195 struct linux_dirent __user * lastdirent;
197 struct getdents_callback buf; 196 struct getdents_callback buf;
197 int fput_needed;
198 int error; 198 int error;
199 199
200 error = -EFAULT;
201 if (!access_ok(VERIFY_WRITE, dirent, count)) 200 if (!access_ok(VERIFY_WRITE, dirent, count))
202 goto out; 201 return -EFAULT;
203 202
204 error = -EBADF; 203 file = fget_light(fd, &fput_needed);
205 file = fget(fd);
206 if (!file) 204 if (!file)
207 goto out; 205 return -EBADF;
208 206
209 buf.current_dir = dirent; 207 buf.current_dir = dirent;
210 buf.previous = NULL; 208 buf.previous = NULL;
@@ -221,8 +219,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
221 else 219 else
222 error = count - buf.count; 220 error = count - buf.count;
223 } 221 }
224 fput(file); 222 fput_light(file, fput_needed);
225out:
226 return error; 223 return error;
227} 224}
228 225
@@ -278,16 +275,15 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
278 struct file * file; 275 struct file * file;
279 struct linux_dirent64 __user * lastdirent; 276 struct linux_dirent64 __user * lastdirent;
280 struct getdents_callback64 buf; 277 struct getdents_callback64 buf;
278 int fput_needed;
281 int error; 279 int error;
282 280
283 error = -EFAULT;
284 if (!access_ok(VERIFY_WRITE, dirent, count)) 281 if (!access_ok(VERIFY_WRITE, dirent, count))
285 goto out; 282 return -EFAULT;
286 283
287 error = -EBADF; 284 file = fget_light(fd, &fput_needed);
288 file = fget(fd);
289 if (!file) 285 if (!file)
290 goto out; 286 return -EBADF;
291 287
292 buf.current_dir = dirent; 288 buf.current_dir = dirent;
293 buf.previous = NULL; 289 buf.previous = NULL;
@@ -305,7 +301,6 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
305 else 301 else
306 error = count - buf.count; 302 error = count - buf.count;
307 } 303 }
308 fput(file); 304 fput_light(file, fput_needed);
309out:
310 return error; 305 return error;
311} 306}
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 59d06871a850..a6d4268fb6c1 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -1592,13 +1592,12 @@ struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
1592 (fh_type == 6) ? fid->raw[5] : 0); 1592 (fh_type == 6) ? fid->raw[5] : 0);
1593} 1593}
1594 1594
1595int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, 1595int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
1596 int need_parent) 1596 struct inode *parent)
1597{ 1597{
1598 struct inode *inode = dentry->d_inode;
1599 int maxlen = *lenp; 1598 int maxlen = *lenp;
1600 1599
1601 if (need_parent && (maxlen < 5)) { 1600 if (parent && (maxlen < 5)) {
1602 *lenp = 5; 1601 *lenp = 5;
1603 return 255; 1602 return 255;
1604 } else if (maxlen < 3) { 1603 } else if (maxlen < 3) {
@@ -1610,20 +1609,15 @@ int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp,
1610 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); 1609 data[1] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id);
1611 data[2] = inode->i_generation; 1610 data[2] = inode->i_generation;
1612 *lenp = 3; 1611 *lenp = 3;
1613 /* no room for directory info? return what we've stored so far */ 1612 if (parent) {
1614 if (maxlen < 5 || !need_parent) 1613 data[3] = parent->i_ino;
1615 return 3; 1614 data[4] = le32_to_cpu(INODE_PKEY(parent)->k_dir_id);
1616 1615 *lenp = 5;
1617 spin_lock(&dentry->d_lock); 1616 if (maxlen >= 6) {
1618 inode = dentry->d_parent->d_inode; 1617 data[5] = parent->i_generation;
1619 data[3] = inode->i_ino; 1618 *lenp = 6;
1620 data[4] = le32_to_cpu(INODE_PKEY(inode)->k_dir_id); 1619 }
1621 *lenp = 5; 1620 }
1622 if (maxlen >= 6) {
1623 data[5] = inode->i_generation;
1624 *lenp = 6;
1625 }
1626 spin_unlock(&dentry->d_lock);
1627 return *lenp; 1621 return *lenp;
1628} 1622}
1629 1623
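
reiserfs_encode_fh() is converted to the new ->encode_fh() calling convention: it receives the child inode plus an optional, already stabilised parent inode, so the old dentry->d_parent walk under d_lock disappears. These handles are what userspace gets back from name_to_handle_at(2); a small example that prints one, assuming a glibc new enough (2.14 or later) to provide the wrapper:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	struct file_handle *fh;
	unsigned int i;
	int mount_id;

	fh = malloc(sizeof(*fh) + MAX_HANDLE_SZ);
	if (!fh)
		return 1;
	fh->handle_bytes = MAX_HANDLE_SZ;

	if (name_to_handle_at(AT_FDCWD, argc > 1 ? argv[1] : ".",
			      fh, &mount_id, 0) < 0) {
		perror("name_to_handle_at");
		return 1;
	}

	printf("handle type %d, %u bytes:", fh->handle_type, fh->handle_bytes);
	for (i = 0; i < fh->handle_bytes; i++)
		printf(" %02x", fh->f_handle[i]);
	printf("\n");
	free(fh);
	return 0;
}
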
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index b1a08573fe14..afcadcc03e8a 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1923,6 +1923,8 @@ static int do_journal_release(struct reiserfs_transaction_handle *th,
1923 * the workqueue job (flush_async_commit) needs this lock 1923 * the workqueue job (flush_async_commit) needs this lock
1924 */ 1924 */
1925 reiserfs_write_unlock(sb); 1925 reiserfs_write_unlock(sb);
1926
1927 cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
1926 flush_workqueue(commit_wq); 1928 flush_workqueue(commit_wq);
1927 1929
1928 if (!reiserfs_mounted_fs_count) { 1930 if (!reiserfs_mounted_fs_count) {
@@ -3231,8 +3233,6 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3231 th->t_trans_id, journal->j_trans_id); 3233 th->t_trans_id, journal->j_trans_id);
3232 } 3234 }
3233 3235
3234 sb->s_dirt = 1;
3235
3236 prepared = test_clear_buffer_journal_prepared(bh); 3236 prepared = test_clear_buffer_journal_prepared(bh);
3237 clear_buffer_journal_restore_dirty(bh); 3237 clear_buffer_journal_restore_dirty(bh);
3238 /* already in this transaction, we are done */ 3238 /* already in this transaction, we are done */
@@ -3316,6 +3316,7 @@ int journal_mark_dirty(struct reiserfs_transaction_handle *th,
3316 journal->j_first = cn; 3316 journal->j_first = cn;
3317 journal->j_last = cn; 3317 journal->j_last = cn;
3318 } 3318 }
3319 reiserfs_schedule_old_flush(sb);
3319 return 0; 3320 return 0;
3320} 3321}
3321 3322
@@ -3492,7 +3493,7 @@ static void flush_async_commits(struct work_struct *work)
3492** flushes any old transactions to disk 3493** flushes any old transactions to disk
3493** ends the current transaction if it is too old 3494** ends the current transaction if it is too old
3494*/ 3495*/
3495int reiserfs_flush_old_commits(struct super_block *sb) 3496void reiserfs_flush_old_commits(struct super_block *sb)
3496{ 3497{
3497 time_t now; 3498 time_t now;
3498 struct reiserfs_transaction_handle th; 3499 struct reiserfs_transaction_handle th;
@@ -3502,9 +3503,8 @@ int reiserfs_flush_old_commits(struct super_block *sb)
3502 /* safety check so we don't flush while we are replaying the log during 3503 /* safety check so we don't flush while we are replaying the log during
3503 * mount 3504 * mount
3504 */ 3505 */
3505 if (list_empty(&journal->j_journal_list)) { 3506 if (list_empty(&journal->j_journal_list))
3506 return 0; 3507 return;
3507 }
3508 3508
3509 /* check the current transaction. If there are no writers, and it is 3509 /* check the current transaction. If there are no writers, and it is
3510 * too old, finish it, and force the commit blocks to disk 3510 * too old, finish it, and force the commit blocks to disk
@@ -3526,7 +3526,6 @@ int reiserfs_flush_old_commits(struct super_block *sb)
3526 do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT); 3526 do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
3527 } 3527 }
3528 } 3528 }
3529 return sb->s_dirt;
3530} 3529}
3531 3530
3532/* 3531/*
@@ -3955,7 +3954,7 @@ static int do_journal_end(struct reiserfs_transaction_handle *th,
3955 ** it tells us if we should continue with the journal_end, or just return 3954 ** it tells us if we should continue with the journal_end, or just return
3956 */ 3955 */
3957 if (!check_journal_end(th, sb, nblocks, flags)) { 3956 if (!check_journal_end(th, sb, nblocks, flags)) {
3958 sb->s_dirt = 1; 3957 reiserfs_schedule_old_flush(sb);
3959 wake_queued_writers(sb); 3958 wake_queued_writers(sb);
3960 reiserfs_async_progress_wait(sb); 3959 reiserfs_async_progress_wait(sb);
3961 goto out; 3960 goto out;
diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
index a59d27126338..33215f57ea06 100644
--- a/fs/reiserfs/reiserfs.h
+++ b/fs/reiserfs/reiserfs.h
@@ -480,6 +480,11 @@ struct reiserfs_sb_info {
480 struct dentry *priv_root; /* root of /.reiserfs_priv */ 480 struct dentry *priv_root; /* root of /.reiserfs_priv */
481 struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */ 481 struct dentry *xattr_root; /* root of /.reiserfs_priv/xattrs */
482 int j_errno; 482 int j_errno;
483
484 int work_queued; /* non-zero delayed work is queued */
485 struct delayed_work old_work; /* old transactions flush delayed work */
486 spinlock_t old_work_lock; /* protects old_work and work_queued */
487
483#ifdef CONFIG_QUOTA 488#ifdef CONFIG_QUOTA
484 char *s_qf_names[MAXQUOTAS]; 489 char *s_qf_names[MAXQUOTAS];
485 int s_jquota_fmt; 490 int s_jquota_fmt;
@@ -2452,7 +2457,7 @@ struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
2452int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *); 2457int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *);
2453int reiserfs_commit_page(struct inode *inode, struct page *page, 2458int reiserfs_commit_page(struct inode *inode, struct page *page,
2454 unsigned from, unsigned to); 2459 unsigned from, unsigned to);
2455int reiserfs_flush_old_commits(struct super_block *); 2460void reiserfs_flush_old_commits(struct super_block *);
2456int reiserfs_commit_for_inode(struct inode *); 2461int reiserfs_commit_for_inode(struct inode *);
2457int reiserfs_inode_needs_commit(struct inode *); 2462int reiserfs_inode_needs_commit(struct inode *);
2458void reiserfs_update_inode_transaction(struct inode *); 2463void reiserfs_update_inode_transaction(struct inode *);
@@ -2487,6 +2492,7 @@ void reiserfs_abort(struct super_block *sb, int errno, const char *fmt, ...);
2487int reiserfs_allocate_list_bitmaps(struct super_block *s, 2492int reiserfs_allocate_list_bitmaps(struct super_block *s,
2488 struct reiserfs_list_bitmap *, unsigned int); 2493 struct reiserfs_list_bitmap *, unsigned int);
2489 2494
2495void reiserfs_schedule_old_flush(struct super_block *s);
2490void add_save_link(struct reiserfs_transaction_handle *th, 2496void add_save_link(struct reiserfs_transaction_handle *th,
2491 struct inode *inode, int truncate); 2497 struct inode *inode, int truncate);
2492int remove_save_link(struct inode *inode, int truncate); 2498int remove_save_link(struct inode *inode, int truncate);
@@ -2611,8 +2617,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid,
2611 int fh_len, int fh_type); 2617 int fh_len, int fh_type);
2612struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, 2618struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid,
2613 int fh_len, int fh_type); 2619 int fh_len, int fh_type);
2614int reiserfs_encode_fh(struct dentry *dentry, __u32 * data, int *lenp, 2620int reiserfs_encode_fh(struct inode *inode, __u32 * data, int *lenp,
2615 int connectable); 2621 struct inode *parent);
2616 2622
2617int reiserfs_truncate_file(struct inode *, int update_timestamps); 2623int reiserfs_truncate_file(struct inode *, int update_timestamps);
2618void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset, 2624void make_cpu_key(struct cpu_key *cpu_key, struct inode *inode, loff_t offset,
diff --git a/fs/reiserfs/resize.c b/fs/reiserfs/resize.c
index 9a17f63c3fd7..3ce02cff5e90 100644
--- a/fs/reiserfs/resize.c
+++ b/fs/reiserfs/resize.c
@@ -200,7 +200,6 @@ int reiserfs_resize(struct super_block *s, unsigned long block_count_new)
200 (bmap_nr_new - bmap_nr))); 200 (bmap_nr_new - bmap_nr)));
201 PUT_SB_BLOCK_COUNT(s, block_count_new); 201 PUT_SB_BLOCK_COUNT(s, block_count_new);
202 PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new); 202 PUT_SB_BMAP_NR(s, bmap_would_wrap(bmap_nr_new) ? : bmap_nr_new);
203 s->s_dirt = 1;
204 203
205 journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s)); 204 journal_mark_dirty(&th, s, SB_BUFFER_WITH_SB(s));
206 205
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index c07b7d709447..651ce767b55d 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -72,20 +72,58 @@ static int reiserfs_sync_fs(struct super_block *s, int wait)
72 if (!journal_begin(&th, s, 1)) 72 if (!journal_begin(&th, s, 1))
73 if (!journal_end_sync(&th, s, 1)) 73 if (!journal_end_sync(&th, s, 1))
74 reiserfs_flush_old_commits(s); 74 reiserfs_flush_old_commits(s);
75 s->s_dirt = 0; /* Even if it's not true.
76 * We'll loop forever in sync_supers otherwise */
77 reiserfs_write_unlock(s); 75 reiserfs_write_unlock(s);
78 return 0; 76 return 0;
79} 77}
80 78
81static void reiserfs_write_super(struct super_block *s) 79static void flush_old_commits(struct work_struct *work)
82{ 80{
81 struct reiserfs_sb_info *sbi;
82 struct super_block *s;
83
84 sbi = container_of(work, struct reiserfs_sb_info, old_work.work);
85 s = sbi->s_journal->j_work_sb;
86
87 spin_lock(&sbi->old_work_lock);
88 sbi->work_queued = 0;
89 spin_unlock(&sbi->old_work_lock);
90
83 reiserfs_sync_fs(s, 1); 91 reiserfs_sync_fs(s, 1);
84} 92}
85 93
94void reiserfs_schedule_old_flush(struct super_block *s)
95{
96 struct reiserfs_sb_info *sbi = REISERFS_SB(s);
97 unsigned long delay;
98
99 if (s->s_flags & MS_RDONLY)
100 return;
101
102 spin_lock(&sbi->old_work_lock);
103 if (!sbi->work_queued) {
104 delay = msecs_to_jiffies(dirty_writeback_interval * 10);
105 queue_delayed_work(system_long_wq, &sbi->old_work, delay);
106 sbi->work_queued = 1;
107 }
108 spin_unlock(&sbi->old_work_lock);
109}
110
111static void cancel_old_flush(struct super_block *s)
112{
113 struct reiserfs_sb_info *sbi = REISERFS_SB(s);
114
115 cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
116 spin_lock(&sbi->old_work_lock);
117 sbi->work_queued = 0;
118 spin_unlock(&sbi->old_work_lock);
119}
120
86static int reiserfs_freeze(struct super_block *s) 121static int reiserfs_freeze(struct super_block *s)
87{ 122{
88 struct reiserfs_transaction_handle th; 123 struct reiserfs_transaction_handle th;
124
125 cancel_old_flush(s);
126
89 reiserfs_write_lock(s); 127 reiserfs_write_lock(s);
90 if (!(s->s_flags & MS_RDONLY)) { 128 if (!(s->s_flags & MS_RDONLY)) {
91 int err = journal_begin(&th, s, 1); 129 int err = journal_begin(&th, s, 1);
@@ -99,7 +137,6 @@ static int reiserfs_freeze(struct super_block *s)
99 journal_end_sync(&th, s, 1); 137 journal_end_sync(&th, s, 1);
100 } 138 }
101 } 139 }
102 s->s_dirt = 0;
103 reiserfs_write_unlock(s); 140 reiserfs_write_unlock(s);
104 return 0; 141 return 0;
105} 142}
@@ -483,9 +520,6 @@ static void reiserfs_put_super(struct super_block *s)
483 520
484 reiserfs_write_lock(s); 521 reiserfs_write_lock(s);
485 522
486 if (s->s_dirt)
487 reiserfs_write_super(s);
488
489 /* change file system state to current state if it was mounted with read-write permissions */ 523 /* change file system state to current state if it was mounted with read-write permissions */
490 if (!(s->s_flags & MS_RDONLY)) { 524 if (!(s->s_flags & MS_RDONLY)) {
491 if (!journal_begin(&th, s, 10)) { 525 if (!journal_begin(&th, s, 10)) {
@@ -692,7 +726,6 @@ static const struct super_operations reiserfs_sops = {
692 .dirty_inode = reiserfs_dirty_inode, 726 .dirty_inode = reiserfs_dirty_inode,
693 .evict_inode = reiserfs_evict_inode, 727 .evict_inode = reiserfs_evict_inode,
694 .put_super = reiserfs_put_super, 728 .put_super = reiserfs_put_super,
695 .write_super = reiserfs_write_super,
696 .sync_fs = reiserfs_sync_fs, 729 .sync_fs = reiserfs_sync_fs,
697 .freeze_fs = reiserfs_freeze, 730 .freeze_fs = reiserfs_freeze,
698 .unfreeze_fs = reiserfs_unfreeze, 731 .unfreeze_fs = reiserfs_unfreeze,
@@ -1400,7 +1433,6 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg)
1400 err = journal_end(&th, s, 10); 1433 err = journal_end(&th, s, 10);
1401 if (err) 1434 if (err)
1402 goto out_err; 1435 goto out_err;
1403 s->s_dirt = 0;
1404 1436
1405 if (!(*mount_flags & MS_RDONLY)) { 1437 if (!(*mount_flags & MS_RDONLY)) {
1406 dquot_resume(s, -1); 1438 dquot_resume(s, -1);
@@ -1730,19 +1762,21 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1730 return -ENOMEM; 1762 return -ENOMEM;
1731 s->s_fs_info = sbi; 1763 s->s_fs_info = sbi;
1732 /* Set default values for options: non-aggressive tails, RO on errors */ 1764 /* Set default values for options: non-aggressive tails, RO on errors */
1733 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_SMALLTAIL); 1765 sbi->s_mount_opt |= (1 << REISERFS_SMALLTAIL);
1734 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_ERROR_RO); 1766 sbi->s_mount_opt |= (1 << REISERFS_ERROR_RO);
1735 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH); 1767 sbi->s_mount_opt |= (1 << REISERFS_BARRIER_FLUSH);
1736 /* no preallocation minimum, be smart in 1768 /* no preallocation minimum, be smart in
1737 reiserfs_file_write instead */ 1769 reiserfs_file_write instead */
1738 REISERFS_SB(s)->s_alloc_options.preallocmin = 0; 1770 sbi->s_alloc_options.preallocmin = 0;
1739 /* Preallocate by 16 blocks (17-1) at once */ 1771 /* Preallocate by 16 blocks (17-1) at once */
1740 REISERFS_SB(s)->s_alloc_options.preallocsize = 17; 1772 sbi->s_alloc_options.preallocsize = 17;
1741 /* setup default block allocator options */ 1773 /* setup default block allocator options */
1742 reiserfs_init_alloc_options(s); 1774 reiserfs_init_alloc_options(s);
1743 1775
1744 mutex_init(&REISERFS_SB(s)->lock); 1776 spin_lock_init(&sbi->old_work_lock);
1745 REISERFS_SB(s)->lock_depth = -1; 1777 INIT_DELAYED_WORK(&sbi->old_work, flush_old_commits);
1778 mutex_init(&sbi->lock);
1779 sbi->lock_depth = -1;
1746 1780
1747 jdev_name = NULL; 1781 jdev_name = NULL;
1748 if (reiserfs_parse_options 1782 if (reiserfs_parse_options
@@ -1751,8 +1785,8 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1751 goto error_unlocked; 1785 goto error_unlocked;
1752 } 1786 }
1753 if (jdev_name && jdev_name[0]) { 1787 if (jdev_name && jdev_name[0]) {
1754 REISERFS_SB(s)->s_jdev = kstrdup(jdev_name, GFP_KERNEL); 1788 sbi->s_jdev = kstrdup(jdev_name, GFP_KERNEL);
1755 if (!REISERFS_SB(s)->s_jdev) { 1789 if (!sbi->s_jdev) {
1756 SWARN(silent, s, "", "Cannot allocate memory for " 1790 SWARN(silent, s, "", "Cannot allocate memory for "
1757 "journal device name"); 1791 "journal device name");
1758 goto error; 1792 goto error;
@@ -1810,7 +1844,7 @@ static int reiserfs_fill_super(struct super_block *s, void *data, int silent)
1810 /* make data=ordered the default */ 1844 /* make data=ordered the default */
1811 if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) && 1845 if (!reiserfs_data_log(s) && !reiserfs_data_ordered(s) &&
1812 !reiserfs_data_writeback(s)) { 1846 !reiserfs_data_writeback(s)) {
1813 REISERFS_SB(s)->s_mount_opt |= (1 << REISERFS_DATA_ORDERED); 1847 sbi->s_mount_opt |= (1 << REISERFS_DATA_ORDERED);
1814 } 1848 }
1815 1849
1816 if (reiserfs_data_log(s)) { 1850 if (reiserfs_data_log(s)) {
@@ -2003,6 +2037,8 @@ error_unlocked:
2003 reiserfs_write_unlock(s); 2037 reiserfs_write_unlock(s);
2004 } 2038 }
2005 2039
2040 cancel_delayed_work_sync(&REISERFS_SB(s)->old_work);
2041
2006 reiserfs_free_bitmap_cache(s); 2042 reiserfs_free_bitmap_cache(s);
2007 if (SB_BUFFER_WITH_SB(s)) 2043 if (SB_BUFFER_WITH_SB(s))
2008 brelse(SB_BUFFER_WITH_SB(s)); 2044 brelse(SB_BUFFER_WITH_SB(s));
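The reiserfs hunks above replace the old s_dirt/->write_super polling with a self-arming delayed work item: flush_old_commits is queued on system_long_wq after one writeback interval (dirty_writeback_interval is in centiseconds, hence the *10 to milliseconds), a work_queued flag under old_work_lock prevents double-queueing, and the freeze, remount and fill_super error paths all stop it with cancel_delayed_work_sync(). A minimal sketch of that pattern, using a hypothetical foofs_sb_info rather than the real reiserfs structures:

#include <linux/spinlock.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>
#include <linux/writeback.h>		/* dirty_writeback_interval */

struct foofs_sb_info {
	spinlock_t		work_lock;	/* spin_lock_init() at mount */
	int			work_queued;
	struct delayed_work	flush_work;	/* INIT_DELAYED_WORK() at mount */
};

/* Arm the flush once; re-arming while already queued is a no-op. */
static void foofs_schedule_flush(struct foofs_sb_info *sbi)
{
	unsigned long delay;

	spin_lock(&sbi->work_lock);
	if (!sbi->work_queued) {
		/* dirty_writeback_interval is in centisecs, hence the *10 */
		delay = msecs_to_jiffies(dirty_writeback_interval * 10);
		queue_delayed_work(system_long_wq, &sbi->flush_work, delay);
		sbi->work_queued = 1;
	}
	spin_unlock(&sbi->work_lock);
}

/* Freeze/unmount path: wait out a running flush, then disarm. */
static void foofs_cancel_flush(struct foofs_sb_info *sbi)
{
	cancel_delayed_work_sync(&sbi->flush_work);
	spin_lock(&sbi->work_lock);
	sbi->work_queued = 0;
	spin_unlock(&sbi->work_lock);
}

The work function itself would clear work_queued and do the actual journal flush; in the real code that role is played by flush_old_commits(), which the fill_super hunk wires up with INIT_DELAYED_WORK().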
diff --git a/fs/select.c b/fs/select.c
index 17d33d09fc16..bae321569dfa 100644
--- a/fs/select.c
+++ b/fs/select.c
@@ -614,7 +614,6 @@ SYSCALL_DEFINE5(select, int, n, fd_set __user *, inp, fd_set __user *, outp,
614 return ret; 614 return ret;
615} 615}
616 616
617#ifdef HAVE_SET_RESTORE_SIGMASK
618static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp, 617static long do_pselect(int n, fd_set __user *inp, fd_set __user *outp,
619 fd_set __user *exp, struct timespec __user *tsp, 618 fd_set __user *exp, struct timespec __user *tsp,
620 const sigset_t __user *sigmask, size_t sigsetsize) 619 const sigset_t __user *sigmask, size_t sigsetsize)
@@ -686,7 +685,6 @@ SYSCALL_DEFINE6(pselect6, int, n, fd_set __user *, inp, fd_set __user *, outp,
686 685
687 return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize); 686 return do_pselect(n, inp, outp, exp, tsp, up, sigsetsize);
688} 687}
689#endif /* HAVE_SET_RESTORE_SIGMASK */
690 688
691#ifdef __ARCH_WANT_SYS_OLD_SELECT 689#ifdef __ARCH_WANT_SYS_OLD_SELECT
692struct sel_arg_struct { 690struct sel_arg_struct {
@@ -941,7 +939,6 @@ SYSCALL_DEFINE3(poll, struct pollfd __user *, ufds, unsigned int, nfds,
941 return ret; 939 return ret;
942} 940}
943 941
944#ifdef HAVE_SET_RESTORE_SIGMASK
945SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds, 942SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
946 struct timespec __user *, tsp, const sigset_t __user *, sigmask, 943 struct timespec __user *, tsp, const sigset_t __user *, sigmask,
947 size_t, sigsetsize) 944 size_t, sigsetsize)
@@ -992,4 +989,3 @@ SYSCALL_DEFINE5(ppoll, struct pollfd __user *, ufds, unsigned int, nfds,
992 989
993 return ret; 990 return ret;
994} 991}
995#endif /* HAVE_SET_RESTORE_SIGMASK */
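The deleted HAVE_SET_RESTORE_SIGMASK guards in fs/select.c reflect that set_restore_sigmask() is now provided on every architecture, so pselect6 and ppoll no longer need to be conditionally compiled. The trick those syscalls rely on, sketched here with a hypothetical do_interruptible_work() standing in for the real select/poll core (this is not the fs/select.c code itself):

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/errno.h>

extern long do_interruptible_work(void);	/* assumed stand-in */

static long wait_with_temp_sigmask(sigset_t *newmask)
{
	sigset_t saved;
	long ret;

	/* never allow SIGKILL/SIGSTOP to be masked */
	sigdelsetmask(newmask, sigmask(SIGKILL) | sigmask(SIGSTOP));
	sigprocmask(SIG_SETMASK, newmask, &saved);

	ret = do_interruptible_work();

	if (ret == -ERESTARTNOHAND) {
		/*
		 * A signal is pending: park the old mask and let the
		 * signal delivery path restore it after the handler runs.
		 */
		current->saved_sigmask = saved;
		set_restore_sigmask();
	} else {
		sigprocmask(SIG_SETMASK, &saved, NULL);
	}
	return ret;
}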
diff --git a/fs/signalfd.c b/fs/signalfd.c
index 7ae2a574cb25..9f35a37173de 100644
--- a/fs/signalfd.c
+++ b/fs/signalfd.c
@@ -269,12 +269,13 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
269 if (ufd < 0) 269 if (ufd < 0)
270 kfree(ctx); 270 kfree(ctx);
271 } else { 271 } else {
272 struct file *file = fget(ufd); 272 int fput_needed;
273 struct file *file = fget_light(ufd, &fput_needed);
273 if (!file) 274 if (!file)
274 return -EBADF; 275 return -EBADF;
275 ctx = file->private_data; 276 ctx = file->private_data;
276 if (file->f_op != &signalfd_fops) { 277 if (file->f_op != &signalfd_fops) {
277 fput(file); 278 fput_light(file, fput_needed);
278 return -EINVAL; 279 return -EINVAL;
279 } 280 }
280 spin_lock_irq(&current->sighand->siglock); 281 spin_lock_irq(&current->sighand->siglock);
@@ -282,7 +283,7 @@ SYSCALL_DEFINE4(signalfd4, int, ufd, sigset_t __user *, user_mask,
282 spin_unlock_irq(&current->sighand->siglock); 283 spin_unlock_irq(&current->sighand->siglock);
283 284
284 wake_up(&current->sighand->signalfd_wqh); 285 wake_up(&current->sighand->signalfd_wqh);
285 fput(file); 286 fput_light(file, fput_needed);
286 } 287 }
287 288
288 return ufd; 289 return ufd;
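This hunk, and the statfs, sync, utimes and xattr hunks below, all make the same substitution: fget()/fput() become fget_light()/fput_light(). When the calling task's file table is not shared, the lookup can skip the atomic reference-count bump, and fput_needed records whether a real fput() is still owed. The generic shape, with do_something_with() as a hypothetical stand-in for the per-syscall work:

#include <linux/file.h>
#include <linux/errno.h>

extern int do_something_with(struct file *file);	/* assumed helper */

static int with_fd(unsigned int fd)
{
	int fput_needed;
	struct file *file;
	int ret = -EBADF;

	file = fget_light(fd, &fput_needed);
	if (file) {
		ret = do_something_with(file);
		/* drops a reference only if fget_light() actually took one */
		fput_light(file, fput_needed);
	}
	return ret;
}

The win is largest for short, fd-based syscalls in single-threaded processes, which is exactly the set of callers converted in this series.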
diff --git a/fs/splice.c b/fs/splice.c
index 406ef2b792c2..c9f1318a3b82 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -1003,8 +1003,10 @@ generic_file_splice_write(struct pipe_inode_info *pipe, struct file *out,
1003 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); 1003 mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD);
1004 ret = file_remove_suid(out); 1004 ret = file_remove_suid(out);
1005 if (!ret) { 1005 if (!ret) {
1006 file_update_time(out); 1006 ret = file_update_time(out);
1007 ret = splice_from_pipe_feed(pipe, &sd, pipe_to_file); 1007 if (!ret)
1008 ret = splice_from_pipe_feed(pipe, &sd,
1009 pipe_to_file);
1008 } 1010 }
1009 mutex_unlock(&inode->i_mutex); 1011 mutex_unlock(&inode->i_mutex);
1010 } while (ret > 0); 1012 } while (ret > 0);
diff --git a/fs/statfs.c b/fs/statfs.c
index 43e6b6fe4e85..95ad5c0e586c 100644
--- a/fs/statfs.c
+++ b/fs/statfs.c
@@ -87,11 +87,12 @@ int user_statfs(const char __user *pathname, struct kstatfs *st)
87 87
88int fd_statfs(int fd, struct kstatfs *st) 88int fd_statfs(int fd, struct kstatfs *st)
89{ 89{
90 struct file *file = fget(fd); 90 int fput_needed;
91 struct file *file = fget_light(fd, &fput_needed);
91 int error = -EBADF; 92 int error = -EBADF;
92 if (file) { 93 if (file) {
93 error = vfs_statfs(&file->f_path, st); 94 error = vfs_statfs(&file->f_path, st);
94 fput(file); 95 fput_light(file, fput_needed);
95 } 96 }
96 return error; 97 return error;
97} 98}
diff --git a/fs/sync.c b/fs/sync.c
index 0e8db939d96f..11e3d1c44901 100644
--- a/fs/sync.c
+++ b/fs/sync.c
@@ -188,11 +188,12 @@ static int do_fsync(unsigned int fd, int datasync)
188{ 188{
189 struct file *file; 189 struct file *file;
190 int ret = -EBADF; 190 int ret = -EBADF;
191 int fput_needed;
191 192
192 file = fget(fd); 193 file = fget_light(fd, &fput_needed);
193 if (file) { 194 if (file) {
194 ret = vfs_fsync(file, datasync); 195 ret = vfs_fsync(file, datasync);
195 fput(file); 196 fput_light(file, fput_needed);
196 } 197 }
197 return ret; 198 return ret;
198} 199}
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 685a83756b2b..84a7e6f3c046 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -2918,6 +2918,9 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
2918 struct dentry *dent; 2918 struct dentry *dent;
2919 struct ubifs_debug_info *d = c->dbg; 2919 struct ubifs_debug_info *d = c->dbg;
2920 2920
2921 if (!IS_ENABLED(DEBUG_FS))
2922 return 0;
2923
2921 n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME, 2924 n = snprintf(d->dfs_dir_name, UBIFS_DFS_DIR_LEN + 1, UBIFS_DFS_DIR_NAME,
2922 c->vi.ubi_num, c->vi.vol_id); 2925 c->vi.ubi_num, c->vi.vol_id);
2923 if (n == UBIFS_DFS_DIR_LEN) { 2926 if (n == UBIFS_DFS_DIR_LEN) {
@@ -3010,7 +3013,8 @@ out:
3010 */ 3013 */
3011void dbg_debugfs_exit_fs(struct ubifs_info *c) 3014void dbg_debugfs_exit_fs(struct ubifs_info *c)
3012{ 3015{
3013 debugfs_remove_recursive(c->dbg->dfs_dir); 3016 if (IS_ENABLED(DEBUG_FS))
3017 debugfs_remove_recursive(c->dbg->dfs_dir);
3014} 3018}
3015 3019
3016struct ubifs_global_debug_info ubifs_dbg; 3020struct ubifs_global_debug_info ubifs_dbg;
@@ -3095,6 +3099,9 @@ int dbg_debugfs_init(void)
3095 const char *fname; 3099 const char *fname;
3096 struct dentry *dent; 3100 struct dentry *dent;
3097 3101
3102 if (!IS_ENABLED(DEBUG_FS))
3103 return 0;
3104
3098 fname = "ubifs"; 3105 fname = "ubifs";
3099 dent = debugfs_create_dir(fname, NULL); 3106 dent = debugfs_create_dir(fname, NULL);
3100 if (IS_ERR_OR_NULL(dent)) 3107 if (IS_ERR_OR_NULL(dent))
@@ -3159,7 +3166,8 @@ out:
3159 */ 3166 */
3160void dbg_debugfs_exit(void) 3167void dbg_debugfs_exit(void)
3161{ 3168{
3162 debugfs_remove_recursive(dfs_rootdir); 3169 if (IS_ENABLED(DEBUG_FS))
3170 debugfs_remove_recursive(dfs_rootdir);
3163} 3171}
3164 3172
3165/** 3173/**
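The ubifs debug hunks gate the debugfs setup and teardown on IS_ENABLED(), which collapses to a compile-time 0/1 constant: the debugfs calls are optimised away when debugfs is not configured, yet the code is still compiled and type-checked. A sketch of the same guard applied to a hypothetical driver, assuming the usual CONFIG_DEBUG_FS symbol:

#include <linux/kconfig.h>
#include <linux/debugfs.h>
#include <linux/err.h>
#include <linux/errno.h>

static struct dentry *foo_dfs_dir;	/* hypothetical module state */

static int foo_debugfs_init(void)
{
	if (!IS_ENABLED(CONFIG_DEBUG_FS))
		return 0;	/* quietly skip when debugfs is not built */

	foo_dfs_dir = debugfs_create_dir("foo", NULL);
	if (IS_ERR_OR_NULL(foo_dfs_dir))
		return -ENODEV;
	return 0;
}

static void foo_debugfs_exit(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_FS))
		debugfs_remove_recursive(foo_dfs_dir);
}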
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index 62a2727f4ecf..a6d42efc76d2 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -1127,16 +1127,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
1127 struct ubifs_inode *ui = ubifs_inode(inode); 1127 struct ubifs_inode *ui = ubifs_inode(inode);
1128 1128
1129 mutex_lock(&ui->ui_mutex); 1129 mutex_lock(&ui->ui_mutex);
1130 stat->dev = inode->i_sb->s_dev; 1130 generic_fillattr(inode, stat);
1131 stat->ino = inode->i_ino;
1132 stat->mode = inode->i_mode;
1133 stat->nlink = inode->i_nlink;
1134 stat->uid = inode->i_uid;
1135 stat->gid = inode->i_gid;
1136 stat->rdev = inode->i_rdev;
1137 stat->atime = inode->i_atime;
1138 stat->mtime = inode->i_mtime;
1139 stat->ctime = inode->i_ctime;
1140 stat->blksize = UBIFS_BLOCK_SIZE; 1131 stat->blksize = UBIFS_BLOCK_SIZE;
1141 stat->size = ui->ui_size; 1132 stat->size = ui->ui_size;
1142 1133
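The ubifs_getattr hunk swaps ten hand-copied stat fields for generic_fillattr(), which fills struct kstat from the inode; the filesystem then only overrides the fields it really owns (block size and apparent size here). A hedged sketch of the resulting ->getattr shape for a made-up filesystem, where FOOFS_BLOCK_SIZE and foofs_inode_size() are assumptions:

#include <linux/fs.h>
#include <linux/stat.h>
#include <linux/mount.h>

#define FOOFS_BLOCK_SIZE 4096
extern loff_t foofs_inode_size(struct inode *inode);	/* assumed helper */

static int foofs_getattr(struct vfsmount *mnt, struct dentry *dentry,
			 struct kstat *stat)
{
	struct inode *inode = dentry->d_inode;

	generic_fillattr(inode, stat);		/* common fields from the inode */
	stat->blksize = FOOFS_BLOCK_SIZE;	/* then override fs-specific ones */
	stat->size = foofs_inode_size(inode);
	return 0;
}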
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index a165c66e3eef..18024178ac4c 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -1260,16 +1260,15 @@ static struct dentry *udf_fh_to_parent(struct super_block *sb,
1260 fid->udf.parent_partref, 1260 fid->udf.parent_partref,
1261 fid->udf.parent_generation); 1261 fid->udf.parent_generation);
1262} 1262}
1263static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp, 1263static int udf_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
1264 int connectable) 1264 struct inode *parent)
1265{ 1265{
1266 int len = *lenp; 1266 int len = *lenp;
1267 struct inode *inode = de->d_inode;
1268 struct kernel_lb_addr location = UDF_I(inode)->i_location; 1267 struct kernel_lb_addr location = UDF_I(inode)->i_location;
1269 struct fid *fid = (struct fid *)fh; 1268 struct fid *fid = (struct fid *)fh;
1270 int type = FILEID_UDF_WITHOUT_PARENT; 1269 int type = FILEID_UDF_WITHOUT_PARENT;
1271 1270
1272 if (connectable && (len < 5)) { 1271 if (parent && (len < 5)) {
1273 *lenp = 5; 1272 *lenp = 5;
1274 return 255; 1273 return 255;
1275 } else if (len < 3) { 1274 } else if (len < 3) {
@@ -1282,14 +1281,11 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
1282 fid->udf.partref = location.partitionReferenceNum; 1281 fid->udf.partref = location.partitionReferenceNum;
1283 fid->udf.generation = inode->i_generation; 1282 fid->udf.generation = inode->i_generation;
1284 1283
1285 if (connectable && !S_ISDIR(inode->i_mode)) { 1284 if (parent) {
1286 spin_lock(&de->d_lock); 1285 location = UDF_I(parent)->i_location;
1287 inode = de->d_parent->d_inode;
1288 location = UDF_I(inode)->i_location;
1289 fid->udf.parent_block = location.logicalBlockNum; 1286 fid->udf.parent_block = location.logicalBlockNum;
1290 fid->udf.parent_partref = location.partitionReferenceNum; 1287 fid->udf.parent_partref = location.partitionReferenceNum;
1291 fid->udf.parent_generation = inode->i_generation; 1288 fid->udf.parent_generation = inode->i_generation;
1292 spin_unlock(&de->d_lock);
1293 *lenp = 5; 1289 *lenp = 5;
1294 type = FILEID_UDF_WITH_PARENT; 1290 type = FILEID_UDF_WITH_PARENT;
1295 } 1291 }
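This udf hunk, like the xfs_export.c hunk further down, adapts to the reworked export_operations API: ->encode_fh() now receives the inode and, when a connectable handle is wanted, the parent inode, instead of a dentry plus a connectable flag, so the d_parent walking and d_lock juggling disappear from the filesystems. A minimal handler in the new style, with the two/four-word handle layout treated as a hypothetical example (this is not the udf or xfs code):

#include <linux/exportfs.h>
#include <linux/fs.h>

static int foofs_encode_fh(struct inode *inode, __u32 *fh, int *lenp,
			   struct inode *parent)
{
	if (*lenp < (parent ? 4 : 2)) {
		*lenp = parent ? 4 : 2;
		return 255;	/* handle buffer too small */
	}

	/* 32-bit handle words: i_ino is truncated on 64-bit, sketch only */
	fh[0] = inode->i_ino;
	fh[1] = inode->i_generation;
	if (parent) {
		fh[2] = parent->i_ino;
		fh[3] = parent->i_generation;
		*lenp = 4;
		return FILEID_INO32_GEN_PARENT;
	}
	*lenp = 2;
	return FILEID_INO32_GEN;
}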
diff --git a/fs/utimes.c b/fs/utimes.c
index ba653f3dc1bc..fa4dbe451e27 100644
--- a/fs/utimes.c
+++ b/fs/utimes.c
@@ -140,18 +140,19 @@ long do_utimes(int dfd, const char __user *filename, struct timespec *times,
140 goto out; 140 goto out;
141 141
142 if (filename == NULL && dfd != AT_FDCWD) { 142 if (filename == NULL && dfd != AT_FDCWD) {
143 int fput_needed;
143 struct file *file; 144 struct file *file;
144 145
145 if (flags & AT_SYMLINK_NOFOLLOW) 146 if (flags & AT_SYMLINK_NOFOLLOW)
146 goto out; 147 goto out;
147 148
148 file = fget(dfd); 149 file = fget_light(dfd, &fput_needed);
149 error = -EBADF; 150 error = -EBADF;
150 if (!file) 151 if (!file)
151 goto out; 152 goto out;
152 153
153 error = utimes_common(&file->f_path, times); 154 error = utimes_common(&file->f_path, times);
154 fput(file); 155 fput_light(file, fput_needed);
155 } else { 156 } else {
156 struct path path; 157 struct path path;
157 int lookup_flags = 0; 158 int lookup_flags = 0;
diff --git a/fs/xattr.c b/fs/xattr.c
index 3c8c1cc333c7..1d7ac3790458 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -399,11 +399,12 @@ SYSCALL_DEFINE5(lsetxattr, const char __user *, pathname,
399SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name, 399SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
400 const void __user *,value, size_t, size, int, flags) 400 const void __user *,value, size_t, size, int, flags)
401{ 401{
402 int fput_needed;
402 struct file *f; 403 struct file *f;
403 struct dentry *dentry; 404 struct dentry *dentry;
404 int error = -EBADF; 405 int error = -EBADF;
405 406
406 f = fget(fd); 407 f = fget_light(fd, &fput_needed);
407 if (!f) 408 if (!f)
408 return error; 409 return error;
409 dentry = f->f_path.dentry; 410 dentry = f->f_path.dentry;
@@ -413,7 +414,7 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
413 error = setxattr(dentry, name, value, size, flags); 414 error = setxattr(dentry, name, value, size, flags);
414 mnt_drop_write_file(f); 415 mnt_drop_write_file(f);
415 } 416 }
416 fput(f); 417 fput_light(f, fput_needed);
417 return error; 418 return error;
418} 419}
419 420
@@ -486,15 +487,16 @@ SYSCALL_DEFINE4(lgetxattr, const char __user *, pathname,
486SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name, 487SYSCALL_DEFINE4(fgetxattr, int, fd, const char __user *, name,
487 void __user *, value, size_t, size) 488 void __user *, value, size_t, size)
488{ 489{
490 int fput_needed;
489 struct file *f; 491 struct file *f;
490 ssize_t error = -EBADF; 492 ssize_t error = -EBADF;
491 493
492 f = fget(fd); 494 f = fget_light(fd, &fput_needed);
493 if (!f) 495 if (!f)
494 return error; 496 return error;
495 audit_inode(NULL, f->f_path.dentry); 497 audit_inode(NULL, f->f_path.dentry);
496 error = getxattr(f->f_path.dentry, name, value, size); 498 error = getxattr(f->f_path.dentry, name, value, size);
497 fput(f); 499 fput_light(f, fput_needed);
498 return error; 500 return error;
499} 501}
500 502
@@ -566,15 +568,16 @@ SYSCALL_DEFINE3(llistxattr, const char __user *, pathname, char __user *, list,
566 568
567SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size) 569SYSCALL_DEFINE3(flistxattr, int, fd, char __user *, list, size_t, size)
568{ 570{
571 int fput_needed;
569 struct file *f; 572 struct file *f;
570 ssize_t error = -EBADF; 573 ssize_t error = -EBADF;
571 574
572 f = fget(fd); 575 f = fget_light(fd, &fput_needed);
573 if (!f) 576 if (!f)
574 return error; 577 return error;
575 audit_inode(NULL, f->f_path.dentry); 578 audit_inode(NULL, f->f_path.dentry);
576 error = listxattr(f->f_path.dentry, list, size); 579 error = listxattr(f->f_path.dentry, list, size);
577 fput(f); 580 fput_light(f, fput_needed);
578 return error; 581 return error;
579} 582}
580 583
@@ -634,11 +637,12 @@ SYSCALL_DEFINE2(lremovexattr, const char __user *, pathname,
634 637
635SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name) 638SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
636{ 639{
640 int fput_needed;
637 struct file *f; 641 struct file *f;
638 struct dentry *dentry; 642 struct dentry *dentry;
639 int error = -EBADF; 643 int error = -EBADF;
640 644
641 f = fget(fd); 645 f = fget_light(fd, &fput_needed);
642 if (!f) 646 if (!f)
643 return error; 647 return error;
644 dentry = f->f_path.dentry; 648 dentry = f->f_path.dentry;
@@ -648,7 +652,7 @@ SYSCALL_DEFINE2(fremovexattr, int, fd, const char __user *, name)
648 error = removexattr(dentry, name); 652 error = removexattr(dentry, name);
649 mnt_drop_write_file(f); 653 mnt_drop_write_file(f);
650 } 654 }
651 fput(f); 655 fput_light(f, fput_needed);
652 return error; 656 return error;
653} 657}
654 658
diff --git a/fs/xfs/kmem.c b/fs/xfs/kmem.c
index a907de565db3..4a7286c1dc80 100644
--- a/fs/xfs/kmem.c
+++ b/fs/xfs/kmem.c
@@ -46,7 +46,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize)
46} 46}
47 47
48void * 48void *
49kmem_alloc(size_t size, unsigned int __nocast flags) 49kmem_alloc(size_t size, xfs_km_flags_t flags)
50{ 50{
51 int retries = 0; 51 int retries = 0;
52 gfp_t lflags = kmem_flags_convert(flags); 52 gfp_t lflags = kmem_flags_convert(flags);
@@ -65,7 +65,7 @@ kmem_alloc(size_t size, unsigned int __nocast flags)
65} 65}
66 66
67void * 67void *
68kmem_zalloc(size_t size, unsigned int __nocast flags) 68kmem_zalloc(size_t size, xfs_km_flags_t flags)
69{ 69{
70 void *ptr; 70 void *ptr;
71 71
@@ -87,7 +87,7 @@ kmem_free(const void *ptr)
87 87
88void * 88void *
89kmem_realloc(const void *ptr, size_t newsize, size_t oldsize, 89kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
90 unsigned int __nocast flags) 90 xfs_km_flags_t flags)
91{ 91{
92 void *new; 92 void *new;
93 93
@@ -102,7 +102,7 @@ kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
102} 102}
103 103
104void * 104void *
105kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags) 105kmem_zone_alloc(kmem_zone_t *zone, xfs_km_flags_t flags)
106{ 106{
107 int retries = 0; 107 int retries = 0;
108 gfp_t lflags = kmem_flags_convert(flags); 108 gfp_t lflags = kmem_flags_convert(flags);
@@ -121,7 +121,7 @@ kmem_zone_alloc(kmem_zone_t *zone, unsigned int __nocast flags)
121} 121}
122 122
123void * 123void *
124kmem_zone_zalloc(kmem_zone_t *zone, unsigned int __nocast flags) 124kmem_zone_zalloc(kmem_zone_t *zone, xfs_km_flags_t flags)
125{ 125{
126 void *ptr; 126 void *ptr;
127 127
diff --git a/fs/xfs/kmem.h b/fs/xfs/kmem.h
index ab7c53fe346e..b2f2620f9a87 100644
--- a/fs/xfs/kmem.h
+++ b/fs/xfs/kmem.h
@@ -27,10 +27,11 @@
27 * General memory allocation interfaces 27 * General memory allocation interfaces
28 */ 28 */
29 29
30#define KM_SLEEP 0x0001u 30typedef unsigned __bitwise xfs_km_flags_t;
31#define KM_NOSLEEP 0x0002u 31#define KM_SLEEP ((__force xfs_km_flags_t)0x0001u)
32#define KM_NOFS 0x0004u 32#define KM_NOSLEEP ((__force xfs_km_flags_t)0x0002u)
33#define KM_MAYFAIL 0x0008u 33#define KM_NOFS ((__force xfs_km_flags_t)0x0004u)
34#define KM_MAYFAIL ((__force xfs_km_flags_t)0x0008u)
34 35
35/* 36/*
36 * We use a special process flag to avoid recursive callbacks into 37 * We use a special process flag to avoid recursive callbacks into
@@ -38,7 +39,7 @@
38 * warnings, so we explicitly skip any generic ones (silly of us). 39 * warnings, so we explicitly skip any generic ones (silly of us).
39 */ 40 */
40static inline gfp_t 41static inline gfp_t
41kmem_flags_convert(unsigned int __nocast flags) 42kmem_flags_convert(xfs_km_flags_t flags)
42{ 43{
43 gfp_t lflags; 44 gfp_t lflags;
44 45
@@ -54,9 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
54 return lflags; 55 return lflags;
55} 56}
56 57
57extern void *kmem_alloc(size_t, unsigned int __nocast); 58extern void *kmem_alloc(size_t, xfs_km_flags_t);
58extern void *kmem_zalloc(size_t, unsigned int __nocast); 59extern void *kmem_zalloc(size_t, xfs_km_flags_t);
59extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast); 60extern void *kmem_realloc(const void *, size_t, size_t, xfs_km_flags_t);
60extern void kmem_free(const void *); 61extern void kmem_free(const void *);
61 62
62static inline void *kmem_zalloc_large(size_t size) 63static inline void *kmem_zalloc_large(size_t size)
@@ -107,7 +108,7 @@ kmem_zone_destroy(kmem_zone_t *zone)
107 kmem_cache_destroy(zone); 108 kmem_cache_destroy(zone);
108} 109}
109 110
110extern void *kmem_zone_alloc(kmem_zone_t *, unsigned int __nocast); 111extern void *kmem_zone_alloc(kmem_zone_t *, xfs_km_flags_t);
111extern void *kmem_zone_zalloc(kmem_zone_t *, unsigned int __nocast); 112extern void *kmem_zone_zalloc(kmem_zone_t *, xfs_km_flags_t);
112 113
113#endif /* __XFS_SUPPORT_KMEM_H__ */ 114#endif /* __XFS_SUPPORT_KMEM_H__ */
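The xfs_km_flags_t change is the standard sparse annotation pattern: a __bitwise typedef creates a distinct type that plain integers cannot silently be mixed with when the tree is checked with sparse (make C=2), and __force casts mark the few places where a conversion is intentional, here the flag definitions and kmem_flags_convert(). The same trick in miniature, with foo_flags_t as an invented type:

#include <linux/types.h>
#include <linux/gfp.h>

/* sparse treats foo_flags_t as incompatible with plain unsigned int */
typedef unsigned __bitwise foo_flags_t;

#define FOO_SLEEP	((__force foo_flags_t)0x0001u)
#define FOO_NOFS	((__force foo_flags_t)0x0002u)

static inline gfp_t foo_flags_to_gfp(foo_flags_t flags)
{
	gfp_t gfp = 0;

	/* & between two values of the same __bitwise type is fine for sparse */
	if (flags & FOO_NOFS)
		gfp |= GFP_NOFS;
	else if (flags & FOO_SLEEP)
		gfp |= GFP_KERNEL;
	return gfp;
}

Without sparse, __bitwise and __force expand to nothing, so the generated code is identical; the payoff is purely that passing a bare integer (or the wrong flag type) where a foo_flags_t is expected becomes a reported warning.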
diff --git a/fs/xfs/xfs_alloc.c b/fs/xfs/xfs_alloc.c
index 229641fb8e67..9d1aeb7e2734 100644
--- a/fs/xfs/xfs_alloc.c
+++ b/fs/xfs/xfs_alloc.c
@@ -1080,6 +1080,7 @@ restart:
1080 goto restart; 1080 goto restart;
1081 } 1081 }
1082 1082
1083 xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
1083 trace_xfs_alloc_size_neither(args); 1084 trace_xfs_alloc_size_neither(args);
1084 args->agbno = NULLAGBLOCK; 1085 args->agbno = NULLAGBLOCK;
1085 return 0; 1086 return 0;
@@ -2441,7 +2442,7 @@ xfs_alloc_vextent(
2441 DECLARE_COMPLETION_ONSTACK(done); 2442 DECLARE_COMPLETION_ONSTACK(done);
2442 2443
2443 args->done = &done; 2444 args->done = &done;
2444 INIT_WORK(&args->work, xfs_alloc_vextent_worker); 2445 INIT_WORK_ONSTACK(&args->work, xfs_alloc_vextent_worker);
2445 queue_work(xfs_alloc_wq, &args->work); 2446 queue_work(xfs_alloc_wq, &args->work);
2446 wait_for_completion(&done); 2447 wait_for_completion(&done);
2447 return args->result; 2448 return args->result;
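Two separate fixes are folded into the xfs_alloc.c hunk: the "size neither" exit path now deletes its btree cursor instead of leaking it, and the synchronous hand-off to the allocation workqueue switches to INIT_WORK_ONSTACK(), the variant lockdep requires when the work_struct lives on the caller's stack. A sketch of that queue-and-wait shape, with foo_worker(), foo_args and the result value as assumptions:

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/completion.h>

struct foo_args {
	struct work_struct	work;
	struct completion	*done;
	int			result;
};

static void foo_worker(struct work_struct *work)
{
	struct foo_args *args = container_of(work, struct foo_args, work);

	args->result = 42;		/* stand-in for the real work */
	complete(args->done);
}

/* Run foo_worker() on a workqueue but block the caller until it finishes. */
static int foo_run_on_wq(struct workqueue_struct *wq)
{
	DECLARE_COMPLETION_ONSTACK(done);
	struct foo_args args;

	args.done = &done;
	/* ONSTACK variant: the work item lives in this stack frame */
	INIT_WORK_ONSTACK(&args.work, foo_worker);
	queue_work(wq, &args.work);
	wait_for_completion(&done);
	return args.result;
}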
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index ae31c313a79e..8dad722c0041 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -981,10 +981,15 @@ xfs_vm_writepage(
981 imap_valid = 0; 981 imap_valid = 0;
982 } 982 }
983 } else { 983 } else {
984 if (PageUptodate(page)) { 984 if (PageUptodate(page))
985 ASSERT(buffer_mapped(bh)); 985 ASSERT(buffer_mapped(bh));
986 imap_valid = 0; 986 /*
987 } 987 * This buffer is not uptodate and will not be
988 * written to disk. Ensure that we will put any
989 * subsequent writeable buffers into a new
990 * ioend.
991 */
992 imap_valid = 0;
988 continue; 993 continue;
989 } 994 }
990 995
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index 172d3cc8f8cb..a4beb421018a 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -201,14 +201,7 @@ xfs_buf_alloc(
201 bp->b_length = numblks; 201 bp->b_length = numblks;
202 bp->b_io_length = numblks; 202 bp->b_io_length = numblks;
203 bp->b_flags = flags; 203 bp->b_flags = flags;
204 204 bp->b_bn = blkno;
205 /*
206 * We do not set the block number here in the buffer because we have not
207 * finished initialising the buffer. We insert the buffer into the cache
208 * in this state, so this ensures that we are unable to do IO on a
209 * buffer that hasn't been fully initialised.
210 */
211 bp->b_bn = XFS_BUF_DADDR_NULL;
212 atomic_set(&bp->b_pin_count, 0); 205 atomic_set(&bp->b_pin_count, 0);
213 init_waitqueue_head(&bp->b_waiters); 206 init_waitqueue_head(&bp->b_waiters);
214 207
@@ -567,11 +560,6 @@ xfs_buf_get(
567 if (bp != new_bp) 560 if (bp != new_bp)
568 xfs_buf_free(new_bp); 561 xfs_buf_free(new_bp);
569 562
570 /*
571 * Now we have a workable buffer, fill in the block number so
572 * that we can do IO on it.
573 */
574 bp->b_bn = blkno;
575 bp->b_io_length = bp->b_length; 563 bp->b_io_length = bp->b_length;
576 564
577found: 565found:
@@ -772,7 +760,7 @@ xfs_buf_get_uncached(
772 int error, i; 760 int error, i;
773 xfs_buf_t *bp; 761 xfs_buf_t *bp;
774 762
775 bp = xfs_buf_alloc(target, 0, numblks, 0); 763 bp = xfs_buf_alloc(target, XFS_BUF_DADDR_NULL, numblks, 0);
776 if (unlikely(bp == NULL)) 764 if (unlikely(bp == NULL))
777 goto fail; 765 goto fail;
778 766
diff --git a/fs/xfs/xfs_export.c b/fs/xfs/xfs_export.c
index 2d25d19c4ea1..42679223a0fd 100644
--- a/fs/xfs/xfs_export.c
+++ b/fs/xfs/xfs_export.c
@@ -52,19 +52,18 @@ static int xfs_fileid_length(int fileid_type)
52 52
53STATIC int 53STATIC int
54xfs_fs_encode_fh( 54xfs_fs_encode_fh(
55 struct dentry *dentry, 55 struct inode *inode,
56 __u32 *fh, 56 __u32 *fh,
57 int *max_len, 57 int *max_len,
58 int connectable) 58 struct inode *parent)
59{ 59{
60 struct fid *fid = (struct fid *)fh; 60 struct fid *fid = (struct fid *)fh;
61 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh; 61 struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fh;
62 struct inode *inode = dentry->d_inode;
63 int fileid_type; 62 int fileid_type;
64 int len; 63 int len;
65 64
66 /* Directories don't need their parent encoded, they have ".." */ 65 /* Directories don't need their parent encoded, they have ".." */
67 if (S_ISDIR(inode->i_mode) || !connectable) 66 if (!parent)
68 fileid_type = FILEID_INO32_GEN; 67 fileid_type = FILEID_INO32_GEN;
69 else 68 else
70 fileid_type = FILEID_INO32_GEN_PARENT; 69 fileid_type = FILEID_INO32_GEN_PARENT;
@@ -96,20 +95,16 @@ xfs_fs_encode_fh(
96 95
97 switch (fileid_type) { 96 switch (fileid_type) {
98 case FILEID_INO32_GEN_PARENT: 97 case FILEID_INO32_GEN_PARENT:
99 spin_lock(&dentry->d_lock); 98 fid->i32.parent_ino = XFS_I(parent)->i_ino;
100 fid->i32.parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; 99 fid->i32.parent_gen = parent->i_generation;
101 fid->i32.parent_gen = dentry->d_parent->d_inode->i_generation;
102 spin_unlock(&dentry->d_lock);
103 /*FALLTHRU*/ 100 /*FALLTHRU*/
104 case FILEID_INO32_GEN: 101 case FILEID_INO32_GEN:
105 fid->i32.ino = XFS_I(inode)->i_ino; 102 fid->i32.ino = XFS_I(inode)->i_ino;
106 fid->i32.gen = inode->i_generation; 103 fid->i32.gen = inode->i_generation;
107 break; 104 break;
108 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG: 105 case FILEID_INO32_GEN_PARENT | XFS_FILEID_TYPE_64FLAG:
109 spin_lock(&dentry->d_lock); 106 fid64->parent_ino = XFS_I(parent)->i_ino;
110 fid64->parent_ino = XFS_I(dentry->d_parent->d_inode)->i_ino; 107 fid64->parent_gen = parent->i_generation;
111 fid64->parent_gen = dentry->d_parent->d_inode->i_generation;
112 spin_unlock(&dentry->d_lock);
113 /*FALLTHRU*/ 108 /*FALLTHRU*/
114 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG: 109 case FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG:
115 fid64->ino = XFS_I(inode)->i_ino; 110 fid64->ino = XFS_I(inode)->i_ino;
diff --git a/fs/xfs/xfs_file.c b/fs/xfs/xfs_file.c
index 8d214b87f6bb..9f7ec15a6522 100644
--- a/fs/xfs/xfs_file.c
+++ b/fs/xfs/xfs_file.c
@@ -586,8 +586,11 @@ restart:
586 * lock above. Eventually we should look into a way to avoid 586 * lock above. Eventually we should look into a way to avoid
587 * the pointless lock roundtrip. 587 * the pointless lock roundtrip.
588 */ 588 */
589 if (likely(!(file->f_mode & FMODE_NOCMTIME))) 589 if (likely(!(file->f_mode & FMODE_NOCMTIME))) {
590 file_update_time(file); 590 error = file_update_time(file);
591 if (error)
592 return error;
593 }
591 594
592 /* 595 /*
593 * If we're writing the file then make sure to clear the setuid and 596 * If we're writing the file then make sure to clear the setuid and
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 6cdbf90c6f7b..d041d47d9d86 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -505,6 +505,14 @@ xfs_inode_item_push(
505 } 505 }
506 506
507 /* 507 /*
508 * Stale inode items should force out the iclog.
509 */
510 if (ip->i_flags & XFS_ISTALE) {
511 rval = XFS_ITEM_PINNED;
512 goto out_unlock;
513 }
514
515 /*
508 * Someone else is already flushing the inode. Nothing we can do 516 * Someone else is already flushing the inode. Nothing we can do
509 * here but wait for the flush to finish and remove the item from 517 * here but wait for the flush to finish and remove the item from
510 * the AIL. 518 * the AIL.
@@ -514,15 +522,6 @@ xfs_inode_item_push(
514 goto out_unlock; 522 goto out_unlock;
515 } 523 }
516 524
517 /*
518 * Stale inode items should force out the iclog.
519 */
520 if (ip->i_flags & XFS_ISTALE) {
521 xfs_ifunlock(ip);
522 xfs_iunlock(ip, XFS_ILOCK_SHARED);
523 return XFS_ITEM_PINNED;
524 }
525
526 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); 525 ASSERT(iip->ili_fields != 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
527 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount)); 526 ASSERT(iip->ili_logged == 0 || XFS_FORCED_SHUTDOWN(ip->i_mount));
528 527
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index 6b965bf450e4..d90d4a388609 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -38,13 +38,21 @@
38kmem_zone_t *xfs_log_ticket_zone; 38kmem_zone_t *xfs_log_ticket_zone;
39 39
40/* Local miscellaneous function prototypes */ 40/* Local miscellaneous function prototypes */
41STATIC int xlog_commit_record(struct log *log, struct xlog_ticket *ticket, 41STATIC int
42 xlog_in_core_t **, xfs_lsn_t *); 42xlog_commit_record(
43 struct xlog *log,
44 struct xlog_ticket *ticket,
45 struct xlog_in_core **iclog,
46 xfs_lsn_t *commitlsnp);
47
43STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp, 48STATIC xlog_t * xlog_alloc_log(xfs_mount_t *mp,
44 xfs_buftarg_t *log_target, 49 xfs_buftarg_t *log_target,
45 xfs_daddr_t blk_offset, 50 xfs_daddr_t blk_offset,
46 int num_bblks); 51 int num_bblks);
47STATIC int xlog_space_left(struct log *log, atomic64_t *head); 52STATIC int
53xlog_space_left(
54 struct xlog *log,
55 atomic64_t *head);
48STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog); 56STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
49STATIC void xlog_dealloc_log(xlog_t *log); 57STATIC void xlog_dealloc_log(xlog_t *log);
50 58
@@ -64,8 +72,10 @@ STATIC void xlog_state_switch_iclogs(xlog_t *log,
64 int eventual_size); 72 int eventual_size);
65STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog); 73STATIC void xlog_state_want_sync(xlog_t *log, xlog_in_core_t *iclog);
66 74
67STATIC void xlog_grant_push_ail(struct log *log, 75STATIC void
68 int need_bytes); 76xlog_grant_push_ail(
77 struct xlog *log,
78 int need_bytes);
69STATIC void xlog_regrant_reserve_log_space(xlog_t *log, 79STATIC void xlog_regrant_reserve_log_space(xlog_t *log,
70 xlog_ticket_t *ticket); 80 xlog_ticket_t *ticket);
71STATIC void xlog_ungrant_log_space(xlog_t *log, 81STATIC void xlog_ungrant_log_space(xlog_t *log,
@@ -73,7 +83,9 @@ STATIC void xlog_ungrant_log_space(xlog_t *log,
73 83
74#if defined(DEBUG) 84#if defined(DEBUG)
75STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr); 85STATIC void xlog_verify_dest_ptr(xlog_t *log, char *ptr);
76STATIC void xlog_verify_grant_tail(struct log *log); 86STATIC void
87xlog_verify_grant_tail(
88 struct xlog *log);
77STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog, 89STATIC void xlog_verify_iclog(xlog_t *log, xlog_in_core_t *iclog,
78 int count, boolean_t syncing); 90 int count, boolean_t syncing);
79STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog, 91STATIC void xlog_verify_tail_lsn(xlog_t *log, xlog_in_core_t *iclog,
@@ -89,9 +101,9 @@ STATIC int xlog_iclogs_empty(xlog_t *log);
89 101
90static void 102static void
91xlog_grant_sub_space( 103xlog_grant_sub_space(
92 struct log *log, 104 struct xlog *log,
93 atomic64_t *head, 105 atomic64_t *head,
94 int bytes) 106 int bytes)
95{ 107{
96 int64_t head_val = atomic64_read(head); 108 int64_t head_val = atomic64_read(head);
97 int64_t new, old; 109 int64_t new, old;
@@ -115,9 +127,9 @@ xlog_grant_sub_space(
115 127
116static void 128static void
117xlog_grant_add_space( 129xlog_grant_add_space(
118 struct log *log, 130 struct xlog *log,
119 atomic64_t *head, 131 atomic64_t *head,
120 int bytes) 132 int bytes)
121{ 133{
122 int64_t head_val = atomic64_read(head); 134 int64_t head_val = atomic64_read(head);
123 int64_t new, old; 135 int64_t new, old;
@@ -165,7 +177,7 @@ xlog_grant_head_wake_all(
165 177
166static inline int 178static inline int
167xlog_ticket_reservation( 179xlog_ticket_reservation(
168 struct log *log, 180 struct xlog *log,
169 struct xlog_grant_head *head, 181 struct xlog_grant_head *head,
170 struct xlog_ticket *tic) 182 struct xlog_ticket *tic)
171{ 183{
@@ -182,7 +194,7 @@ xlog_ticket_reservation(
182 194
183STATIC bool 195STATIC bool
184xlog_grant_head_wake( 196xlog_grant_head_wake(
185 struct log *log, 197 struct xlog *log,
186 struct xlog_grant_head *head, 198 struct xlog_grant_head *head,
187 int *free_bytes) 199 int *free_bytes)
188{ 200{
@@ -204,7 +216,7 @@ xlog_grant_head_wake(
204 216
205STATIC int 217STATIC int
206xlog_grant_head_wait( 218xlog_grant_head_wait(
207 struct log *log, 219 struct xlog *log,
208 struct xlog_grant_head *head, 220 struct xlog_grant_head *head,
209 struct xlog_ticket *tic, 221 struct xlog_ticket *tic,
210 int need_bytes) 222 int need_bytes)
@@ -256,7 +268,7 @@ shutdown:
256 */ 268 */
257STATIC int 269STATIC int
258xlog_grant_head_check( 270xlog_grant_head_check(
259 struct log *log, 271 struct xlog *log,
260 struct xlog_grant_head *head, 272 struct xlog_grant_head *head,
261 struct xlog_ticket *tic, 273 struct xlog_ticket *tic,
262 int *need_bytes) 274 int *need_bytes)
@@ -323,7 +335,7 @@ xfs_log_regrant(
323 struct xfs_mount *mp, 335 struct xfs_mount *mp,
324 struct xlog_ticket *tic) 336 struct xlog_ticket *tic)
325{ 337{
326 struct log *log = mp->m_log; 338 struct xlog *log = mp->m_log;
327 int need_bytes; 339 int need_bytes;
328 int error = 0; 340 int error = 0;
329 341
@@ -389,7 +401,7 @@ xfs_log_reserve(
389 bool permanent, 401 bool permanent,
390 uint t_type) 402 uint t_type)
391{ 403{
392 struct log *log = mp->m_log; 404 struct xlog *log = mp->m_log;
393 struct xlog_ticket *tic; 405 struct xlog_ticket *tic;
394 int need_bytes; 406 int need_bytes;
395 int error = 0; 407 int error = 0;
@@ -465,7 +477,7 @@ xfs_log_done(
465 struct xlog_in_core **iclog, 477 struct xlog_in_core **iclog,
466 uint flags) 478 uint flags)
467{ 479{
468 struct log *log = mp->m_log; 480 struct xlog *log = mp->m_log;
469 xfs_lsn_t lsn = 0; 481 xfs_lsn_t lsn = 0;
470 482
471 if (XLOG_FORCED_SHUTDOWN(log) || 483 if (XLOG_FORCED_SHUTDOWN(log) ||
@@ -810,6 +822,7 @@ xfs_log_unmount_write(xfs_mount_t *mp)
810void 822void
811xfs_log_unmount(xfs_mount_t *mp) 823xfs_log_unmount(xfs_mount_t *mp)
812{ 824{
825 cancel_delayed_work_sync(&mp->m_sync_work);
813 xfs_trans_ail_destroy(mp); 826 xfs_trans_ail_destroy(mp);
814 xlog_dealloc_log(mp->m_log); 827 xlog_dealloc_log(mp->m_log);
815} 828}
@@ -838,7 +851,7 @@ void
838xfs_log_space_wake( 851xfs_log_space_wake(
839 struct xfs_mount *mp) 852 struct xfs_mount *mp)
840{ 853{
841 struct log *log = mp->m_log; 854 struct xlog *log = mp->m_log;
842 int free_bytes; 855 int free_bytes;
843 856
844 if (XLOG_FORCED_SHUTDOWN(log)) 857 if (XLOG_FORCED_SHUTDOWN(log))
@@ -916,7 +929,7 @@ xfs_lsn_t
916xlog_assign_tail_lsn_locked( 929xlog_assign_tail_lsn_locked(
917 struct xfs_mount *mp) 930 struct xfs_mount *mp)
918{ 931{
919 struct log *log = mp->m_log; 932 struct xlog *log = mp->m_log;
920 struct xfs_log_item *lip; 933 struct xfs_log_item *lip;
921 xfs_lsn_t tail_lsn; 934 xfs_lsn_t tail_lsn;
922 935
@@ -965,7 +978,7 @@ xlog_assign_tail_lsn(
965 */ 978 */
966STATIC int 979STATIC int
967xlog_space_left( 980xlog_space_left(
968 struct log *log, 981 struct xlog *log,
969 atomic64_t *head) 982 atomic64_t *head)
970{ 983{
971 int free_bytes; 984 int free_bytes;
@@ -1277,7 +1290,7 @@ out:
1277 */ 1290 */
1278STATIC int 1291STATIC int
1279xlog_commit_record( 1292xlog_commit_record(
1280 struct log *log, 1293 struct xlog *log,
1281 struct xlog_ticket *ticket, 1294 struct xlog_ticket *ticket,
1282 struct xlog_in_core **iclog, 1295 struct xlog_in_core **iclog,
1283 xfs_lsn_t *commitlsnp) 1296 xfs_lsn_t *commitlsnp)
@@ -1311,7 +1324,7 @@ xlog_commit_record(
1311 */ 1324 */
1312STATIC void 1325STATIC void
1313xlog_grant_push_ail( 1326xlog_grant_push_ail(
1314 struct log *log, 1327 struct xlog *log,
1315 int need_bytes) 1328 int need_bytes)
1316{ 1329{
1317 xfs_lsn_t threshold_lsn = 0; 1330 xfs_lsn_t threshold_lsn = 0;
@@ -1790,7 +1803,7 @@ xlog_write_start_rec(
1790 1803
1791static xlog_op_header_t * 1804static xlog_op_header_t *
1792xlog_write_setup_ophdr( 1805xlog_write_setup_ophdr(
1793 struct log *log, 1806 struct xlog *log,
1794 struct xlog_op_header *ophdr, 1807 struct xlog_op_header *ophdr,
1795 struct xlog_ticket *ticket, 1808 struct xlog_ticket *ticket,
1796 uint flags) 1809 uint flags)
@@ -1873,7 +1886,7 @@ xlog_write_setup_copy(
1873 1886
1874static int 1887static int
1875xlog_write_copy_finish( 1888xlog_write_copy_finish(
1876 struct log *log, 1889 struct xlog *log,
1877 struct xlog_in_core *iclog, 1890 struct xlog_in_core *iclog,
1878 uint flags, 1891 uint flags,
1879 int *record_cnt, 1892 int *record_cnt,
@@ -1958,7 +1971,7 @@ xlog_write_copy_finish(
1958 */ 1971 */
1959int 1972int
1960xlog_write( 1973xlog_write(
1961 struct log *log, 1974 struct xlog *log,
1962 struct xfs_log_vec *log_vector, 1975 struct xfs_log_vec *log_vector,
1963 struct xlog_ticket *ticket, 1976 struct xlog_ticket *ticket,
1964 xfs_lsn_t *start_lsn, 1977 xfs_lsn_t *start_lsn,
@@ -2821,7 +2834,7 @@ _xfs_log_force(
2821 uint flags, 2834 uint flags,
2822 int *log_flushed) 2835 int *log_flushed)
2823{ 2836{
2824 struct log *log = mp->m_log; 2837 struct xlog *log = mp->m_log;
2825 struct xlog_in_core *iclog; 2838 struct xlog_in_core *iclog;
2826 xfs_lsn_t lsn; 2839 xfs_lsn_t lsn;
2827 2840
@@ -2969,7 +2982,7 @@ _xfs_log_force_lsn(
2969 uint flags, 2982 uint flags,
2970 int *log_flushed) 2983 int *log_flushed)
2971{ 2984{
2972 struct log *log = mp->m_log; 2985 struct xlog *log = mp->m_log;
2973 struct xlog_in_core *iclog; 2986 struct xlog_in_core *iclog;
2974 int already_slept = 0; 2987 int already_slept = 0;
2975 2988
@@ -3147,12 +3160,12 @@ xfs_log_ticket_get(
3147 */ 3160 */
3148xlog_ticket_t * 3161xlog_ticket_t *
3149xlog_ticket_alloc( 3162xlog_ticket_alloc(
3150 struct log *log, 3163 struct xlog *log,
3151 int unit_bytes, 3164 int unit_bytes,
3152 int cnt, 3165 int cnt,
3153 char client, 3166 char client,
3154 bool permanent, 3167 bool permanent,
3155 int alloc_flags) 3168 xfs_km_flags_t alloc_flags)
3156{ 3169{
3157 struct xlog_ticket *tic; 3170 struct xlog_ticket *tic;
3158 uint num_headers; 3171 uint num_headers;
@@ -3278,7 +3291,7 @@ xlog_ticket_alloc(
3278 */ 3291 */
3279void 3292void
3280xlog_verify_dest_ptr( 3293xlog_verify_dest_ptr(
3281 struct log *log, 3294 struct xlog *log,
3282 char *ptr) 3295 char *ptr)
3283{ 3296{
3284 int i; 3297 int i;
@@ -3307,7 +3320,7 @@ xlog_verify_dest_ptr(
3307 */ 3320 */
3308STATIC void 3321STATIC void
3309xlog_verify_grant_tail( 3322xlog_verify_grant_tail(
3310 struct log *log) 3323 struct xlog *log)
3311{ 3324{
3312 int tail_cycle, tail_blocks; 3325 int tail_cycle, tail_blocks;
3313 int cycle, space; 3326 int cycle, space;
diff --git a/fs/xfs/xfs_log_cil.c b/fs/xfs/xfs_log_cil.c
index 7d6197c58493..ddc4529d07d3 100644
--- a/fs/xfs/xfs_log_cil.c
+++ b/fs/xfs/xfs_log_cil.c
@@ -44,7 +44,7 @@
44 */ 44 */
45static struct xlog_ticket * 45static struct xlog_ticket *
46xlog_cil_ticket_alloc( 46xlog_cil_ticket_alloc(
47 struct log *log) 47 struct xlog *log)
48{ 48{
49 struct xlog_ticket *tic; 49 struct xlog_ticket *tic;
50 50
@@ -72,7 +72,7 @@ xlog_cil_ticket_alloc(
72 */ 72 */
73void 73void
74xlog_cil_init_post_recovery( 74xlog_cil_init_post_recovery(
75 struct log *log) 75 struct xlog *log)
76{ 76{
77 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log); 77 log->l_cilp->xc_ctx->ticket = xlog_cil_ticket_alloc(log);
78 log->l_cilp->xc_ctx->sequence = 1; 78 log->l_cilp->xc_ctx->sequence = 1;
@@ -182,7 +182,7 @@ xlog_cil_prepare_log_vecs(
182 */ 182 */
183STATIC void 183STATIC void
184xfs_cil_prepare_item( 184xfs_cil_prepare_item(
185 struct log *log, 185 struct xlog *log,
186 struct xfs_log_vec *lv, 186 struct xfs_log_vec *lv,
187 int *len, 187 int *len,
188 int *diff_iovecs) 188 int *diff_iovecs)
@@ -231,7 +231,7 @@ xfs_cil_prepare_item(
231 */ 231 */
232static void 232static void
233xlog_cil_insert_items( 233xlog_cil_insert_items(
234 struct log *log, 234 struct xlog *log,
235 struct xfs_log_vec *log_vector, 235 struct xfs_log_vec *log_vector,
236 struct xlog_ticket *ticket) 236 struct xlog_ticket *ticket)
237{ 237{
@@ -373,7 +373,7 @@ xlog_cil_committed(
373 */ 373 */
374STATIC int 374STATIC int
375xlog_cil_push( 375xlog_cil_push(
376 struct log *log) 376 struct xlog *log)
377{ 377{
378 struct xfs_cil *cil = log->l_cilp; 378 struct xfs_cil *cil = log->l_cilp;
379 struct xfs_log_vec *lv; 379 struct xfs_log_vec *lv;
@@ -601,7 +601,7 @@ xlog_cil_push_work(
601 */ 601 */
602static void 602static void
603xlog_cil_push_background( 603xlog_cil_push_background(
604 struct log *log) 604 struct xlog *log)
605{ 605{
606 struct xfs_cil *cil = log->l_cilp; 606 struct xfs_cil *cil = log->l_cilp;
607 607
@@ -629,7 +629,7 @@ xlog_cil_push_background(
629 629
630static void 630static void
631xlog_cil_push_foreground( 631xlog_cil_push_foreground(
632 struct log *log, 632 struct xlog *log,
633 xfs_lsn_t push_seq) 633 xfs_lsn_t push_seq)
634{ 634{
635 struct xfs_cil *cil = log->l_cilp; 635 struct xfs_cil *cil = log->l_cilp;
@@ -683,7 +683,7 @@ xfs_log_commit_cil(
683 xfs_lsn_t *commit_lsn, 683 xfs_lsn_t *commit_lsn,
684 int flags) 684 int flags)
685{ 685{
686 struct log *log = mp->m_log; 686 struct xlog *log = mp->m_log;
687 int log_flags = 0; 687 int log_flags = 0;
688 struct xfs_log_vec *log_vector; 688 struct xfs_log_vec *log_vector;
689 689
@@ -754,7 +754,7 @@ xfs_log_commit_cil(
754 */ 754 */
755xfs_lsn_t 755xfs_lsn_t
756xlog_cil_force_lsn( 756xlog_cil_force_lsn(
757 struct log *log, 757 struct xlog *log,
758 xfs_lsn_t sequence) 758 xfs_lsn_t sequence)
759{ 759{
760 struct xfs_cil *cil = log->l_cilp; 760 struct xfs_cil *cil = log->l_cilp;
@@ -833,7 +833,7 @@ xfs_log_item_in_current_chkpt(
833 */ 833 */
834int 834int
835xlog_cil_init( 835xlog_cil_init(
836 struct log *log) 836 struct xlog *log)
837{ 837{
838 struct xfs_cil *cil; 838 struct xfs_cil *cil;
839 struct xfs_cil_ctx *ctx; 839 struct xfs_cil_ctx *ctx;
@@ -869,7 +869,7 @@ xlog_cil_init(
869 869
870void 870void
871xlog_cil_destroy( 871xlog_cil_destroy(
872 struct log *log) 872 struct xlog *log)
873{ 873{
874 if (log->l_cilp->xc_ctx) { 874 if (log->l_cilp->xc_ctx) {
875 if (log->l_cilp->xc_ctx->ticket) 875 if (log->l_cilp->xc_ctx->ticket)
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 735ff1ee53da..72eba2201b14 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -19,7 +19,7 @@
19#define __XFS_LOG_PRIV_H__ 19#define __XFS_LOG_PRIV_H__
20 20
21struct xfs_buf; 21struct xfs_buf;
22struct log; 22struct xlog;
23struct xlog_ticket; 23struct xlog_ticket;
24struct xfs_mount; 24struct xfs_mount;
25 25
@@ -352,7 +352,7 @@ typedef struct xlog_in_core {
352 struct xlog_in_core *ic_next; 352 struct xlog_in_core *ic_next;
353 struct xlog_in_core *ic_prev; 353 struct xlog_in_core *ic_prev;
354 struct xfs_buf *ic_bp; 354 struct xfs_buf *ic_bp;
355 struct log *ic_log; 355 struct xlog *ic_log;
356 int ic_size; 356 int ic_size;
357 int ic_offset; 357 int ic_offset;
358 int ic_bwritecnt; 358 int ic_bwritecnt;
@@ -409,7 +409,7 @@ struct xfs_cil_ctx {
409 * operations almost as efficient as the old logging methods. 409 * operations almost as efficient as the old logging methods.
410 */ 410 */
411struct xfs_cil { 411struct xfs_cil {
412 struct log *xc_log; 412 struct xlog *xc_log;
413 struct list_head xc_cil; 413 struct list_head xc_cil;
414 spinlock_t xc_cil_lock; 414 spinlock_t xc_cil_lock;
415 struct xfs_cil_ctx *xc_ctx; 415 struct xfs_cil_ctx *xc_ctx;
@@ -487,7 +487,7 @@ struct xlog_grant_head {
487 * overflow 31 bits worth of byte offset, so using a byte number will mean 487 * overflow 31 bits worth of byte offset, so using a byte number will mean
488 * that round off problems won't occur when releasing partial reservations. 488 * that round off problems won't occur when releasing partial reservations.
489 */ 489 */
490typedef struct log { 490typedef struct xlog {
491 /* The following fields don't need locking */ 491 /* The following fields don't need locking */
492 struct xfs_mount *l_mp; /* mount point */ 492 struct xfs_mount *l_mp; /* mount point */
493 struct xfs_ail *l_ailp; /* AIL log is working with */ 493 struct xfs_ail *l_ailp; /* AIL log is working with */
@@ -553,9 +553,14 @@ extern int xlog_recover_finish(xlog_t *log);
553extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int); 553extern void xlog_pack_data(xlog_t *log, xlog_in_core_t *iclog, int);
554 554
555extern kmem_zone_t *xfs_log_ticket_zone; 555extern kmem_zone_t *xfs_log_ticket_zone;
556struct xlog_ticket *xlog_ticket_alloc(struct log *log, int unit_bytes, 556struct xlog_ticket *
557 int count, char client, bool permanent, 557xlog_ticket_alloc(
558 int alloc_flags); 558 struct xlog *log,
559 int unit_bytes,
560 int count,
561 char client,
562 bool permanent,
563 xfs_km_flags_t alloc_flags);
559 564
560 565
561static inline void 566static inline void
@@ -567,9 +572,14 @@ xlog_write_adv_cnt(void **ptr, int *len, int *off, size_t bytes)
567} 572}
568 573
569void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket); 574void xlog_print_tic_res(struct xfs_mount *mp, struct xlog_ticket *ticket);
570int xlog_write(struct log *log, struct xfs_log_vec *log_vector, 575int
571 struct xlog_ticket *tic, xfs_lsn_t *start_lsn, 576xlog_write(
572 xlog_in_core_t **commit_iclog, uint flags); 577 struct xlog *log,
578 struct xfs_log_vec *log_vector,
579 struct xlog_ticket *tic,
580 xfs_lsn_t *start_lsn,
581 struct xlog_in_core **commit_iclog,
582 uint flags);
573 583
574/* 584/*
575 * When we crack an atomic LSN, we sample it first so that the value will not 585 * When we crack an atomic LSN, we sample it first so that the value will not
@@ -629,17 +639,23 @@ xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
629/* 639/*
630 * Committed Item List interfaces 640 * Committed Item List interfaces
631 */ 641 */
632int xlog_cil_init(struct log *log); 642int
633void xlog_cil_init_post_recovery(struct log *log); 643xlog_cil_init(struct xlog *log);
634void xlog_cil_destroy(struct log *log); 644void
645xlog_cil_init_post_recovery(struct xlog *log);
646void
647xlog_cil_destroy(struct xlog *log);
635 648
636/* 649/*
637 * CIL force routines 650 * CIL force routines
638 */ 651 */
639xfs_lsn_t xlog_cil_force_lsn(struct log *log, xfs_lsn_t sequence); 652xfs_lsn_t
653xlog_cil_force_lsn(
654 struct xlog *log,
655 xfs_lsn_t sequence);
640 656
641static inline void 657static inline void
642xlog_cil_force(struct log *log) 658xlog_cil_force(struct xlog *log)
643{ 659{
644 xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence); 660 xlog_cil_force_lsn(log, log->l_cilp->xc_current_sequence);
645} 661}
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index ca386909131a..a7be98abd6a9 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1471,8 +1471,8 @@ xlog_recover_add_item(
1471 1471
1472STATIC int 1472STATIC int
1473xlog_recover_add_to_cont_trans( 1473xlog_recover_add_to_cont_trans(
1474 struct log *log, 1474 struct xlog *log,
1475 xlog_recover_t *trans, 1475 struct xlog_recover *trans,
1476 xfs_caddr_t dp, 1476 xfs_caddr_t dp,
1477 int len) 1477 int len)
1478{ 1478{
@@ -1517,8 +1517,8 @@ xlog_recover_add_to_cont_trans(
1517 */ 1517 */
1518STATIC int 1518STATIC int
1519xlog_recover_add_to_trans( 1519xlog_recover_add_to_trans(
1520 struct log *log, 1520 struct xlog *log,
1521 xlog_recover_t *trans, 1521 struct xlog_recover *trans,
1522 xfs_caddr_t dp, 1522 xfs_caddr_t dp,
1523 int len) 1523 int len)
1524{ 1524{
@@ -1588,8 +1588,8 @@ xlog_recover_add_to_trans(
1588 */ 1588 */
1589STATIC int 1589STATIC int
1590xlog_recover_reorder_trans( 1590xlog_recover_reorder_trans(
1591 struct log *log, 1591 struct xlog *log,
1592 xlog_recover_t *trans, 1592 struct xlog_recover *trans,
1593 int pass) 1593 int pass)
1594{ 1594{
1595 xlog_recover_item_t *item, *n; 1595 xlog_recover_item_t *item, *n;
@@ -1642,8 +1642,8 @@ xlog_recover_reorder_trans(
1642 */ 1642 */
1643STATIC int 1643STATIC int
1644xlog_recover_buffer_pass1( 1644xlog_recover_buffer_pass1(
1645 struct log *log, 1645 struct xlog *log,
1646 xlog_recover_item_t *item) 1646 struct xlog_recover_item *item)
1647{ 1647{
1648 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr; 1648 xfs_buf_log_format_t *buf_f = item->ri_buf[0].i_addr;
1649 struct list_head *bucket; 1649 struct list_head *bucket;
@@ -1696,7 +1696,7 @@ xlog_recover_buffer_pass1(
1696 */ 1696 */
1697STATIC int 1697STATIC int
1698xlog_check_buffer_cancelled( 1698xlog_check_buffer_cancelled(
1699 struct log *log, 1699 struct xlog *log,
1700 xfs_daddr_t blkno, 1700 xfs_daddr_t blkno,
1701 uint len, 1701 uint len,
1702 ushort flags) 1702 ushort flags)
@@ -2689,9 +2689,9 @@ xlog_recover_free_trans(
2689 2689
2690STATIC int 2690STATIC int
2691xlog_recover_commit_pass1( 2691xlog_recover_commit_pass1(
2692 struct log *log, 2692 struct xlog *log,
2693 struct xlog_recover *trans, 2693 struct xlog_recover *trans,
2694 xlog_recover_item_t *item) 2694 struct xlog_recover_item *item)
2695{ 2695{
2696 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1); 2696 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS1);
2697 2697
@@ -2716,10 +2716,10 @@ xlog_recover_commit_pass1(
2716 2716
2717STATIC int 2717STATIC int
2718xlog_recover_commit_pass2( 2718xlog_recover_commit_pass2(
2719 struct log *log, 2719 struct xlog *log,
2720 struct xlog_recover *trans, 2720 struct xlog_recover *trans,
2721 struct list_head *buffer_list, 2721 struct list_head *buffer_list,
2722 xlog_recover_item_t *item) 2722 struct xlog_recover_item *item)
2723{ 2723{
2724 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2); 2724 trace_xfs_log_recover_item_recover(log, trans, item, XLOG_RECOVER_PASS2);
2725 2725
@@ -2753,7 +2753,7 @@ xlog_recover_commit_pass2(
2753 */ 2753 */
2754STATIC int 2754STATIC int
2755xlog_recover_commit_trans( 2755xlog_recover_commit_trans(
2756 struct log *log, 2756 struct xlog *log,
2757 struct xlog_recover *trans, 2757 struct xlog_recover *trans,
2758 int pass) 2758 int pass)
2759{ 2759{
@@ -2793,8 +2793,8 @@ out:
2793 2793
2794STATIC int 2794STATIC int
2795xlog_recover_unmount_trans( 2795xlog_recover_unmount_trans(
2796 struct log *log, 2796 struct xlog *log,
2797 xlog_recover_t *trans) 2797 struct xlog_recover *trans)
2798{ 2798{
2799 /* Do nothing now */ 2799 /* Do nothing now */
2800 xfs_warn(log->l_mp, "%s: Unmount LR", __func__); 2800 xfs_warn(log->l_mp, "%s: Unmount LR", __func__);
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 8b89c5ac72d9..90c1fc9eaea4 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -53,7 +53,7 @@ typedef struct xfs_trans_reservations {
53 53
54#include "xfs_sync.h" 54#include "xfs_sync.h"
55 55
56struct log; 56struct xlog;
57struct xfs_mount_args; 57struct xfs_mount_args;
58struct xfs_inode; 58struct xfs_inode;
59struct xfs_bmbt_irec; 59struct xfs_bmbt_irec;
@@ -133,7 +133,7 @@ typedef struct xfs_mount {
133 uint m_readio_blocks; /* min read size blocks */ 133 uint m_readio_blocks; /* min read size blocks */
134 uint m_writeio_log; /* min write size log bytes */ 134 uint m_writeio_log; /* min write size log bytes */
135 uint m_writeio_blocks; /* min write size blocks */ 135 uint m_writeio_blocks; /* min write size blocks */
136 struct log *m_log; /* log specific stuff */ 136 struct xlog *m_log; /* log specific stuff */
137 int m_logbufs; /* number of log buffers */ 137 int m_logbufs; /* number of log buffers */
138 int m_logbsize; /* size of each log buffer */ 138 int m_logbsize; /* size of each log buffer */
139 uint m_rsumlevels; /* rt summary levels */ 139 uint m_rsumlevels; /* rt summary levels */
diff --git a/fs/xfs/xfs_sync.c b/fs/xfs/xfs_sync.c
index c9d3409c5ca3..1e9ee064dbb2 100644
--- a/fs/xfs/xfs_sync.c
+++ b/fs/xfs/xfs_sync.c
@@ -386,23 +386,23 @@ xfs_sync_worker(
386 * We shouldn't write/force the log if we are in the mount/unmount 386 * We shouldn't write/force the log if we are in the mount/unmount
387 * process or on a read only filesystem. The workqueue still needs to be 387 * process or on a read only filesystem. The workqueue still needs to be
388 * active in both cases, however, because it is used for inode reclaim 388 * active in both cases, however, because it is used for inode reclaim
389 * during these times. Use the s_umount semaphore to provide exclusion 389 * during these times. Use the MS_ACTIVE flag to avoid doing anything
390 * with unmount. 390 * during mount. Doing work during unmount is avoided by calling
391 * cancel_delayed_work_sync on this work queue before tearing down
392 * the ail and the log in xfs_log_unmount.
391 */ 393 */
392 if (down_read_trylock(&mp->m_super->s_umount)) { 394 if (!(mp->m_super->s_flags & MS_ACTIVE) &&
393 if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { 395 !(mp->m_flags & XFS_MOUNT_RDONLY)) {
394 /* dgc: errors ignored here */ 396 /* dgc: errors ignored here */
395 if (mp->m_super->s_frozen == SB_UNFROZEN && 397 if (mp->m_super->s_frozen == SB_UNFROZEN &&
396 xfs_log_need_covered(mp)) 398 xfs_log_need_covered(mp))
397 error = xfs_fs_log_dummy(mp); 399 error = xfs_fs_log_dummy(mp);
398 else 400 else
399 xfs_log_force(mp, 0); 401 xfs_log_force(mp, 0);
400 402
401 /* start pushing all the metadata that is currently 403 /* start pushing all the metadata that is currently
402 * dirty */ 404 * dirty */
403 xfs_ail_push_all(mp->m_ail); 405 xfs_ail_push_all(mp->m_ail);
404 }
405 up_read(&mp->m_super->s_umount);
406 } 406 }
407 407
408 /* queue us up again */ 408 /* queue us up again */
diff --git a/fs/xfs/xfs_trace.h b/fs/xfs/xfs_trace.h
index 7cf9d3529e51..caf5dabfd553 100644
--- a/fs/xfs/xfs_trace.h
+++ b/fs/xfs/xfs_trace.h
@@ -32,7 +32,7 @@ struct xfs_da_node_entry;
32struct xfs_dquot; 32struct xfs_dquot;
33struct xfs_log_item; 33struct xfs_log_item;
34struct xlog_ticket; 34struct xlog_ticket;
35struct log; 35struct xlog;
36struct xlog_recover; 36struct xlog_recover;
37struct xlog_recover_item; 37struct xlog_recover_item;
38struct xfs_buf_log_format; 38struct xfs_buf_log_format;
@@ -762,7 +762,7 @@ DEFINE_DQUOT_EVENT(xfs_dqflush_force);
762DEFINE_DQUOT_EVENT(xfs_dqflush_done); 762DEFINE_DQUOT_EVENT(xfs_dqflush_done);
763 763
764DECLARE_EVENT_CLASS(xfs_loggrant_class, 764DECLARE_EVENT_CLASS(xfs_loggrant_class,
765 TP_PROTO(struct log *log, struct xlog_ticket *tic), 765 TP_PROTO(struct xlog *log, struct xlog_ticket *tic),
766 TP_ARGS(log, tic), 766 TP_ARGS(log, tic),
767 TP_STRUCT__entry( 767 TP_STRUCT__entry(
768 __field(dev_t, dev) 768 __field(dev_t, dev)
@@ -830,7 +830,7 @@ DECLARE_EVENT_CLASS(xfs_loggrant_class,
830 830
831#define DEFINE_LOGGRANT_EVENT(name) \ 831#define DEFINE_LOGGRANT_EVENT(name) \
832DEFINE_EVENT(xfs_loggrant_class, name, \ 832DEFINE_EVENT(xfs_loggrant_class, name, \
833 TP_PROTO(struct log *log, struct xlog_ticket *tic), \ 833 TP_PROTO(struct xlog *log, struct xlog_ticket *tic), \
834 TP_ARGS(log, tic)) 834 TP_ARGS(log, tic))
835DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm); 835DEFINE_LOGGRANT_EVENT(xfs_log_done_nonperm);
836DEFINE_LOGGRANT_EVENT(xfs_log_done_perm); 836DEFINE_LOGGRANT_EVENT(xfs_log_done_perm);
@@ -1664,7 +1664,7 @@ DEFINE_SWAPEXT_EVENT(xfs_swap_extent_before);
1664DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after); 1664DEFINE_SWAPEXT_EVENT(xfs_swap_extent_after);
1665 1665
1666DECLARE_EVENT_CLASS(xfs_log_recover_item_class, 1666DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
1667 TP_PROTO(struct log *log, struct xlog_recover *trans, 1667 TP_PROTO(struct xlog *log, struct xlog_recover *trans,
1668 struct xlog_recover_item *item, int pass), 1668 struct xlog_recover_item *item, int pass),
1669 TP_ARGS(log, trans, item, pass), 1669 TP_ARGS(log, trans, item, pass),
1670 TP_STRUCT__entry( 1670 TP_STRUCT__entry(
@@ -1698,7 +1698,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_item_class,
1698 1698
1699#define DEFINE_LOG_RECOVER_ITEM(name) \ 1699#define DEFINE_LOG_RECOVER_ITEM(name) \
1700DEFINE_EVENT(xfs_log_recover_item_class, name, \ 1700DEFINE_EVENT(xfs_log_recover_item_class, name, \
1701 TP_PROTO(struct log *log, struct xlog_recover *trans, \ 1701 TP_PROTO(struct xlog *log, struct xlog_recover *trans, \
1702 struct xlog_recover_item *item, int pass), \ 1702 struct xlog_recover_item *item, int pass), \
1703 TP_ARGS(log, trans, item, pass)) 1703 TP_ARGS(log, trans, item, pass))
1704 1704
@@ -1709,7 +1709,7 @@ DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_reorder_tail);
1709DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover); 1709DEFINE_LOG_RECOVER_ITEM(xfs_log_recover_item_recover);
1710 1710
1711DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class, 1711DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
1712 TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), 1712 TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f),
1713 TP_ARGS(log, buf_f), 1713 TP_ARGS(log, buf_f),
1714 TP_STRUCT__entry( 1714 TP_STRUCT__entry(
1715 __field(dev_t, dev) 1715 __field(dev_t, dev)
@@ -1739,7 +1739,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_buf_item_class,
1739 1739
1740#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \ 1740#define DEFINE_LOG_RECOVER_BUF_ITEM(name) \
1741DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \ 1741DEFINE_EVENT(xfs_log_recover_buf_item_class, name, \
1742 TP_PROTO(struct log *log, struct xfs_buf_log_format *buf_f), \ 1742 TP_PROTO(struct xlog *log, struct xfs_buf_log_format *buf_f), \
1743 TP_ARGS(log, buf_f)) 1743 TP_ARGS(log, buf_f))
1744 1744
1745DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel); 1745DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_not_cancel);
@@ -1752,7 +1752,7 @@ DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_reg_buf);
1752DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf); 1752DEFINE_LOG_RECOVER_BUF_ITEM(xfs_log_recover_buf_dquot_buf);
1753 1753
1754DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class, 1754DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
1755 TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), 1755 TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f),
1756 TP_ARGS(log, in_f), 1756 TP_ARGS(log, in_f),
1757 TP_STRUCT__entry( 1757 TP_STRUCT__entry(
1758 __field(dev_t, dev) 1758 __field(dev_t, dev)
@@ -1790,7 +1790,7 @@ DECLARE_EVENT_CLASS(xfs_log_recover_ino_item_class,
1790) 1790)
1791#define DEFINE_LOG_RECOVER_INO_ITEM(name) \ 1791#define DEFINE_LOG_RECOVER_INO_ITEM(name) \
1792DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \ 1792DEFINE_EVENT(xfs_log_recover_ino_item_class, name, \
1793 TP_PROTO(struct log *log, struct xfs_inode_log_format *in_f), \ 1793 TP_PROTO(struct xlog *log, struct xfs_inode_log_format *in_f), \
1794 TP_ARGS(log, in_f)) 1794 TP_ARGS(log, in_f))
1795 1795
1796DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover); 1796DEFINE_LOG_RECOVER_INO_ITEM(xfs_log_recover_inode_recover);
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index cdf896fcbfa4..fdf324508c5e 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -584,7 +584,7 @@ xfs_trans_t *
584_xfs_trans_alloc( 584_xfs_trans_alloc(
585 xfs_mount_t *mp, 585 xfs_mount_t *mp,
586 uint type, 586 uint type,
587 uint memflags) 587 xfs_km_flags_t memflags)
588{ 588{
589 xfs_trans_t *tp; 589 xfs_trans_t *tp;
590 590
diff --git a/fs/xfs/xfs_trans.h b/fs/xfs/xfs_trans.h
index 7ab99e1898c8..7c37b533aa8e 100644
--- a/fs/xfs/xfs_trans.h
+++ b/fs/xfs/xfs_trans.h
@@ -443,7 +443,7 @@ typedef struct xfs_trans {
443 * XFS transaction mechanism exported interfaces. 443 * XFS transaction mechanism exported interfaces.
444 */ 444 */
445xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint); 445xfs_trans_t *xfs_trans_alloc(struct xfs_mount *, uint);
446xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, uint); 446xfs_trans_t *_xfs_trans_alloc(struct xfs_mount *, uint, xfs_km_flags_t);
447xfs_trans_t *xfs_trans_dup(xfs_trans_t *); 447xfs_trans_t *xfs_trans_dup(xfs_trans_t *);
448int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint, 448int xfs_trans_reserve(xfs_trans_t *, uint, uint, uint,
449 uint, uint); 449 uint, uint);
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index b0d62820ada1..9e6e1c6eb60a 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -440,8 +440,8 @@ static inline int acpi_pm_device_sleep_wake(struct device *dev, bool enable)
440 440
441#else /* CONFIG_ACPI */ 441#else /* CONFIG_ACPI */
442 442
443static int register_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } 443static inline int register_acpi_bus_type(void *bus) { return 0; }
444static int unregister_acpi_bus_type(struct acpi_bus_type *bus) { return 0; } 444static inline int unregister_acpi_bus_type(void *bus) { return 0; }
445 445
446#endif /* CONFIG_ACPI */ 446#endif /* CONFIG_ACPI */
447 447
diff --git a/include/asm-generic/Kbuild b/include/asm-generic/Kbuild
index 53f91b1ae53a..2c85a0f647b7 100644
--- a/include/asm-generic/Kbuild
+++ b/include/asm-generic/Kbuild
@@ -8,6 +8,7 @@ header-y += int-ll64.h
8header-y += ioctl.h 8header-y += ioctl.h
9header-y += ioctls.h 9header-y += ioctls.h
10header-y += ipcbuf.h 10header-y += ipcbuf.h
11header-y += kvm_para.h
11header-y += mman-common.h 12header-y += mman-common.h
12header-y += mman.h 13header-y += mman.h
13header-y += msgbuf.h 14header-y += msgbuf.h
diff --git a/include/asm-generic/bitsperlong.h b/include/asm-generic/bitsperlong.h
index 4ae54e07de83..a7b0914348fd 100644
--- a/include/asm-generic/bitsperlong.h
+++ b/include/asm-generic/bitsperlong.h
@@ -28,5 +28,9 @@
28#error Inconsistent word size. Check asm/bitsperlong.h 28#error Inconsistent word size. Check asm/bitsperlong.h
29#endif 29#endif
30 30
31#ifndef BITS_PER_LONG_LONG
32#define BITS_PER_LONG_LONG 64
33#endif
34
31#endif /* __KERNEL__ */ 35#endif /* __KERNEL__ */
32#endif /* __ASM_GENERIC_BITS_PER_LONG */ 36#endif /* __ASM_GENERIC_BITS_PER_LONG */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 2520a6e241dc..506ec19a3736 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -31,6 +31,9 @@ struct bug_entry {
31 31
32#endif /* CONFIG_GENERIC_BUG */ 32#endif /* CONFIG_GENERIC_BUG */
33 33
34#ifndef __ASSEMBLY__
35#include <linux/kernel.h>
36
34/* 37/*
35 * Don't use BUG() or BUG_ON() unless there's really no way out; one 38 * Don't use BUG() or BUG_ON() unless there's really no way out; one
36 * example might be detecting data structure corruption in the middle 39 * example might be detecting data structure corruption in the middle
@@ -60,7 +63,6 @@ struct bug_entry {
60 * to provide better diagnostics. 63 * to provide better diagnostics.
61 */ 64 */
62#ifndef __WARN_TAINT 65#ifndef __WARN_TAINT
63#ifndef __ASSEMBLY__
64extern __printf(3, 4) 66extern __printf(3, 4)
65void warn_slowpath_fmt(const char *file, const int line, 67void warn_slowpath_fmt(const char *file, const int line,
66 const char *fmt, ...); 68 const char *fmt, ...);
@@ -69,7 +71,6 @@ void warn_slowpath_fmt_taint(const char *file, const int line, unsigned taint,
69 const char *fmt, ...); 71 const char *fmt, ...);
70extern void warn_slowpath_null(const char *file, const int line); 72extern void warn_slowpath_null(const char *file, const int line);
71#define WANT_WARN_ON_SLOWPATH 73#define WANT_WARN_ON_SLOWPATH
72#endif
73#define __WARN() warn_slowpath_null(__FILE__, __LINE__) 74#define __WARN() warn_slowpath_null(__FILE__, __LINE__)
74#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg) 75#define __WARN_printf(arg...) warn_slowpath_fmt(__FILE__, __LINE__, arg)
75#define __WARN_printf_taint(taint, arg...) \ 76#define __WARN_printf_taint(taint, arg...) \
@@ -202,4 +203,6 @@ extern void warn_slowpath_null(const char *file, const int line);
202# define WARN_ON_SMP(x) ({0;}) 203# define WARN_ON_SMP(x) ({0;})
203#endif 204#endif
204 205
206#endif /* __ASSEMBLY__ */
207
205#endif 208#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 6f2b45a9b6bc..ff4947b7a976 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -484,6 +484,16 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
484 /* 484 /*
485 * The barrier will stabilize the pmdval in a register or on 485 * The barrier will stabilize the pmdval in a register or on
486 * the stack so that it will stop changing under the code. 486 * the stack so that it will stop changing under the code.
487 *
488 * When CONFIG_TRANSPARENT_HUGEPAGE=y on x86 32bit PAE,
489 * pmd_read_atomic is allowed to return a not atomic pmdval
490 * (for example pointing to an hugepage that has never been
491 * mapped in the pmd). The below checks will only care about
492 * the low part of the pmd with 32bit PAE x86 anyway, with the
493 * exception of pmd_none(). So the important thing is that if
494 * the low part of the pmd is found null, the high part will
495 * be also null or the pmd_none() check below would be
496 * confused.
487 */ 497 */
488#ifdef CONFIG_TRANSPARENT_HUGEPAGE 498#ifdef CONFIG_TRANSPARENT_HUGEPAGE
489 barrier(); 499 barrier();
diff --git a/include/asm-generic/posix_types.h b/include/asm-generic/posix_types.h
index 91d44bd4dde3..fe74fccf18db 100644
--- a/include/asm-generic/posix_types.h
+++ b/include/asm-generic/posix_types.h
@@ -23,10 +23,6 @@ typedef __kernel_ulong_t __kernel_ino_t;
23typedef unsigned int __kernel_mode_t; 23typedef unsigned int __kernel_mode_t;
24#endif 24#endif
25 25
26#ifndef __kernel_nlink_t
27typedef __kernel_ulong_t __kernel_nlink_t;
28#endif
29
30#ifndef __kernel_pid_t 26#ifndef __kernel_pid_t
31typedef int __kernel_pid_t; 27typedef int __kernel_pid_t;
32#endif 28#endif
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 73e45600f95d..bac55c215113 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -54,7 +54,7 @@ struct drm_mode_object {
54 struct drm_object_properties *properties; 54 struct drm_object_properties *properties;
55}; 55};
56 56
57#define DRM_OBJECT_MAX_PROPERTY 16 57#define DRM_OBJECT_MAX_PROPERTY 24
58struct drm_object_properties { 58struct drm_object_properties {
59 int count; 59 int count;
60 uint32_t ids[DRM_OBJECT_MAX_PROPERTY]; 60 uint32_t ids[DRM_OBJECT_MAX_PROPERTY];
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 6bd325fedc87..19a240446fca 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -31,7 +31,7 @@
31 31
32static __inline__ void *drm_calloc_large(size_t nmemb, size_t size) 32static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
33{ 33{
34 if (size != 0 && nmemb > ULONG_MAX / size) 34 if (size != 0 && nmemb > SIZE_MAX / size)
35 return NULL; 35 return NULL;
36 36
37 if (size * nmemb <= PAGE_SIZE) 37 if (size * nmemb <= PAGE_SIZE)
@@ -44,7 +44,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
44/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */ 44/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
45static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size) 45static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
46{ 46{
47 if (size != 0 && nmemb > ULONG_MAX / size) 47 if (size != 0 && nmemb > SIZE_MAX / size)
48 return NULL; 48 return NULL;
49 49
50 if (size * nmemb <= PAGE_SIZE) 50 if (size * nmemb <= PAGE_SIZE)
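
The ULONG_MAX to SIZE_MAX switch above ties the overflow guard to the type actually used in the multiplication. A stand-alone sketch of the same pattern, with a hypothetical name and plain userspace C purely for illustration:

#include <stdint.h>
#include <stdlib.h>

/* Hypothetical helper: like calloc() but without zeroing, refusing any
 * request whose nmemb * size product would wrap around in size_t. */
static void *checked_malloc_ab(size_t nmemb, size_t size)
{
        if (size != 0 && nmemb > SIZE_MAX / size)
                return NULL;            /* nmemb * size would overflow */
        return malloc(nmemb * size);
}
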
diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h
index 58d0bdab68dd..a7aec391b7b7 100644
--- a/include/drm/drm_pciids.h
+++ b/include/drm/drm_pciids.h
@@ -1,7 +1,3 @@
1/*
2 This file is auto-generated from the drm_pciids.txt in the DRM CVS
3 Please contact dri-devel@lists.sf.net to add new cards to this list
4*/
5#define radeon_PCI_IDS \ 1#define radeon_PCI_IDS \
6 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \ 2 {0x1002, 0x3150, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY}, \
7 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 3 {0x1002, 0x3151, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RV380|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
@@ -181,6 +177,7 @@
181 {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 177 {0x1002, 0x6747, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
182 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 178 {0x1002, 0x6748, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
183 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 179 {0x1002, 0x6749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
180 {0x1002, 0x674A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
184 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 181 {0x1002, 0x6750, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
185 {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 182 {0x1002, 0x6751, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
186 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \ 183 {0x1002, 0x6758, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_TURKS|RADEON_NEW_MEMMAP}, \
@@ -198,6 +195,7 @@
198 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 195 {0x1002, 0x6767, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
199 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 196 {0x1002, 0x6768, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
200 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 197 {0x1002, 0x6770, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
198 {0x1002, 0x6771, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
201 {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 199 {0x1002, 0x6772, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
202 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 200 {0x1002, 0x6778, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
203 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \ 201 {0x1002, 0x6779, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_CAICOS|RADEON_NEW_MEMMAP}, \
@@ -229,10 +227,11 @@
229 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 227 {0x1002, 0x6827, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
230 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 228 {0x1002, 0x6828, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
231 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 229 {0x1002, 0x6829, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
230 {0x1002, 0x682B, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
232 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 231 {0x1002, 0x682D, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
233 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \ 232 {0x1002, 0x682F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
234 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 233 {0x1002, 0x6830, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
235 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 234 {0x1002, 0x6831, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
236 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 235 {0x1002, 0x6837, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
237 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 236 {0x1002, 0x6838, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
238 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \ 237 {0x1002, 0x6839, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_VERDE|RADEON_NEW_MEMMAP}, \
@@ -531,6 +530,7 @@
531 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 530 {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
532 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 531 {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
533 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ 532 {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
533 {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\
534 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 534 {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
535 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 535 {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
536 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 536 {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -550,6 +550,7 @@
550 {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 550 {0x1002, 0x9807, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
551 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 551 {0x1002, 0x9808, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
552 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 552 {0x1002, 0x9809, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
553 {0x1002, 0x980A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_PALM|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
553 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 554 {0x1002, 0x9900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
554 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 555 {0x1002, 0x9901, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
555 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 556 {0x1002, 0x9903, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
@@ -561,11 +562,19 @@
561 {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 562 {0x1002, 0x9909, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
562 {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 563 {0x1002, 0x990A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
563 {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 564 {0x1002, 0x990F, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
565 {0x1002, 0x9910, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
566 {0x1002, 0x9913, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
567 {0x1002, 0x9917, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
568 {0x1002, 0x9918, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
569 {0x1002, 0x9919, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
564 {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 570 {0x1002, 0x9990, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
565 {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 571 {0x1002, 0x9991, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
566 {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 572 {0x1002, 0x9992, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
567 {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 573 {0x1002, 0x9993, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
568 {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ 574 {0x1002, 0x9994, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
575 {0x1002, 0x99A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
576 {0x1002, 0x99A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
577 {0x1002, 0x99A4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_ARUBA|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \
569 {0, 0, 0} 578 {0, 0, 0}
570 579
571#define r128_PCI_IDS \ 580#define r128_PCI_IDS \
diff --git a/include/drm/exynos_drm.h b/include/drm/exynos_drm.h
index b6d7ce92eadd..68733587e700 100644
--- a/include/drm/exynos_drm.h
+++ b/include/drm/exynos_drm.h
@@ -64,6 +64,7 @@ struct drm_exynos_gem_map_off {
64 * A structure for mapping buffer. 64 * A structure for mapping buffer.
65 * 65 *
66 * @handle: a handle to gem object created. 66 * @handle: a handle to gem object created.
67 * @pad: just padding to be 64-bit aligned.
67 * @size: memory size to be mapped. 68 * @size: memory size to be mapped.
68 * @mapped: having user virtual address mmaped. 69 * @mapped: having user virtual address mmaped.
69 * - this variable would be filled by exynos gem module 70 * - this variable would be filled by exynos gem module
@@ -72,7 +73,8 @@ struct drm_exynos_gem_map_off {
72 */ 73 */
73struct drm_exynos_gem_mmap { 74struct drm_exynos_gem_mmap {
74 unsigned int handle; 75 unsigned int handle;
75 unsigned int size; 76 unsigned int pad;
77 uint64_t size;
76 uint64_t mapped; 78 uint64_t mapped;
77}; 79};
78 80
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 7185b8f15ced..8760be30b375 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -226,6 +226,7 @@ header-y += kdev_t.h
226header-y += kernel.h 226header-y += kernel.h
227header-y += kernelcapi.h 227header-y += kernelcapi.h
228header-y += kernel-page-flags.h 228header-y += kernel-page-flags.h
229header-y += kexec.h
229header-y += keyboard.h 230header-y += keyboard.h
230header-y += keyctl.h 231header-y += keyctl.h
231header-y += l2tp.h 232header-y += l2tp.h
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 81e803e90aa4..acba894374a1 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -132,6 +132,7 @@ extern u64 clockevent_delta2ns(unsigned long latch,
132 struct clock_event_device *evt); 132 struct clock_event_device *evt);
133extern void clockevents_register_device(struct clock_event_device *dev); 133extern void clockevents_register_device(struct clock_event_device *dev);
134 134
135extern void clockevents_config(struct clock_event_device *dev, u32 freq);
135extern void clockevents_config_and_register(struct clock_event_device *dev, 136extern void clockevents_config_and_register(struct clock_event_device *dev,
136 u32 freq, unsigned long min_delta, 137 u32 freq, unsigned long min_delta,
137 unsigned long max_delta); 138 unsigned long max_delta);
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index e988037abd2a..51a90b7f2d60 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,8 +1,6 @@
1#ifndef _LINUX_COMPACTION_H 1#ifndef _LINUX_COMPACTION_H
2#define _LINUX_COMPACTION_H 2#define _LINUX_COMPACTION_H
3 3
4#include <linux/node.h>
5
6/* Return values for compact_zone() and try_to_compact_pages() */ 4/* Return values for compact_zone() and try_to_compact_pages() */
7/* compaction didn't start as it was not possible or direct reclaim was more suitable */ 5/* compaction didn't start as it was not possible or direct reclaim was more suitable */
8#define COMPACT_SKIPPED 0 6#define COMPACT_SKIPPED 0
@@ -13,23 +11,6 @@
13/* The full zone was compacted */ 11/* The full zone was compacted */
14#define COMPACT_COMPLETE 3 12#define COMPACT_COMPLETE 3
15 13
16/*
17 * compaction supports three modes
18 *
19 * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
20 * MIGRATE_MOVABLE pageblocks as migration sources and targets.
21 * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
22 * MIGRATE_MOVABLE pageblocks as migration sources.
23 * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
24 * targets and convers them to MIGRATE_MOVABLE if possible
25 * COMPACT_SYNC uses synchronous migration and scans all pageblocks
26 */
27enum compact_mode {
28 COMPACT_ASYNC_MOVABLE,
29 COMPACT_ASYNC_UNMOVABLE,
30 COMPACT_SYNC,
31};
32
33#ifdef CONFIG_COMPACTION 14#ifdef CONFIG_COMPACTION
34extern int sysctl_compact_memory; 15extern int sysctl_compact_memory;
35extern int sysctl_compaction_handler(struct ctl_table *table, int write, 16extern int sysctl_compaction_handler(struct ctl_table *table, int write,
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 5d46217f84ad..4e890394ef99 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -577,8 +577,7 @@ extern ssize_t compat_rw_copy_check_uvector(int type,
577 const struct compat_iovec __user *uvector, 577 const struct compat_iovec __user *uvector,
578 unsigned long nr_segs, 578 unsigned long nr_segs,
579 unsigned long fast_segs, struct iovec *fast_pointer, 579 unsigned long fast_segs, struct iovec *fast_pointer,
580 struct iovec **ret_pointer, 580 struct iovec **ret_pointer);
581 int check_access);
582 581
583extern void __user *compat_alloc_user_space(unsigned long len); 582extern void __user *compat_alloc_user_space(unsigned long len);
584 583
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index e5834aa24b9e..6a6d7aefe12d 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -47,9 +47,9 @@
47 */ 47 */
48#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \ 48#if !defined(CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING) || \
49 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4) 49 !defined(CONFIG_OPTIMIZE_INLINING) || (__GNUC__ < 4)
50# define inline inline __attribute__((always_inline)) 50# define inline inline __attribute__((always_inline)) notrace
51# define __inline__ __inline__ __attribute__((always_inline)) 51# define __inline__ __inline__ __attribute__((always_inline)) notrace
52# define __inline __inline __attribute__((always_inline)) 52# define __inline __inline __attribute__((always_inline)) notrace
53#else 53#else
54/* A lot of inline functions can cause havoc with function tracing */ 54/* A lot of inline functions can cause havoc with function tracing */
55# define inline inline notrace 55# define inline inline notrace
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 7230bb59a06f..2e9b9ebbeb78 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -177,6 +177,7 @@ extern void put_online_cpus(void);
177#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri) 177#define hotcpu_notifier(fn, pri) cpu_notifier(fn, pri)
178#define register_hotcpu_notifier(nb) register_cpu_notifier(nb) 178#define register_hotcpu_notifier(nb) register_cpu_notifier(nb)
179#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb) 179#define unregister_hotcpu_notifier(nb) unregister_cpu_notifier(nb)
180void clear_tasks_mm_cpumask(int cpu);
180int cpu_down(unsigned int cpu); 181int cpu_down(unsigned int cpu);
181 182
182#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE 183#ifdef CONFIG_ARCH_CPU_PROBE_RELEASE
diff --git a/include/linux/cred.h b/include/linux/cred.h
index 917dc5aeb1d4..ebbed2ce6637 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -277,17 +277,13 @@ static inline void put_cred(const struct cred *_cred)
277 * @task: The task to query 277 * @task: The task to query
278 * 278 *
279 * Access the objective credentials of a task. The caller must hold the RCU 279 * Access the objective credentials of a task. The caller must hold the RCU
280 * readlock or the task must be dead and unable to change its own credentials. 280 * readlock.
281 * 281 *
282 * The result of this function should not be passed directly to get_cred(); 282 * The result of this function should not be passed directly to get_cred();
283 * rather get_task_cred() should be used instead. 283 * rather get_task_cred() should be used instead.
284 */ 284 */
285#define __task_cred(task) \ 285#define __task_cred(task) \
286 ({ \ 286 rcu_dereference((task)->real_cred)
287 const struct task_struct *__t = (task); \
288 rcu_dereference_check(__t->real_cred, \
289 task_is_dead(__t)); \
290 })
291 287
292/** 288/**
293 * get_current_cred - Get the current task's subjective credentials 289 * get_current_cred - Get the current task's subjective credentials
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index d3fec584e8c3..56377df39124 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -635,6 +635,18 @@ static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
635 dir, flags, NULL); 635 dir, flags, NULL);
636} 636}
637 637
638#ifdef CONFIG_RAPIDIO_DMA_ENGINE
639struct rio_dma_ext;
640static inline struct dma_async_tx_descriptor *dmaengine_prep_rio_sg(
641 struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len,
642 enum dma_transfer_direction dir, unsigned long flags,
643 struct rio_dma_ext *rio_ext)
644{
645 return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
646 dir, flags, rio_ext);
647}
648#endif
649
638static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic( 650static inline struct dma_async_tx_descriptor *dmaengine_prep_dma_cyclic(
639 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len, 651 struct dma_chan *chan, dma_addr_t buf_addr, size_t buf_len,
640 size_t period_len, enum dma_transfer_direction dir) 652 size_t period_len, enum dma_transfer_direction dir)
diff --git a/include/linux/errno.h b/include/linux/errno.h
index 2d09bfa5c262..e0de516374da 100644
--- a/include/linux/errno.h
+++ b/include/linux/errno.h
@@ -17,6 +17,7 @@
17#define ENOIOCTLCMD 515 /* No ioctl command */ 17#define ENOIOCTLCMD 515 /* No ioctl command */
18#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */ 18#define ERESTART_RESTARTBLOCK 516 /* restart by calling sys_restart_syscall */
19#define EPROBE_DEFER 517 /* Driver requests probe retry */ 19#define EPROBE_DEFER 517 /* Driver requests probe retry */
20#define EOPENSTALE 518 /* open found a stale dentry */
20 21
21/* Defined for the NFSv3 protocol */ 22/* Defined for the NFSv3 protocol */
22#define EBADHANDLE 521 /* Illegal NFS file handle */ 23#define EBADHANDLE 521 /* Illegal NFS file handle */
diff --git a/include/linux/eventfd.h b/include/linux/eventfd.h
index 91bb4f27238c..3c3ef19a625a 100644
--- a/include/linux/eventfd.h
+++ b/include/linux/eventfd.h
@@ -34,7 +34,7 @@ void eventfd_ctx_put(struct eventfd_ctx *ctx);
34struct file *eventfd_fget(int fd); 34struct file *eventfd_fget(int fd);
35struct eventfd_ctx *eventfd_ctx_fdget(int fd); 35struct eventfd_ctx *eventfd_ctx_fdget(int fd);
36struct eventfd_ctx *eventfd_ctx_fileget(struct file *file); 36struct eventfd_ctx *eventfd_ctx_fileget(struct file *file);
37int eventfd_signal(struct eventfd_ctx *ctx, int n); 37__u64 eventfd_signal(struct eventfd_ctx *ctx, __u64 n);
38ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt); 38ssize_t eventfd_ctx_read(struct eventfd_ctx *ctx, int no_wait, __u64 *cnt);
39int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait, 39int eventfd_ctx_remove_wait_queue(struct eventfd_ctx *ctx, wait_queue_t *wait,
40 __u64 *cnt); 40 __u64 *cnt);
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 3a4cef5322dc..12291a7ee275 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -165,8 +165,8 @@ struct fid {
165 */ 165 */
166 166
167struct export_operations { 167struct export_operations {
168 int (*encode_fh)(struct dentry *de, __u32 *fh, int *max_len, 168 int (*encode_fh)(struct inode *inode, __u32 *fh, int *max_len,
169 int connectable); 169 struct inode *parent);
170 struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid, 170 struct dentry * (*fh_to_dentry)(struct super_block *sb, struct fid *fid,
171 int fh_len, int fh_type); 171 int fh_len, int fh_type);
172 struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid, 172 struct dentry * (*fh_to_parent)(struct super_block *sb, struct fid *fid,
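
The ->encode_fh() change above hands the callback an inode (plus an optional parent inode) instead of a dentry and a connectable flag. A minimal sketch of a filesystem hook written against the new prototype — every name and value here is hypothetical:

static int examplefs_encode_fh(struct inode *inode, __u32 *fh, int *max_len,
                               struct inode *parent)
{
        int len = parent ? 4 : 2;       /* 32-bit words we want to emit */

        if (*max_len < len) {
                *max_len = len;         /* tell the caller how much is needed */
                return 255;             /* conventional "buffer too small" value */
        }

        fh[0] = inode->i_ino;
        fh[1] = inode->i_generation;
        if (parent) {
                fh[2] = parent->i_ino;
                fh[3] = parent->i_generation;
        }
        *max_len = len;
        return parent ? 2 : 1;          /* hypothetical fh_type values */
}
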
diff --git a/include/linux/fb.h b/include/linux/fb.h
index a3229d7ab9f2..ac3f1c605843 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -611,6 +611,7 @@ struct fb_deferred_io {
611 struct mutex lock; /* mutex that protects the page list */ 611 struct mutex lock; /* mutex that protects the page list */
612 struct list_head pagelist; /* list of touched pages */ 612 struct list_head pagelist; /* list of touched pages */
613 /* callback */ 613 /* callback */
614 void (*first_io)(struct fb_info *info);
614 void (*deferred_io)(struct fb_info *info, struct list_head *pagelist); 615 void (*deferred_io)(struct fb_info *info, struct list_head *pagelist);
615}; 616};
616#endif 617#endif
diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
new file mode 100644
index 000000000000..0e4e2eec5c1d
--- /dev/null
+++ b/include/linux/frontswap.h
@@ -0,0 +1,127 @@
1#ifndef _LINUX_FRONTSWAP_H
2#define _LINUX_FRONTSWAP_H
3
4#include <linux/swap.h>
5#include <linux/mm.h>
6#include <linux/bitops.h>
7
8struct frontswap_ops {
9 void (*init)(unsigned);
10 int (*store)(unsigned, pgoff_t, struct page *);
11 int (*load)(unsigned, pgoff_t, struct page *);
12 void (*invalidate_page)(unsigned, pgoff_t);
13 void (*invalidate_area)(unsigned);
14};
15
16extern bool frontswap_enabled;
17extern struct frontswap_ops
18 frontswap_register_ops(struct frontswap_ops *ops);
19extern void frontswap_shrink(unsigned long);
20extern unsigned long frontswap_curr_pages(void);
21extern void frontswap_writethrough(bool);
22
23extern void __frontswap_init(unsigned type);
24extern int __frontswap_store(struct page *page);
25extern int __frontswap_load(struct page *page);
26extern void __frontswap_invalidate_page(unsigned, pgoff_t);
27extern void __frontswap_invalidate_area(unsigned);
28
29#ifdef CONFIG_FRONTSWAP
30
31static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
32{
33 bool ret = false;
34
35 if (frontswap_enabled && sis->frontswap_map)
36 ret = test_bit(offset, sis->frontswap_map);
37 return ret;
38}
39
40static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
41{
42 if (frontswap_enabled && sis->frontswap_map)
43 set_bit(offset, sis->frontswap_map);
44}
45
46static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
47{
48 if (frontswap_enabled && sis->frontswap_map)
49 clear_bit(offset, sis->frontswap_map);
50}
51
52static inline void frontswap_map_set(struct swap_info_struct *p,
53 unsigned long *map)
54{
55 p->frontswap_map = map;
56}
57
58static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
59{
60 return p->frontswap_map;
61}
62#else
63/* all inline routines become no-ops and all externs are ignored */
64
65#define frontswap_enabled (0)
66
67static inline bool frontswap_test(struct swap_info_struct *sis, pgoff_t offset)
68{
69 return false;
70}
71
72static inline void frontswap_set(struct swap_info_struct *sis, pgoff_t offset)
73{
74}
75
76static inline void frontswap_clear(struct swap_info_struct *sis, pgoff_t offset)
77{
78}
79
80static inline void frontswap_map_set(struct swap_info_struct *p,
81 unsigned long *map)
82{
83}
84
85static inline unsigned long *frontswap_map_get(struct swap_info_struct *p)
86{
87 return NULL;
88}
89#endif
90
91static inline int frontswap_store(struct page *page)
92{
93 int ret = -1;
94
95 if (frontswap_enabled)
96 ret = __frontswap_store(page);
97 return ret;
98}
99
100static inline int frontswap_load(struct page *page)
101{
102 int ret = -1;
103
104 if (frontswap_enabled)
105 ret = __frontswap_load(page);
106 return ret;
107}
108
109static inline void frontswap_invalidate_page(unsigned type, pgoff_t offset)
110{
111 if (frontswap_enabled)
112 __frontswap_invalidate_page(type, offset);
113}
114
115static inline void frontswap_invalidate_area(unsigned type)
116{
117 if (frontswap_enabled)
118 __frontswap_invalidate_area(type);
119}
120
121static inline void frontswap_init(unsigned type)
122{
123 if (frontswap_enabled)
124 __frontswap_init(type);
125}
126
127#endif /* _LINUX_FRONTSWAP_H */
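
For orientation, the frontswap_ops table introduced above is what a backend fills in and hands to frontswap_register_ops(). A minimal sketch of a backend that declines every page, so everything still goes to the real swap device, might look like this (all names below are hypothetical):

#include <linux/frontswap.h>
#include <linux/init.h>
#include <linux/module.h>

static void nullswap_init(unsigned type)
{
        /* called when a swap area of this type is enabled; nothing to set up */
}

static int nullswap_store(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* non-zero: frontswap falls back to the swap device */
}

static int nullswap_load(unsigned type, pgoff_t offset, struct page *page)
{
        return -1;      /* nothing was ever stored, so nothing to load */
}

static void nullswap_invalidate_page(unsigned type, pgoff_t offset) { }
static void nullswap_invalidate_area(unsigned type) { }

static struct frontswap_ops nullswap_ops = {
        .init            = nullswap_init,
        .store           = nullswap_store,
        .load            = nullswap_load,
        .invalidate_page = nullswap_invalidate_page,
        .invalidate_area = nullswap_invalidate_area,
};

static int __init nullswap_register(void)
{
        /* the previously registered ops are returned, so a chaining
         * backend could keep them and call through */
        struct frontswap_ops old = frontswap_register_ops(&nullswap_ops);

        (void)old;
        return 0;
}
module_init(nullswap_register);
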
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 038076b27ea4..17fd887c798f 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -173,6 +173,15 @@ struct inodes_stat_t {
173#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA) 173#define WRITE_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FUA)
174#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA) 174#define WRITE_FLUSH_FUA (WRITE | REQ_SYNC | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
175 175
176
177/*
178 * Flag for rw_copy_check_uvector and compat_rw_copy_check_uvector
179 * that indicates that they should check the contents of the iovec are
180 * valid, but not check the memory that the iovec elements
181 * points too.
182 */
183#define CHECK_IOVEC_ONLY -1
184
176#define SEL_IN 1 185#define SEL_IN 1
177#define SEL_OUT 2 186#define SEL_OUT 2
178#define SEL_EX 4 187#define SEL_EX 4
@@ -793,13 +802,14 @@ struct inode {
793 unsigned int __i_nlink; 802 unsigned int __i_nlink;
794 }; 803 };
795 dev_t i_rdev; 804 dev_t i_rdev;
805 loff_t i_size;
796 struct timespec i_atime; 806 struct timespec i_atime;
797 struct timespec i_mtime; 807 struct timespec i_mtime;
798 struct timespec i_ctime; 808 struct timespec i_ctime;
799 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */ 809 spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
800 unsigned short i_bytes; 810 unsigned short i_bytes;
811 unsigned int i_blkbits;
801 blkcnt_t i_blocks; 812 blkcnt_t i_blocks;
802 loff_t i_size;
803 813
804#ifdef __NEED_I_SIZE_ORDERED 814#ifdef __NEED_I_SIZE_ORDERED
805 seqcount_t i_size_seqcount; 815 seqcount_t i_size_seqcount;
@@ -819,9 +829,8 @@ struct inode {
819 struct list_head i_dentry; 829 struct list_head i_dentry;
820 struct rcu_head i_rcu; 830 struct rcu_head i_rcu;
821 }; 831 };
822 atomic_t i_count;
823 unsigned int i_blkbits;
824 u64 i_version; 832 u64 i_version;
833 atomic_t i_count;
825 atomic_t i_dio_count; 834 atomic_t i_dio_count;
826 atomic_t i_writecount; 835 atomic_t i_writecount;
827 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */ 836 const struct file_operations *i_fop; /* former ->i_op->default_file_ops */
@@ -1683,6 +1692,7 @@ struct inode_operations {
1683 int (*removexattr) (struct dentry *, const char *); 1692 int (*removexattr) (struct dentry *, const char *);
1684 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, 1693 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1685 u64 len); 1694 u64 len);
1695 int (*update_time)(struct inode *, struct timespec *, int);
1686} ____cacheline_aligned; 1696} ____cacheline_aligned;
1687 1697
1688struct seq_file; 1698struct seq_file;
@@ -1690,8 +1700,7 @@ struct seq_file;
1690ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector, 1700ssize_t rw_copy_check_uvector(int type, const struct iovec __user * uvector,
1691 unsigned long nr_segs, unsigned long fast_segs, 1701 unsigned long nr_segs, unsigned long fast_segs,
1692 struct iovec *fast_pointer, 1702 struct iovec *fast_pointer,
1693 struct iovec **ret_pointer, 1703 struct iovec **ret_pointer);
1694 int check_access);
1695 1704
1696extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *); 1705extern ssize_t vfs_read(struct file *, char __user *, size_t, loff_t *);
1697extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *); 1706extern ssize_t vfs_write(struct file *, const char __user *, size_t, loff_t *);
@@ -1842,6 +1851,13 @@ static inline void inode_inc_iversion(struct inode *inode)
1842 spin_unlock(&inode->i_lock); 1851 spin_unlock(&inode->i_lock);
1843} 1852}
1844 1853
1854enum file_time_flags {
1855 S_ATIME = 1,
1856 S_MTIME = 2,
1857 S_CTIME = 4,
1858 S_VERSION = 8,
1859};
1860
1845extern void touch_atime(struct path *); 1861extern void touch_atime(struct path *);
1846static inline void file_accessed(struct file *file) 1862static inline void file_accessed(struct file *file)
1847{ 1863{
@@ -2453,8 +2469,6 @@ enum {
2453}; 2469};
2454 2470
2455void dio_end_io(struct bio *bio, int error); 2471void dio_end_io(struct bio *bio, int error);
2456void inode_dio_wait(struct inode *inode);
2457void inode_dio_done(struct inode *inode);
2458 2472
2459ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode, 2473ssize_t __blockdev_direct_IO(int rw, struct kiocb *iocb, struct inode *inode,
2460 struct block_device *bdev, const struct iovec *iov, loff_t offset, 2474 struct block_device *bdev, const struct iovec *iov, loff_t offset,
@@ -2469,12 +2483,11 @@ static inline ssize_t blockdev_direct_IO(int rw, struct kiocb *iocb,
2469 offset, nr_segs, get_block, NULL, NULL, 2483 offset, nr_segs, get_block, NULL, NULL,
2470 DIO_LOCKING | DIO_SKIP_HOLES); 2484 DIO_LOCKING | DIO_SKIP_HOLES);
2471} 2485}
2472#else
2473static inline void inode_dio_wait(struct inode *inode)
2474{
2475}
2476#endif 2486#endif
2477 2487
2488void inode_dio_wait(struct inode *inode);
2489void inode_dio_done(struct inode *inode);
2490
2478extern const struct file_operations generic_ro_fops; 2491extern const struct file_operations generic_ro_fops;
2479 2492
2480#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m)) 2493#define special_file(m) (S_ISCHR(m)||S_ISBLK(m)||S_ISFIFO(m)||S_ISSOCK(m))
@@ -2578,7 +2591,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *);
2578extern int inode_newsize_ok(const struct inode *, loff_t offset); 2591extern int inode_newsize_ok(const struct inode *, loff_t offset);
2579extern void setattr_copy(struct inode *inode, const struct iattr *attr); 2592extern void setattr_copy(struct inode *inode, const struct iattr *attr);
2580 2593
2581extern void file_update_time(struct file *file); 2594extern int file_update_time(struct file *file);
2582 2595
2583extern int generic_show_options(struct seq_file *m, struct dentry *root); 2596extern int generic_show_options(struct seq_file *m, struct dentry *root);
2584extern void save_mount_options(struct super_block *sb, char *options); 2597extern void save_mount_options(struct super_block *sb, char *options);
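
Alongside the int return of file_update_time(), the new ->update_time() inode operation lets a filesystem take over (and possibly refuse) timestamp updates. A rough sketch of what an implementation could look like, using the file_time_flags bits declared above — hypothetical, not taken from any in-tree filesystem:

static int examplefs_update_time(struct inode *inode, struct timespec *now,
                                 int flags)
{
        /* a real filesystem might first fail here, e.g. if it is frozen
         * or a journal transaction cannot be started */
        if (flags & S_ATIME)
                inode->i_atime = *now;
        if (flags & S_MTIME)
                inode->i_mtime = *now;
        if (flags & S_CTIME)
                inode->i_ctime = *now;
        if (flags & S_VERSION)
                inode_inc_iversion(inode);

        mark_inode_dirty_sync(inode);
        return 0;
}
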
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 91d0e0a34ef3..63d966d5c2ea 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -60,7 +60,7 @@
60#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\ 60#define FS_EVENTS_POSS_ON_CHILD (FS_ACCESS | FS_MODIFY | FS_ATTRIB |\
61 FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\ 61 FS_CLOSE_WRITE | FS_CLOSE_NOWRITE | FS_OPEN |\
62 FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\ 62 FS_MOVED_FROM | FS_MOVED_TO | FS_CREATE |\
63 FS_DELETE) 63 FS_DELETE | FS_OPEN_PERM | FS_ACCESS_PERM)
64 64
65#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO) 65#define FS_MOVE (FS_MOVED_FROM | FS_MOVED_TO)
66 66
diff --git a/include/linux/fuse.h b/include/linux/fuse.h
index 8f2ab8fef929..9303348965fb 100644
--- a/include/linux/fuse.h
+++ b/include/linux/fuse.h
@@ -54,6 +54,9 @@
54 * 7.18 54 * 7.18
55 * - add FUSE_IOCTL_DIR flag 55 * - add FUSE_IOCTL_DIR flag
56 * - add FUSE_NOTIFY_DELETE 56 * - add FUSE_NOTIFY_DELETE
57 *
58 * 7.19
59 * - add FUSE_FALLOCATE
57 */ 60 */
58 61
59#ifndef _LINUX_FUSE_H 62#ifndef _LINUX_FUSE_H
@@ -85,7 +88,7 @@
85#define FUSE_KERNEL_VERSION 7 88#define FUSE_KERNEL_VERSION 7
86 89
87/** Minor version number of this interface */ 90/** Minor version number of this interface */
88#define FUSE_KERNEL_MINOR_VERSION 18 91#define FUSE_KERNEL_MINOR_VERSION 19
89 92
90/** The node ID of the root inode */ 93/** The node ID of the root inode */
91#define FUSE_ROOT_ID 1 94#define FUSE_ROOT_ID 1
@@ -278,6 +281,7 @@ enum fuse_opcode {
278 FUSE_POLL = 40, 281 FUSE_POLL = 40,
279 FUSE_NOTIFY_REPLY = 41, 282 FUSE_NOTIFY_REPLY = 41,
280 FUSE_BATCH_FORGET = 42, 283 FUSE_BATCH_FORGET = 42,
284 FUSE_FALLOCATE = 43,
281 285
282 /* CUSE specific operations */ 286 /* CUSE specific operations */
283 CUSE_INIT = 4096, 287 CUSE_INIT = 4096,
@@ -571,6 +575,14 @@ struct fuse_notify_poll_wakeup_out {
571 __u64 kh; 575 __u64 kh;
572}; 576};
573 577
578struct fuse_fallocate_in {
579 __u64 fh;
580 __u64 offset;
581 __u64 length;
582 __u32 mode;
583 __u32 padding;
584};
585
574struct fuse_in_header { 586struct fuse_in_header {
575 __u32 len; 587 __u32 len;
576 __u32 opcode; 588 __u32 opcode;
diff --git a/include/linux/genetlink.h b/include/linux/genetlink.h
index 73c28dea10ae..7a114016ac7d 100644
--- a/include/linux/genetlink.h
+++ b/include/linux/genetlink.h
@@ -110,6 +110,9 @@ extern int lockdep_genl_is_held(void);
110#define genl_dereference(p) \ 110#define genl_dereference(p) \
111 rcu_dereference_protected(p, lockdep_genl_is_held()) 111 rcu_dereference_protected(p, lockdep_genl_is_held())
112 112
113#define MODULE_ALIAS_GENL_FAMILY(family)\
114 MODULE_ALIAS_NET_PF_PROTO_NAME(PF_NETLINK, NETLINK_GENERIC, "-family-" family)
115
113#endif /* __KERNEL__ */ 116#endif /* __KERNEL__ */
114 117
115#endif /* __LINUX_GENERIC_NETLINK_H */ 118#endif /* __LINUX_GENERIC_NETLINK_H */
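
The new MODULE_ALIAS_GENL_FAMILY() macro lets a module be loaded automatically when userspace asks for its generic netlink family by name; a module serving a hypothetical family called "foo" would simply declare:

MODULE_ALIAS_GENL_FAMILY("foo");
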
diff --git a/include/linux/i2c-mux-pinctrl.h b/include/linux/i2c-mux-pinctrl.h
new file mode 100644
index 000000000000..a65c86429e84
--- /dev/null
+++ b/include/linux/i2c-mux-pinctrl.h
@@ -0,0 +1,41 @@
1/*
2 * i2c-mux-pinctrl platform data
3 *
4 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program. If not, see <http://www.gnu.org/licenses/>.
17 */
18
19#ifndef _LINUX_I2C_MUX_PINCTRL_H
20#define _LINUX_I2C_MUX_PINCTRL_H
21
22/**
23 * struct i2c_mux_pinctrl_platform_data - Platform data for i2c-mux-pinctrl
24 * @parent_bus_num: Parent I2C bus number
25 * @base_bus_num: Base I2C bus number for the child busses. 0 for dynamic.
26 * @bus_count: Number of child busses. Also the number of elements in
27 * @pinctrl_states
28 * @pinctrl_states: The names of the pinctrl state to select for each child bus
29 * @pinctrl_state_idle: The pinctrl state to select when no child bus is being
30 * accessed. If NULL, the most recently used pinctrl state will be left
31 * selected.
32 */
33struct i2c_mux_pinctrl_platform_data {
34 int parent_bus_num;
35 int base_bus_num;
36 int bus_count;
37 const char **pinctrl_states;
38 const char *pinctrl_state_idle;
39};
40
41#endif
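
To see how the platform data above is meant to be filled in, board code for a hypothetical mux with two child busses selected by pinctrl states named "i2c-bus0" and "i2c-bus1" might provide something like:

#include <linux/i2c-mux-pinctrl.h>

static const char *example_mux_states[] = { "i2c-bus0", "i2c-bus1" };

static struct i2c_mux_pinctrl_platform_data example_mux_pdata = {
        .parent_bus_num     = 0,        /* mux sits on I2C bus 0 */
        .base_bus_num       = 0,        /* 0: number the child busses dynamically */
        .bus_count          = 2,
        .pinctrl_states     = example_mux_states,
        .pinctrl_state_idle = NULL,     /* keep the last-used state selected */
};
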
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index e4baff5f7ff4..9e65eff6af3b 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -149,6 +149,7 @@ extern struct cred init_cred;
149 .normal_prio = MAX_PRIO-20, \ 149 .normal_prio = MAX_PRIO-20, \
150 .policy = SCHED_NORMAL, \ 150 .policy = SCHED_NORMAL, \
151 .cpus_allowed = CPU_MASK_ALL, \ 151 .cpus_allowed = CPU_MASK_ALL, \
152 .nr_cpus_allowed= NR_CPUS, \
152 .mm = NULL, \ 153 .mm = NULL, \
153 .active_mm = &init_mm, \ 154 .active_mm = &init_mm, \
154 .se = { \ 155 .se = { \
@@ -157,7 +158,6 @@ extern struct cred init_cred;
157 .rt = { \ 158 .rt = { \
158 .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \ 159 .run_list = LIST_HEAD_INIT(tsk.rt.run_list), \
159 .time_slice = RR_TIMESLICE, \ 160 .time_slice = RR_TIMESLICE, \
160 .nr_cpus_allowed = NR_CPUS, \
161 }, \ 161 }, \
162 .tasks = LIST_HEAD_INIT(tsk.tasks), \ 162 .tasks = LIST_HEAD_INIT(tsk.tasks), \
163 INIT_PUSHABLE_TASKS(tsk) \ 163 INIT_PUSHABLE_TASKS(tsk) \
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index c91171599cb6..e68a8e53bb59 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -142,8 +142,6 @@ request_any_context_irq(unsigned int irq, irq_handler_t handler,
142extern int __must_check 142extern int __must_check
143request_percpu_irq(unsigned int irq, irq_handler_t handler, 143request_percpu_irq(unsigned int irq, irq_handler_t handler,
144 const char *devname, void __percpu *percpu_dev_id); 144 const char *devname, void __percpu *percpu_dev_id);
145
146extern void exit_irq_thread(void);
147#else 145#else
148 146
149extern int __must_check 147extern int __must_check
@@ -177,8 +175,6 @@ request_percpu_irq(unsigned int irq, irq_handler_t handler,
177{ 175{
178 return request_irq(irq, handler, 0, devname, percpu_dev_id); 176 return request_irq(irq, handler, 0, devname, percpu_dev_id);
179} 177}
180
181static inline void exit_irq_thread(void) { }
182#endif 178#endif
183 179
184extern void free_irq(unsigned int, void *); 180extern void free_irq(unsigned int, void *);
diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h
index 8a297a5e794c..5499c92a9153 100644
--- a/include/linux/ipc_namespace.h
+++ b/include/linux/ipc_namespace.h
@@ -62,6 +62,8 @@ struct ipc_namespace {
62 unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */ 62 unsigned int mq_queues_max; /* initialized to DFLT_QUEUESMAX */
63 unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */ 63 unsigned int mq_msg_max; /* initialized to DFLT_MSGMAX */
64 unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */ 64 unsigned int mq_msgsize_max; /* initialized to DFLT_MSGSIZEMAX */
65 unsigned int mq_msg_default;
66 unsigned int mq_msgsize_default;
65 67
66 /* user_ns which owns the ipc ns */ 68 /* user_ns which owns the ipc ns */
67 struct user_namespace *user_ns; 69 struct user_namespace *user_ns;
@@ -90,11 +92,41 @@ static inline void shm_destroy_orphaned(struct ipc_namespace *ns) {}
90 92
91#ifdef CONFIG_POSIX_MQUEUE 93#ifdef CONFIG_POSIX_MQUEUE
92extern int mq_init_ns(struct ipc_namespace *ns); 94extern int mq_init_ns(struct ipc_namespace *ns);
93/* default values */ 95/*
94#define DFLT_QUEUESMAX 256 /* max number of message queues */ 96 * POSIX Message Queue default values:
95#define DFLT_MSGMAX 10 /* max number of messages in each queue */ 97 *
96#define HARD_MSGMAX (32768*sizeof(void *)/4) 98 * MIN_*: Lowest value an admin can set the maximum unprivileged limit to
97#define DFLT_MSGSIZEMAX 8192 /* max message size */ 99 * DFLT_*MAX: Default values for the maximum unprivileged limits
100 * DFLT_{MSG,MSGSIZE}: Default values used when the user doesn't supply
101 * an attribute to the open call and the queue must be created
102 * HARD_*: Highest value the maximums can be set to. These are enforced
103 * on CAP_SYS_RESOURCE apps as well making them inviolate (so make them
104 * suitably high)
105 *
106 * POSIX Requirements:
107 * Per app minimum openable message queues - 8. This does not map well
108 * to the fact that we limit the number of queues on a per namespace
109 * basis instead of a per app basis. So, make the default high enough
110 * that no given app should have a hard time opening 8 queues.
111 * Minimum maximum for HARD_MSGMAX - 32767. I bumped this to 65536.
112 * Minimum maximum for HARD_MSGSIZEMAX - POSIX is silent on this. However,
113 * we have run into a situation where running applications in the wild
114 * require this to be at least 5MB, and preferably 10MB, so I set the
115 * value to 16MB in hopes that this user is the worst of the bunch and
116 * the new maximum will handle anyone else. I may have to revisit this
117 * in the future.
118 */
119#define MIN_QUEUESMAX 1
120#define DFLT_QUEUESMAX 256
121#define HARD_QUEUESMAX 1024
122#define MIN_MSGMAX 1
123#define DFLT_MSG 10U
124#define DFLT_MSGMAX 10
125#define HARD_MSGMAX 65536
126#define MIN_MSGSIZEMAX 128
127#define DFLT_MSGSIZE 8192U
128#define DFLT_MSGSIZEMAX 8192
129#define HARD_MSGSIZEMAX (16*1024*1024)
98#else 130#else
99static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; } 131static inline int mq_init_ns(struct ipc_namespace *ns) { return 0; }
100#endif 132#endif
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 912c30a8ddb1..f334c7fab967 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -31,6 +31,7 @@
31#include <linux/mutex.h> 31#include <linux/mutex.h>
32#include <linux/timer.h> 32#include <linux/timer.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <crypto/hash.h>
34#endif 35#endif
35 36
36#define journal_oom_retry 1 37#define journal_oom_retry 1
@@ -147,12 +148,24 @@ typedef struct journal_header_s
147#define JBD2_CRC32_CHKSUM 1 148#define JBD2_CRC32_CHKSUM 1
148#define JBD2_MD5_CHKSUM 2 149#define JBD2_MD5_CHKSUM 2
149#define JBD2_SHA1_CHKSUM 3 150#define JBD2_SHA1_CHKSUM 3
151#define JBD2_CRC32C_CHKSUM 4
150 152
151#define JBD2_CRC32_CHKSUM_SIZE 4 153#define JBD2_CRC32_CHKSUM_SIZE 4
152 154
153#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32)) 155#define JBD2_CHECKSUM_BYTES (32 / sizeof(u32))
154/* 156/*
155 * Commit block header for storing transactional checksums: 157 * Commit block header for storing transactional checksums:
158 *
159 * NOTE: If FEATURE_COMPAT_CHECKSUM (checksum v1) is set, the h_chksum*
160 * fields are used to store a checksum of the descriptor and data blocks.
161 *
162 * If FEATURE_INCOMPAT_CSUM_V2 (checksum v2) is set, then the h_chksum
163 * field is used to store crc32c(uuid+commit_block). Each journal metadata
164 * block gets its own checksum, and data block checksums are stored in
165 * journal_block_tag (in the descriptor). The other h_chksum* fields are
166 * not used.
167 *
168 * Checksum v1 and v2 are mutually exclusive features.
156 */ 169 */
157struct commit_header { 170struct commit_header {
158 __be32 h_magic; 171 __be32 h_magic;
@@ -175,13 +188,19 @@ struct commit_header {
175typedef struct journal_block_tag_s 188typedef struct journal_block_tag_s
176{ 189{
177 __be32 t_blocknr; /* The on-disk block number */ 190 __be32 t_blocknr; /* The on-disk block number */
178 __be32 t_flags; /* See below */ 191 __be16 t_checksum; /* truncated crc32c(uuid+seq+block) */
192 __be16 t_flags; /* See below */
179 __be32 t_blocknr_high; /* most-significant high 32bits. */ 193 __be32 t_blocknr_high; /* most-significant high 32bits. */
180} journal_block_tag_t; 194} journal_block_tag_t;
181 195
182#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high)) 196#define JBD2_TAG_SIZE32 (offsetof(journal_block_tag_t, t_blocknr_high))
183#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t)) 197#define JBD2_TAG_SIZE64 (sizeof(journal_block_tag_t))
184 198
199/* Tail of descriptor block, for checksumming */
200struct jbd2_journal_block_tail {
201 __be32 t_checksum; /* crc32c(uuid+descr_block) */
202};
203
185/* 204/*
186 * The revoke descriptor: used on disk to describe a series of blocks to 205 * The revoke descriptor: used on disk to describe a series of blocks to
187 * be revoked from the log 206 * be revoked from the log
@@ -192,6 +211,10 @@ typedef struct jbd2_journal_revoke_header_s
192 __be32 r_count; /* Count of bytes used in the block */ 211 __be32 r_count; /* Count of bytes used in the block */
193} jbd2_journal_revoke_header_t; 212} jbd2_journal_revoke_header_t;
194 213
214/* Tail of revoke block, for checksumming */
215struct jbd2_journal_revoke_tail {
216 __be32 r_checksum; /* crc32c(uuid+revoke_block) */
217};
195 218
196/* Definitions for the journal tag flags word: */ 219/* Definitions for the journal tag flags word: */
197#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */ 220#define JBD2_FLAG_ESCAPE 1 /* on-disk block is escaped */
@@ -241,7 +264,10 @@ typedef struct journal_superblock_s
241 __be32 s_max_trans_data; /* Limit of data blocks per trans. */ 264 __be32 s_max_trans_data; /* Limit of data blocks per trans. */
242 265
243/* 0x0050 */ 266/* 0x0050 */
244 __u32 s_padding[44]; 267 __u8 s_checksum_type; /* checksum type */
268 __u8 s_padding2[3];
269 __u32 s_padding[42];
270 __be32 s_checksum; /* crc32c(superblock) */
245 271
246/* 0x0100 */ 272/* 0x0100 */
247 __u8 s_users[16*48]; /* ids of all fs'es sharing the log */ 273 __u8 s_users[16*48]; /* ids of all fs'es sharing the log */
@@ -263,13 +289,15 @@ typedef struct journal_superblock_s
263#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001 289#define JBD2_FEATURE_INCOMPAT_REVOKE 0x00000001
264#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002 290#define JBD2_FEATURE_INCOMPAT_64BIT 0x00000002
265#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004 291#define JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT 0x00000004
292#define JBD2_FEATURE_INCOMPAT_CSUM_V2 0x00000008
266 293
267/* Features known to this kernel version: */ 294/* Features known to this kernel version: */
268#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM 295#define JBD2_KNOWN_COMPAT_FEATURES JBD2_FEATURE_COMPAT_CHECKSUM
269#define JBD2_KNOWN_ROCOMPAT_FEATURES 0 296#define JBD2_KNOWN_ROCOMPAT_FEATURES 0
270#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \ 297#define JBD2_KNOWN_INCOMPAT_FEATURES (JBD2_FEATURE_INCOMPAT_REVOKE | \
271 JBD2_FEATURE_INCOMPAT_64BIT | \ 298 JBD2_FEATURE_INCOMPAT_64BIT | \
272 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT) 299 JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT | \
300 JBD2_FEATURE_INCOMPAT_CSUM_V2)
273 301
274#ifdef __KERNEL__ 302#ifdef __KERNEL__
275 303
@@ -939,6 +967,12 @@ struct journal_s
939 * superblock pointer here 967 * superblock pointer here
940 */ 968 */
941 void *j_private; 969 void *j_private;
970
971 /* Reference to checksum algorithm driver via cryptoapi */
972 struct crypto_shash *j_chksum_driver;
973
974 /* Precomputed journal UUID checksum for seeding other checksums */
975 __u32 j_csum_seed;
942}; 976};
943 977
944/* 978/*
@@ -1268,6 +1302,25 @@ static inline int jbd_space_needed(journal_t *journal)
1268 1302
1269extern int jbd_blocks_per_page(struct inode *inode); 1303extern int jbd_blocks_per_page(struct inode *inode);
1270 1304
1305static inline u32 jbd2_chksum(journal_t *journal, u32 crc,
1306 const void *address, unsigned int length)
1307{
1308 struct {
1309 struct shash_desc shash;
1310 char ctx[crypto_shash_descsize(journal->j_chksum_driver)];
1311 } desc;
1312 int err;
1313
1314 desc.shash.tfm = journal->j_chksum_driver;
1315 desc.shash.flags = 0;
1316 *(u32 *)desc.ctx = crc;
1317
1318 err = crypto_shash_update(&desc.shash, address, length);
1319 BUG_ON(err);
1320
1321 return *(u32 *)desc.ctx;
1322}
1323
1271#ifdef __KERNEL__ 1324#ifdef __KERNEL__
1272 1325
1273#define buffer_trace_init(bh) do {} while (0) 1326#define buffer_trace_init(bh) do {} while (0)
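
For context on the new checksum-v2 layout above, a minimal sketch of how a commit block could be verified with the jbd2_chksum() helper and the j_csum_seed field introduced here. The function name and shape are illustrative (the real verification lives in fs/jbd2), but the seed and the zeroed h_chksum[0] follow the scheme described in the comment:

#include <linux/jbd2.h>

/* Illustrative only: mirrors the checksum-v2 rules, not code from this patch. */
static int commit_block_csum_ok(journal_t *j, void *buf)
{
	struct commit_header *h = buf;
	__be32 provided;
	__u32 calculated;

	if (!JBD2_HAS_INCOMPAT_FEATURE(j, JBD2_FEATURE_INCOMPAT_CSUM_V2))
		return 1;

	/* the stored value was computed with h_chksum[0] zeroed */
	provided = h->h_chksum[0];
	h->h_chksum[0] = 0;
	calculated = jbd2_chksum(j, j->j_csum_seed, buf, j->j_blocksize);
	h->h_chksum[0] = provided;

	return be32_to_cpu(provided) == calculated;
}
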
diff --git a/include/linux/jbd_common.h b/include/linux/jbd_common.h
index 6230f8556a4e..6133679bc4c0 100644
--- a/include/linux/jbd_common.h
+++ b/include/linux/jbd_common.h
@@ -12,6 +12,7 @@ enum jbd_state_bits {
12 BH_State, /* Pins most journal_head state */ 12 BH_State, /* Pins most journal_head state */
13 BH_JournalHead, /* Pins bh->b_private and jh->b_bh */ 13 BH_JournalHead, /* Pins bh->b_private and jh->b_bh */
14 BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */ 14 BH_Unshadow, /* Dummy bit, for BJ_Shadow wakeup filtering */
15 BH_Verified, /* Metadata block has been verified ok */
15 BH_JBDPrivateStart, /* First bit available for private use by FS */ 16 BH_JBDPrivateStart, /* First bit available for private use by FS */
16}; 17};
17 18
@@ -24,6 +25,7 @@ TAS_BUFFER_FNS(Revoked, revoked)
24BUFFER_FNS(RevokeValid, revokevalid) 25BUFFER_FNS(RevokeValid, revokevalid)
25TAS_BUFFER_FNS(RevokeValid, revokevalid) 26TAS_BUFFER_FNS(RevokeValid, revokevalid)
26BUFFER_FNS(Freed, freed) 27BUFFER_FNS(Freed, freed)
28BUFFER_FNS(Verified, verified)
27 29
28static inline struct buffer_head *jh2bh(struct journal_head *jh) 30static inline struct buffer_head *jh2bh(struct journal_head *jh)
29{ 31{
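
The new Verified bit presumably lets the journal remember that a cached metadata buffer already passed its checksum, so repeated reads skip re-verification. A small sketch of that pattern using the accessors generated by BUFFER_FNS(Verified, verified); metadata_csum_ok() is a hypothetical stand-in for the actual check:

#include <linux/jbd2.h>

static int read_and_verify_metadata(journal_t *j, struct buffer_head *bh)
{
	if (buffer_verified(bh))
		return 0;			/* already checked while cached */

	if (!metadata_csum_ok(j, bh))		/* hypothetical checksum helper */
		return -EIO;

	set_buffer_verified(bh);		/* latch the result on the bh */
	return 0;
}
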
diff --git a/include/linux/kcmp.h b/include/linux/kcmp.h
new file mode 100644
index 000000000000..2dcd1b3aafc8
--- /dev/null
+++ b/include/linux/kcmp.h
@@ -0,0 +1,17 @@
1#ifndef _LINUX_KCMP_H
2#define _LINUX_KCMP_H
3
4/* Comparison type */
5enum kcmp_type {
6 KCMP_FILE,
7 KCMP_VM,
8 KCMP_FILES,
9 KCMP_FS,
10 KCMP_SIGHAND,
11 KCMP_IO,
12 KCMP_SYSVSEM,
13
14 KCMP_TYPES,
15};
16
17#endif /* _LINUX_KCMP_H */
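
This header is exported to user space for the new kcmp() system call. A hedged usage sketch (assuming the installed headers define __NR_kcmp and the caller has ptrace-level access to both tasks), checking whether two descriptors refer to the same open file description:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

static int same_file(pid_t pid1, int fd1, pid_t pid2, int fd2)
{
	/* 0 means "equal"; 1 and 2 only establish an arbitrary ordering */
	return syscall(__NR_kcmp, pid1, pid2, KCMP_FILE,
		       (unsigned long)fd1, (unsigned long)fd2) == 0;
}

int main(void)
{
	int dup_fd = dup(0);

	printf("stdin and dup(stdin) share a file: %d\n",
	       same_file(getpid(), 0, getpid(), dup_fd));
	return 0;
}
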
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index ec55a3c8ba77..e07f5e0c5df4 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -35,6 +35,7 @@
35#define LLONG_MAX ((long long)(~0ULL>>1)) 35#define LLONG_MAX ((long long)(~0ULL>>1))
36#define LLONG_MIN (-LLONG_MAX - 1) 36#define LLONG_MIN (-LLONG_MAX - 1)
37#define ULLONG_MAX (~0ULL) 37#define ULLONG_MAX (~0ULL)
38#define SIZE_MAX (~(size_t)0)
38 39
39#define STACK_MAGIC 0xdeadbeef 40#define STACK_MAGIC 0xdeadbeef
40 41
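
SIZE_MAX exists mainly for overflow checks on size calculations before allocation. A minimal sketch of the usual pattern (illustrative, not from this patch):

#include <linux/kernel.h>
#include <linux/slab.h>

static void *alloc_array(size_t n, size_t size)
{
	/* refuse a multiplication that would wrap around size_t */
	if (size != 0 && n > SIZE_MAX / size)
		return NULL;

	return kmalloc(n * size, GFP_KERNEL);
}
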
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 0d7d6a1b172f..37c5f7261142 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -1,8 +1,58 @@
1#ifndef LINUX_KEXEC_H 1#ifndef LINUX_KEXEC_H
2#define LINUX_KEXEC_H 2#define LINUX_KEXEC_H
3 3
4#ifdef CONFIG_KEXEC 4/* kexec system call - It loads the new kernel to boot into.
5 * kexec does not sync, or unmount filesystems so if you need
6 * that to happen you need to do that yourself.
7 */
8
5#include <linux/types.h> 9#include <linux/types.h>
10
11/* kexec flags for different usage scenarios */
12#define KEXEC_ON_CRASH 0x00000001
13#define KEXEC_PRESERVE_CONTEXT 0x00000002
14#define KEXEC_ARCH_MASK 0xffff0000
15
16/* These values match the ELF architecture values.
17 * Unless there is a good reason that should continue to be the case.
18 */
19#define KEXEC_ARCH_DEFAULT ( 0 << 16)
20#define KEXEC_ARCH_386 ( 3 << 16)
21#define KEXEC_ARCH_X86_64 (62 << 16)
22#define KEXEC_ARCH_PPC (20 << 16)
23#define KEXEC_ARCH_PPC64 (21 << 16)
24#define KEXEC_ARCH_IA_64 (50 << 16)
25#define KEXEC_ARCH_ARM (40 << 16)
26#define KEXEC_ARCH_S390 (22 << 16)
27#define KEXEC_ARCH_SH (42 << 16)
28#define KEXEC_ARCH_MIPS_LE (10 << 16)
29#define KEXEC_ARCH_MIPS ( 8 << 16)
30
31/* The artificial cap on the number of segments passed to kexec_load. */
32#define KEXEC_SEGMENT_MAX 16
33
34#ifndef __KERNEL__
35/*
36 * This structure is used to hold the arguments that are used when
37 * loading kernel binaries.
38 */
39struct kexec_segment {
40 const void *buf;
41 size_t bufsz;
42 const void *mem;
43 size_t memsz;
44};
45
46/* Load a new kernel image as described by the kexec_segment array
47 * consisting of passed number of segments at the entry-point address.
48 * The flags allow different usage types.
49 */
50extern int kexec_load(void *, size_t, struct kexec_segment *,
51 unsigned long int);
52#endif /* __KERNEL__ */
53
54#ifdef __KERNEL__
55#ifdef CONFIG_KEXEC
6#include <linux/list.h> 56#include <linux/list.h>
7#include <linux/linkage.h> 57#include <linux/linkage.h>
8#include <linux/compat.h> 58#include <linux/compat.h>
@@ -67,11 +117,10 @@ typedef unsigned long kimage_entry_t;
67#define IND_DONE 0x4 117#define IND_DONE 0x4
68#define IND_SOURCE 0x8 118#define IND_SOURCE 0x8
69 119
70#define KEXEC_SEGMENT_MAX 16
71struct kexec_segment { 120struct kexec_segment {
72 void __user *buf; 121 void __user *buf;
73 size_t bufsz; 122 size_t bufsz;
74 unsigned long mem; /* User space sees this as a (void *) ... */ 123 unsigned long mem;
75 size_t memsz; 124 size_t memsz;
76}; 125};
77 126
@@ -175,25 +224,6 @@ extern struct kimage *kexec_crash_image;
175#define kexec_flush_icache_page(page) 224#define kexec_flush_icache_page(page)
176#endif 225#endif
177 226
178#define KEXEC_ON_CRASH 0x00000001
179#define KEXEC_PRESERVE_CONTEXT 0x00000002
180#define KEXEC_ARCH_MASK 0xffff0000
181
182/* These values match the ELF architecture values.
183 * Unless there is a good reason that should continue to be the case.
184 */
185#define KEXEC_ARCH_DEFAULT ( 0 << 16)
186#define KEXEC_ARCH_386 ( 3 << 16)
187#define KEXEC_ARCH_X86_64 (62 << 16)
188#define KEXEC_ARCH_PPC (20 << 16)
189#define KEXEC_ARCH_PPC64 (21 << 16)
190#define KEXEC_ARCH_IA_64 (50 << 16)
191#define KEXEC_ARCH_ARM (40 << 16)
192#define KEXEC_ARCH_S390 (22 << 16)
193#define KEXEC_ARCH_SH (42 << 16)
194#define KEXEC_ARCH_MIPS_LE (10 << 16)
195#define KEXEC_ARCH_MIPS ( 8 << 16)
196
197/* List of defined/legal kexec flags */ 227/* List of defined/legal kexec flags */
198#ifndef CONFIG_KEXEC_JUMP 228#ifndef CONFIG_KEXEC_JUMP
199#define KEXEC_FLAGS KEXEC_ON_CRASH 229#define KEXEC_FLAGS KEXEC_ON_CRASH
@@ -228,4 +258,5 @@ struct task_struct;
228static inline void crash_kexec(struct pt_regs *regs) { } 258static inline void crash_kexec(struct pt_regs *regs) { }
229static inline int kexec_should_crash(struct task_struct *p) { return 0; } 259static inline int kexec_should_crash(struct task_struct *p) { return 0; }
230#endif /* CONFIG_KEXEC */ 260#endif /* CONFIG_KEXEC */
261#endif /* __KERNEL__ */
231#endif /* LINUX_KEXEC_H */ 262#endif /* LINUX_KEXEC_H */
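
With the flags and the user-space struct kexec_segment now exported, a program can hand segments to kexec_load(2) directly. A heavily simplified sketch (single segment, illustrative addresses; real users go through kexec-tools and the call requires CAP_SYS_BOOT). The kexec_load() prototype above is only a declaration, so the raw syscall is used here:

#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kexec.h>

static int load_image(void *entry, void *buf, size_t len, void *dest_phys)
{
	struct kexec_segment seg = {
		.buf   = buf,		/* image bytes in our address space */
		.bufsz = len,
		.mem   = dest_phys,	/* physical destination address */
		.memsz = len,
	};

	return syscall(__NR_kexec_load, entry, 1UL, &seg,
		       (unsigned long)KEXEC_ARCH_DEFAULT);
}
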
diff --git a/include/linux/key.h b/include/linux/key.h
index 5231800770e1..4cd22ed627ef 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -308,9 +308,6 @@ static inline bool key_is_instantiated(const struct key *key)
308#ifdef CONFIG_SYSCTL 308#ifdef CONFIG_SYSCTL
309extern ctl_table key_sysctls[]; 309extern ctl_table key_sysctls[];
310#endif 310#endif
311
312extern void key_replace_session_keyring(void);
313
314/* 311/*
315 * the userspace interface 312 * the userspace interface
316 */ 313 */
@@ -334,7 +331,6 @@ extern void key_init(void);
334#define key_fsuid_changed(t) do { } while(0) 331#define key_fsuid_changed(t) do { } while(0)
335#define key_fsgid_changed(t) do { } while(0) 332#define key_fsgid_changed(t) do { } while(0)
336#define key_init() do { } while(0) 333#define key_init() do { } while(0)
337#define key_replace_session_keyring() do { } while(0)
338 334
339#endif /* CONFIG_KEYS */ 335#endif /* CONFIG_KEYS */
340#endif /* __KERNEL__ */ 336#endif /* __KERNEL__ */
diff --git a/include/linux/kmod.h b/include/linux/kmod.h
index dd99c329e161..5398d5807075 100644
--- a/include/linux/kmod.h
+++ b/include/linux/kmod.h
@@ -66,40 +66,10 @@ struct subprocess_info {
66 void *data; 66 void *data;
67}; 67};
68 68
69/* Allocate a subprocess_info structure */ 69extern int
70struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
71 char **envp, gfp_t gfp_mask);
72
73/* Set various pieces of state into the subprocess_info structure */
74void call_usermodehelper_setfns(struct subprocess_info *info,
75 int (*init)(struct subprocess_info *info, struct cred *new),
76 void (*cleanup)(struct subprocess_info *info),
77 void *data);
78
79/* Actually execute the sub-process */
80int call_usermodehelper_exec(struct subprocess_info *info, int wait);
81
82/* Free the subprocess_info. This is only needed if you're not going
83 to call call_usermodehelper_exec */
84void call_usermodehelper_freeinfo(struct subprocess_info *info);
85
86static inline int
87call_usermodehelper_fns(char *path, char **argv, char **envp, int wait, 70call_usermodehelper_fns(char *path, char **argv, char **envp, int wait,
88 int (*init)(struct subprocess_info *info, struct cred *new), 71 int (*init)(struct subprocess_info *info, struct cred *new),
89 void (*cleanup)(struct subprocess_info *), void *data) 72 void (*cleanup)(struct subprocess_info *), void *data);
90{
91 struct subprocess_info *info;
92 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
93
94 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
95
96 if (info == NULL)
97 return -ENOMEM;
98
99 call_usermodehelper_setfns(info, init, cleanup, data);
100
101 return call_usermodehelper_exec(info, wait);
102}
103 73
104static inline int 74static inline int
105call_usermodehelper(char *path, char **argv, char **envp, int wait) 75call_usermodehelper(char *path, char **argv, char **envp, int wait)
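
The helper-launch API shrinks to call_usermodehelper()/call_usermodehelper_fns(); the setup/exec split becomes an internal detail of kmod.c. A typical caller still looks like this (path and arguments are illustrative):

#include <linux/kmod.h>

static int run_hotplug_helper(const char *device)
{
	char *argv[] = { "/sbin/hotplug-helper", (char *)device, NULL };
	char *envp[] = { "HOME=/",
			 "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

	/* UMH_WAIT_PROC: sleep until the helper process has exited */
	return call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
}
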
diff --git a/include/linux/kmsg_dump.h b/include/linux/kmsg_dump.h
index 35f7237ec972..d6bd50110ec2 100644
--- a/include/linux/kmsg_dump.h
+++ b/include/linux/kmsg_dump.h
@@ -21,6 +21,7 @@
21 * is passed to the kernel. 21 * is passed to the kernel.
22 */ 22 */
23enum kmsg_dump_reason { 23enum kmsg_dump_reason {
24 KMSG_DUMP_UNDEF,
24 KMSG_DUMP_PANIC, 25 KMSG_DUMP_PANIC,
25 KMSG_DUMP_OOPS, 26 KMSG_DUMP_OOPS,
26 KMSG_DUMP_EMERG, 27 KMSG_DUMP_EMERG,
@@ -31,23 +32,37 @@ enum kmsg_dump_reason {
31 32
32/** 33/**
33 * struct kmsg_dumper - kernel crash message dumper structure 34 * struct kmsg_dumper - kernel crash message dumper structure
34 * @dump: The callback which gets called on crashes. The buffer is passed
35 * as two sections, where s1 (length l1) contains the older
36 * messages and s2 (length l2) contains the newer.
37 * @list: Entry in the dumper list (private) 35 * @list: Entry in the dumper list (private)
36 * @dump: Call into dumping code which will retrieve the data
37 * through the record iterator
38 * @max_reason: filter for highest reason number that should be dumped
38 * @registered: Flag that specifies if this is already registered 39 * @registered: Flag that specifies if this is already registered
39 */ 40 */
40struct kmsg_dumper { 41struct kmsg_dumper {
41 void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason,
42 const char *s1, unsigned long l1,
43 const char *s2, unsigned long l2);
44 struct list_head list; 42 struct list_head list;
45 int registered; 43 void (*dump)(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason);
44 enum kmsg_dump_reason max_reason;
45 bool active;
46 bool registered;
47
48 /* private state of the kmsg iterator */
49 u32 cur_idx;
50 u32 next_idx;
51 u64 cur_seq;
52 u64 next_seq;
46}; 53};
47 54
48#ifdef CONFIG_PRINTK 55#ifdef CONFIG_PRINTK
49void kmsg_dump(enum kmsg_dump_reason reason); 56void kmsg_dump(enum kmsg_dump_reason reason);
50 57
58bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
59 char *line, size_t size, size_t *len);
60
61bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
62 char *buf, size_t size, size_t *len);
63
64void kmsg_dump_rewind(struct kmsg_dumper *dumper);
65
51int kmsg_dump_register(struct kmsg_dumper *dumper); 66int kmsg_dump_register(struct kmsg_dumper *dumper);
52 67
53int kmsg_dump_unregister(struct kmsg_dumper *dumper); 68int kmsg_dump_unregister(struct kmsg_dumper *dumper);
@@ -56,6 +71,22 @@ static inline void kmsg_dump(enum kmsg_dump_reason reason)
56{ 71{
57} 72}
58 73
74static inline bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
75 const char *line, size_t size, size_t *len)
76{
77 return false;
78}
79
80static inline bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
81 char *buf, size_t size, size_t *len)
82{
83 return false;
84}
85
86static inline void kmsg_dump_rewind(struct kmsg_dumper *dumper)
87{
88}
89
59static inline int kmsg_dump_register(struct kmsg_dumper *dumper) 90static inline int kmsg_dump_register(struct kmsg_dumper *dumper)
60{ 91{
61 return -EINVAL; 92 return -EINVAL;
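
Dumpers no longer receive two raw text ranges; they pull records through the iterator while kmsg_dump() marks them active. A sketch of a dumper written against the new interface (write_to_backend() is a hypothetical stand-in for whatever storage the dumper drives):

#include <linux/kmsg_dump.h>

static void my_dump(struct kmsg_dumper *dumper, enum kmsg_dump_reason reason)
{
	char line[256];
	size_t len;

	/* walk the log one record at a time, oldest record first */
	while (kmsg_dump_get_line(dumper, false, line, sizeof(line), &len))
		write_to_backend(line, len);	/* hypothetical output path */
}

static struct kmsg_dumper my_dumper = {
	.dump       = my_dump,
	.max_reason = KMSG_DUMP_OOPS,	/* ignore anything milder than an oops */
};

/* somewhere in module init: kmsg_dump_register(&my_dumper); */
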
diff --git a/include/linux/lglock.h b/include/linux/lglock.h
index 87f402ccec55..f01e5f6d1f07 100644
--- a/include/linux/lglock.h
+++ b/include/linux/lglock.h
@@ -23,28 +23,17 @@
23#include <linux/lockdep.h> 23#include <linux/lockdep.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/cpu.h> 25#include <linux/cpu.h>
26#include <linux/notifier.h>
26 27
27/* can make br locks by using local lock for read side, global lock for write */ 28/* can make br locks by using local lock for read side, global lock for write */
28#define br_lock_init(name) name##_lock_init() 29#define br_lock_init(name) lg_lock_init(name, #name)
29#define br_read_lock(name) name##_local_lock() 30#define br_read_lock(name) lg_local_lock(name)
30#define br_read_unlock(name) name##_local_unlock() 31#define br_read_unlock(name) lg_local_unlock(name)
31#define br_write_lock(name) name##_global_lock_online() 32#define br_write_lock(name) lg_global_lock(name)
32#define br_write_unlock(name) name##_global_unlock_online() 33#define br_write_unlock(name) lg_global_unlock(name)
33 34
34#define DECLARE_BRLOCK(name) DECLARE_LGLOCK(name)
35#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name) 35#define DEFINE_BRLOCK(name) DEFINE_LGLOCK(name)
36 36
37
38#define lg_lock_init(name) name##_lock_init()
39#define lg_local_lock(name) name##_local_lock()
40#define lg_local_unlock(name) name##_local_unlock()
41#define lg_local_lock_cpu(name, cpu) name##_local_lock_cpu(cpu)
42#define lg_local_unlock_cpu(name, cpu) name##_local_unlock_cpu(cpu)
43#define lg_global_lock(name) name##_global_lock()
44#define lg_global_unlock(name) name##_global_unlock()
45#define lg_global_lock_online(name) name##_global_lock_online()
46#define lg_global_unlock_online(name) name##_global_unlock_online()
47
48#ifdef CONFIG_DEBUG_LOCK_ALLOC 37#ifdef CONFIG_DEBUG_LOCK_ALLOC
49#define LOCKDEP_INIT_MAP lockdep_init_map 38#define LOCKDEP_INIT_MAP lockdep_init_map
50 39
@@ -59,142 +48,26 @@
59#define DEFINE_LGLOCK_LOCKDEP(name) 48#define DEFINE_LGLOCK_LOCKDEP(name)
60#endif 49#endif
61 50
62 51struct lglock {
63#define DECLARE_LGLOCK(name) \ 52 arch_spinlock_t __percpu *lock;
64 extern void name##_lock_init(void); \ 53#ifdef CONFIG_DEBUG_LOCK_ALLOC
65 extern void name##_local_lock(void); \ 54 struct lock_class_key lock_key;
66 extern void name##_local_unlock(void); \ 55 struct lockdep_map lock_dep_map;
67 extern void name##_local_lock_cpu(int cpu); \ 56#endif
68 extern void name##_local_unlock_cpu(int cpu); \ 57};
69 extern void name##_global_lock(void); \
70 extern void name##_global_unlock(void); \
71 extern void name##_global_lock_online(void); \
72 extern void name##_global_unlock_online(void); \
73 58
74#define DEFINE_LGLOCK(name) \ 59#define DEFINE_LGLOCK(name) \
75 \ 60 DEFINE_LGLOCK_LOCKDEP(name); \
76 DEFINE_SPINLOCK(name##_cpu_lock); \ 61 DEFINE_PER_CPU(arch_spinlock_t, name ## _lock) \
77 cpumask_t name##_cpus __read_mostly; \ 62 = __ARCH_SPIN_LOCK_UNLOCKED; \
78 DEFINE_PER_CPU(arch_spinlock_t, name##_lock); \ 63 struct lglock name = { .lock = &name ## _lock }
79 DEFINE_LGLOCK_LOCKDEP(name); \ 64
80 \ 65void lg_lock_init(struct lglock *lg, char *name);
81 static int \ 66void lg_local_lock(struct lglock *lg);
82 name##_lg_cpu_callback(struct notifier_block *nb, \ 67void lg_local_unlock(struct lglock *lg);
83 unsigned long action, void *hcpu) \ 68void lg_local_lock_cpu(struct lglock *lg, int cpu);
84 { \ 69void lg_local_unlock_cpu(struct lglock *lg, int cpu);
85 switch (action & ~CPU_TASKS_FROZEN) { \ 70void lg_global_lock(struct lglock *lg);
86 case CPU_UP_PREPARE: \ 71void lg_global_unlock(struct lglock *lg);
87 spin_lock(&name##_cpu_lock); \ 72
88 cpu_set((unsigned long)hcpu, name##_cpus); \
89 spin_unlock(&name##_cpu_lock); \
90 break; \
91 case CPU_UP_CANCELED: case CPU_DEAD: \
92 spin_lock(&name##_cpu_lock); \
93 cpu_clear((unsigned long)hcpu, name##_cpus); \
94 spin_unlock(&name##_cpu_lock); \
95 } \
96 return NOTIFY_OK; \
97 } \
98 static struct notifier_block name##_lg_cpu_notifier = { \
99 .notifier_call = name##_lg_cpu_callback, \
100 }; \
101 void name##_lock_init(void) { \
102 int i; \
103 LOCKDEP_INIT_MAP(&name##_lock_dep_map, #name, &name##_lock_key, 0); \
104 for_each_possible_cpu(i) { \
105 arch_spinlock_t *lock; \
106 lock = &per_cpu(name##_lock, i); \
107 *lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; \
108 } \
109 register_hotcpu_notifier(&name##_lg_cpu_notifier); \
110 get_online_cpus(); \
111 for_each_online_cpu(i) \
112 cpu_set(i, name##_cpus); \
113 put_online_cpus(); \
114 } \
115 EXPORT_SYMBOL(name##_lock_init); \
116 \
117 void name##_local_lock(void) { \
118 arch_spinlock_t *lock; \
119 preempt_disable(); \
120 rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
121 lock = &__get_cpu_var(name##_lock); \
122 arch_spin_lock(lock); \
123 } \
124 EXPORT_SYMBOL(name##_local_lock); \
125 \
126 void name##_local_unlock(void) { \
127 arch_spinlock_t *lock; \
128 rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
129 lock = &__get_cpu_var(name##_lock); \
130 arch_spin_unlock(lock); \
131 preempt_enable(); \
132 } \
133 EXPORT_SYMBOL(name##_local_unlock); \
134 \
135 void name##_local_lock_cpu(int cpu) { \
136 arch_spinlock_t *lock; \
137 preempt_disable(); \
138 rwlock_acquire_read(&name##_lock_dep_map, 0, 0, _THIS_IP_); \
139 lock = &per_cpu(name##_lock, cpu); \
140 arch_spin_lock(lock); \
141 } \
142 EXPORT_SYMBOL(name##_local_lock_cpu); \
143 \
144 void name##_local_unlock_cpu(int cpu) { \
145 arch_spinlock_t *lock; \
146 rwlock_release(&name##_lock_dep_map, 1, _THIS_IP_); \
147 lock = &per_cpu(name##_lock, cpu); \
148 arch_spin_unlock(lock); \
149 preempt_enable(); \
150 } \
151 EXPORT_SYMBOL(name##_local_unlock_cpu); \
152 \
153 void name##_global_lock_online(void) { \
154 int i; \
155 spin_lock(&name##_cpu_lock); \
156 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
157 for_each_cpu(i, &name##_cpus) { \
158 arch_spinlock_t *lock; \
159 lock = &per_cpu(name##_lock, i); \
160 arch_spin_lock(lock); \
161 } \
162 } \
163 EXPORT_SYMBOL(name##_global_lock_online); \
164 \
165 void name##_global_unlock_online(void) { \
166 int i; \
167 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
168 for_each_cpu(i, &name##_cpus) { \
169 arch_spinlock_t *lock; \
170 lock = &per_cpu(name##_lock, i); \
171 arch_spin_unlock(lock); \
172 } \
173 spin_unlock(&name##_cpu_lock); \
174 } \
175 EXPORT_SYMBOL(name##_global_unlock_online); \
176 \
177 void name##_global_lock(void) { \
178 int i; \
179 preempt_disable(); \
180 rwlock_acquire(&name##_lock_dep_map, 0, 0, _RET_IP_); \
181 for_each_possible_cpu(i) { \
182 arch_spinlock_t *lock; \
183 lock = &per_cpu(name##_lock, i); \
184 arch_spin_lock(lock); \
185 } \
186 } \
187 EXPORT_SYMBOL(name##_global_lock); \
188 \
189 void name##_global_unlock(void) { \
190 int i; \
191 rwlock_release(&name##_lock_dep_map, 1, _RET_IP_); \
192 for_each_possible_cpu(i) { \
193 arch_spinlock_t *lock; \
194 lock = &per_cpu(name##_lock, i); \
195 arch_spin_unlock(lock); \
196 } \
197 preempt_enable(); \
198 } \
199 EXPORT_SYMBOL(name##_global_unlock);
200#endif 73#endif
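
The per-name macro expansion is replaced by a real struct lglock plus a handful of functions in kernel/lglock.c. A usage sketch with an illustrative lock name; the calls now take a pointer to the lglock instead of pasting the name into generated identifiers:

#include <linux/lglock.h>

DEFINE_LGLOCK(example_lglock);

static void example_init(void)
{
	/* once at startup; mainly installs the lockdep class and name */
	lg_lock_init(&example_lglock, "example_lglock");
}

static void reader_path(void)
{
	lg_local_lock(&example_lglock);		/* cheap: this CPU's lock only */
	/* ... touch state that is mostly accessed locally ... */
	lg_local_unlock(&example_lglock);
}

static void writer_path(void)
{
	lg_global_lock(&example_lglock);	/* expensive: takes every CPU's lock */
	/* ... touch or scan global state ... */
	lg_global_unlock(&example_lglock);
}
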
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 11a966e5f829..4d24d64578c4 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -54,7 +54,7 @@ extern void nlmclnt_done(struct nlm_host *host);
54 54
55extern int nlmclnt_proc(struct nlm_host *host, int cmd, 55extern int nlmclnt_proc(struct nlm_host *host, int cmd,
56 struct file_lock *fl); 56 struct file_lock *fl);
57extern int lockd_up(void); 57extern int lockd_up(struct net *net);
58extern void lockd_down(void); 58extern void lockd_down(struct net *net);
59 59
60#endif /* LINUX_LOCKD_BIND_H */ 60#endif /* LINUX_LOCKD_BIND_H */
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 6e27fa99e8b9..6a8f002b8ed3 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -64,6 +64,7 @@ enum {
64 MLX4_MAX_NUM_PF = 16, 64 MLX4_MAX_NUM_PF = 16,
65 MLX4_MAX_NUM_VF = 64, 65 MLX4_MAX_NUM_VF = 64,
66 MLX4_MFUNC_MAX = 80, 66 MLX4_MFUNC_MAX = 80,
67 MLX4_MAX_EQ_NUM = 1024,
67 MLX4_MFUNC_EQ_NUM = 4, 68 MLX4_MFUNC_EQ_NUM = 4,
68 MLX4_MFUNC_MAX_EQES = 8, 69 MLX4_MFUNC_MAX_EQES = 8,
69 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1) 70 MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
@@ -239,6 +240,10 @@ static inline u64 mlx4_fw_ver(u64 major, u64 minor, u64 subminor)
239 return (major << 32) | (minor << 16) | subminor; 240 return (major << 32) | (minor << 16) | subminor;
240} 241}
241 242
243struct mlx4_phys_caps {
244 u32 num_phys_eqs;
245};
246
242struct mlx4_caps { 247struct mlx4_caps {
243 u64 fw_ver; 248 u64 fw_ver;
244 u32 function; 249 u32 function;
@@ -499,6 +504,7 @@ struct mlx4_dev {
499 unsigned long flags; 504 unsigned long flags;
500 unsigned long num_slaves; 505 unsigned long num_slaves;
501 struct mlx4_caps caps; 506 struct mlx4_caps caps;
507 struct mlx4_phys_caps phys_caps;
502 struct radix_tree_root qp_table_tree; 508 struct radix_tree_root qp_table_tree;
503 u8 rev_id; 509 u8 rev_id;
504 char board_id[MLX4_BOARD_ID_LEN]; 510 char board_id[MLX4_BOARD_ID_LEN];
diff --git a/include/linux/mm.h b/include/linux/mm.h
index ce26716238c3..b36d08ce5c57 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1392,7 +1392,7 @@ extern unsigned long get_unmapped_area(struct file *, unsigned long, unsigned lo
1392extern unsigned long mmap_region(struct file *file, unsigned long addr, 1392extern unsigned long mmap_region(struct file *file, unsigned long addr,
1393 unsigned long len, unsigned long flags, 1393 unsigned long len, unsigned long flags,
1394 vm_flags_t vm_flags, unsigned long pgoff); 1394 vm_flags_t vm_flags, unsigned long pgoff);
1395extern unsigned long do_mmap(struct file *, unsigned long, 1395extern unsigned long do_mmap_pgoff(struct file *, unsigned long,
1396 unsigned long, unsigned long, 1396 unsigned long, unsigned long,
1397 unsigned long, unsigned long); 1397 unsigned long, unsigned long);
1398extern int do_munmap(struct mm_struct *, unsigned long, size_t); 1398extern int do_munmap(struct mm_struct *, unsigned long, size_t);
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index dad95bdd06d7..704a626d94a0 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -57,8 +57,18 @@ struct page {
57 }; 57 };
58 58
59 union { 59 union {
60#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
61 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
60 /* Used for cmpxchg_double in slub */ 62 /* Used for cmpxchg_double in slub */
61 unsigned long counters; 63 unsigned long counters;
64#else
65 /*
66 * Keep _count separate from slub cmpxchg_double data.
67 * As the rest of the double word is protected by
68 * slab_lock but _count is not.
69 */
70 unsigned counters;
71#endif
62 72
63 struct { 73 struct {
64 74
diff --git a/include/linux/mmc/sdhci-spear.h b/include/linux/mmc/sdhci-spear.h
index 5cdc96da9dd5..e78c0e236e9d 100644
--- a/include/linux/mmc/sdhci-spear.h
+++ b/include/linux/mmc/sdhci-spear.h
@@ -4,7 +4,7 @@
4 * SDHCI declarations specific to ST SPEAr platform 4 * SDHCI declarations specific to ST SPEAr platform
5 * 5 *
6 * Copyright (C) 2010 ST Microelectronics 6 * Copyright (C) 2010 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/mmc/sdio.h b/include/linux/mmc/sdio.h
index c9fe66c58f8f..17446d3c3602 100644
--- a/include/linux/mmc/sdio.h
+++ b/include/linux/mmc/sdio.h
@@ -98,7 +98,9 @@
98 98
99#define SDIO_CCCR_IF 0x07 /* bus interface controls */ 99#define SDIO_CCCR_IF 0x07 /* bus interface controls */
100 100
101#define SDIO_BUS_WIDTH_MASK 0x03 /* data bus width setting */
101#define SDIO_BUS_WIDTH_1BIT 0x00 102#define SDIO_BUS_WIDTH_1BIT 0x00
103#define SDIO_BUS_WIDTH_RESERVED 0x01
102#define SDIO_BUS_WIDTH_4BIT 0x02 104#define SDIO_BUS_WIDTH_4BIT 0x02
103#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */ 105#define SDIO_BUS_ECSI 0x20 /* Enable continuous SPI interrupt */
104#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */ 106#define SDIO_BUS_SCSI 0x40 /* Support continuous SPI interrupt */
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 1b14d25162cb..d6a58065c09c 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -128,7 +128,7 @@ struct kparam_array
128 * The ops can have NULL set or get functions. 128 * The ops can have NULL set or get functions.
129 */ 129 */
130#define module_param_cb(name, ops, arg, perm) \ 130#define module_param_cb(name, ops, arg, perm) \
131 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, 0) 131 __module_param_call(MODULE_PARAM_PREFIX, name, ops, arg, perm, -1)
132 132
133/** 133/**
134 * <level>_param_cb - general callback for a module/cmdline parameter 134 * <level>_param_cb - general callback for a module/cmdline parameter
@@ -192,7 +192,7 @@ struct kparam_array
192 { (void *)set, (void *)get }; \ 192 { (void *)set, (void *)get }; \
193 __module_param_call(MODULE_PARAM_PREFIX, \ 193 __module_param_call(MODULE_PARAM_PREFIX, \
194 name, &__param_ops_##name, arg, \ 194 name, &__param_ops_##name, arg, \
195 (perm) + sizeof(__check_old_set_param(set))*0, 0) 195 (perm) + sizeof(__check_old_set_param(set))*0, -1)
196 196
197/* We don't get oldget: it's often a new-style param_get_uint, etc. */ 197/* We don't get oldget: it's often a new-style param_get_uint, etc. */
198static inline int 198static inline int
@@ -272,7 +272,7 @@ static inline void __kernel_param_unlock(void)
272 */ 272 */
273#define core_param(name, var, type, perm) \ 273#define core_param(name, var, type, perm) \
274 param_check_##type(name, &(var)); \ 274 param_check_##type(name, &(var)); \
275 __module_param_call("", name, &param_ops_##type, &var, perm, 0) 275 __module_param_call("", name, &param_ops_##type, &var, perm, -1)
276#endif /* !MODULE */ 276#endif /* !MODULE */
277 277
278/** 278/**
@@ -290,7 +290,7 @@ static inline void __kernel_param_unlock(void)
290 = { len, string }; \ 290 = { len, string }; \
291 __module_param_call(MODULE_PARAM_PREFIX, name, \ 291 __module_param_call(MODULE_PARAM_PREFIX, name, \
292 &param_ops_string, \ 292 &param_ops_string, \
293 .str = &__param_string_##name, perm, 0); \ 293 .str = &__param_string_##name, perm, -1); \
294 __MODULE_PARM_TYPE(name, "string") 294 __MODULE_PARM_TYPE(name, "string")
295 295
296/** 296/**
@@ -432,7 +432,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
432 __module_param_call(MODULE_PARAM_PREFIX, name, \ 432 __module_param_call(MODULE_PARAM_PREFIX, name, \
433 &param_array_ops, \ 433 &param_array_ops, \
434 .arr = &__param_arr_##name, \ 434 .arr = &__param_arr_##name, \
435 perm, 0); \ 435 perm, -1); \
436 __MODULE_PARM_TYPE(name, "array of " #type) 436 __MODULE_PARM_TYPE(name, "array of " #type)
437 437
438extern struct kernel_param_ops param_array_ops; 438extern struct kernel_param_ops param_array_ops;
diff --git a/include/linux/msdos_fs.h b/include/linux/msdos_fs.h
index 34066e65fdeb..11cc2ac67e75 100644
--- a/include/linux/msdos_fs.h
+++ b/include/linux/msdos_fs.h
@@ -21,8 +21,9 @@
21#define CT_LE_W(v) cpu_to_le16(v) 21#define CT_LE_W(v) cpu_to_le16(v)
22#define CT_LE_L(v) cpu_to_le32(v) 22#define CT_LE_L(v) cpu_to_le32(v)
23 23
24#define MSDOS_ROOT_INO 1 /* The root inode number */
25#define MSDOS_FSINFO_INO 2 /* Used for managing the FSINFO block */
24 26
25#define MSDOS_ROOT_INO 1 /* == MINIX_ROOT_INO */
26#define MSDOS_DIR_BITS 5 /* log2(sizeof(struct msdos_dir_entry)) */ 27#define MSDOS_DIR_BITS 5 /* log2(sizeof(struct msdos_dir_entry)) */
27 28
28/* directory limit */ 29/* directory limit */
diff --git a/include/linux/mtd/gpmi-nand.h b/include/linux/mtd/gpmi-nand.h
index 69b6dbf46b5e..ed3c4e09f3d1 100644
--- a/include/linux/mtd/gpmi-nand.h
+++ b/include/linux/mtd/gpmi-nand.h
@@ -23,12 +23,12 @@
23#define GPMI_NAND_RES_SIZE 6 23#define GPMI_NAND_RES_SIZE 6
24 24
25/* Resource names for the GPMI NAND driver. */ 25/* Resource names for the GPMI NAND driver. */
26#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "GPMI NAND GPMI Registers" 26#define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand"
27#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt" 27#define GPMI_NAND_GPMI_INTERRUPT_RES_NAME "GPMI NAND GPMI Interrupt"
28#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "GPMI NAND BCH Registers" 28#define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch"
29#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "GPMI NAND BCH Interrupt" 29#define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch"
30#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels" 30#define GPMI_NAND_DMA_CHANNELS_RES_NAME "GPMI NAND DMA Channels"
31#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "GPMI NAND DMA Interrupt" 31#define GPMI_NAND_DMA_INTERRUPT_RES_NAME "gpmi-dma"
32 32
33/** 33/**
34 * struct gpmi_nand_platform_data - GPMI NAND driver platform data. 34 * struct gpmi_nand_platform_data - GPMI NAND driver platform data.
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index cf5ea8cdcf8e..63dadc0dfb62 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -157,6 +157,15 @@ struct mtd_info {
157 unsigned int erasesize_mask; 157 unsigned int erasesize_mask;
158 unsigned int writesize_mask; 158 unsigned int writesize_mask;
159 159
160 /*
161 * read ops return -EUCLEAN if max number of bitflips corrected on any
162 * one region comprising an ecc step equals or exceeds this value.
163 * Settable by driver, else defaults to ecc_strength. User can override
164 * in sysfs. N.B. The meaning of the -EUCLEAN return code has changed;
165 * see Documentation/ABI/testing/sysfs-class-mtd for more detail.
166 */
167 unsigned int bitflip_threshold;
168
160 // Kernel-only stuff starts here. 169 // Kernel-only stuff starts here.
161 const char *name; 170 const char *name;
162 int index; 171 int index;
@@ -164,7 +173,7 @@ struct mtd_info {
164 /* ECC layout structure pointer - read only! */ 173 /* ECC layout structure pointer - read only! */
165 struct nand_ecclayout *ecclayout; 174 struct nand_ecclayout *ecclayout;
166 175
167 /* max number of correctible bit errors per writesize */ 176 /* max number of correctible bit errors per ecc step */
168 unsigned int ecc_strength; 177 unsigned int ecc_strength;
169 178
170 /* Data for variable erase regions. If numeraseregions is zero, 179 /* Data for variable erase regions. If numeraseregions is zero,
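
With bitflip_threshold, -EUCLEAN no longer means "some bitflips were corrected" but "enough bitflips were corrected in one ECC step that the block should be scrubbed". A sketch of how a caller is expected to react (scrub_block() is a hypothetical recovery action):

#include <linux/mtd/mtd.h>

static int read_and_maybe_scrub(struct mtd_info *mtd, loff_t from,
				size_t len, u_char *buf)
{
	size_t retlen;
	int ret = mtd_read(mtd, from, len, &retlen, buf);

	if (ret == -EUCLEAN) {
		/* data is intact, but the block is degrading: move it */
		scrub_block(mtd, from);		/* hypothetical */
		ret = 0;
	}
	return ret;
}
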
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index 1482340d3d9f..57977c640529 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -161,8 +161,6 @@ typedef enum {
161 * Option constants for bizarre disfunctionality and real 161 * Option constants for bizarre disfunctionality and real
162 * features. 162 * features.
163 */ 163 */
164/* Chip can not auto increment pages */
165#define NAND_NO_AUTOINCR 0x00000001
166/* Buswidth is 16 bit */ 164/* Buswidth is 16 bit */
167#define NAND_BUSWIDTH_16 0x00000002 165#define NAND_BUSWIDTH_16 0x00000002
168/* Device supports partial programming without padding */ 166/* Device supports partial programming without padding */
@@ -207,7 +205,6 @@ typedef enum {
207 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK) 205 (NAND_NO_PADDING | NAND_CACHEPRG | NAND_COPYBACK)
208 206
209/* Macros to identify the above */ 207/* Macros to identify the above */
210#define NAND_CANAUTOINCR(chip) (!(chip->options & NAND_NO_AUTOINCR))
211#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING)) 208#define NAND_MUST_PAD(chip) (!(chip->options & NAND_NO_PADDING))
212#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG)) 209#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
213#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK)) 210#define NAND_HAS_COPYBACK(chip) ((chip->options & NAND_COPYBACK))
@@ -216,7 +213,7 @@ typedef enum {
216 && (chip->page_shift > 9)) 213 && (chip->page_shift > 9))
217 214
218/* Mask to zero out the chip options, which come from the id table */ 215/* Mask to zero out the chip options, which come from the id table */
219#define NAND_CHIPOPTIONS_MSK (0x0000ffff & ~NAND_NO_AUTOINCR) 216#define NAND_CHIPOPTIONS_MSK 0x0000ffff
220 217
221/* Non chip related options */ 218/* Non chip related options */
222/* This option skips the bbt scan during initialization. */ 219/* This option skips the bbt scan during initialization. */
@@ -363,21 +360,20 @@ struct nand_ecc_ctrl {
363 int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc, 360 int (*correct)(struct mtd_info *mtd, uint8_t *dat, uint8_t *read_ecc,
364 uint8_t *calc_ecc); 361 uint8_t *calc_ecc);
365 int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, 362 int (*read_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
366 uint8_t *buf, int page); 363 uint8_t *buf, int oob_required, int page);
367 void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip, 364 void (*write_page_raw)(struct mtd_info *mtd, struct nand_chip *chip,
368 const uint8_t *buf); 365 const uint8_t *buf, int oob_required);
369 int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip, 366 int (*read_page)(struct mtd_info *mtd, struct nand_chip *chip,
370 uint8_t *buf, int page); 367 uint8_t *buf, int oob_required, int page);
371 int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip, 368 int (*read_subpage)(struct mtd_info *mtd, struct nand_chip *chip,
372 uint32_t offs, uint32_t len, uint8_t *buf); 369 uint32_t offs, uint32_t len, uint8_t *buf);
373 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, 370 void (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
374 const uint8_t *buf); 371 const uint8_t *buf, int oob_required);
375 int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 372 int (*write_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
376 int page); 373 int page);
377 int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip, 374 int (*read_oob_raw)(struct mtd_info *mtd, struct nand_chip *chip,
378 int page, int sndcmd); 375 int page);
379 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page, 376 int (*read_oob)(struct mtd_info *mtd, struct nand_chip *chip, int page);
380 int sndcmd);
381 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip, 377 int (*write_oob)(struct mtd_info *mtd, struct nand_chip *chip,
382 int page); 378 int page);
383}; 379};
@@ -459,6 +455,8 @@ struct nand_buffers {
459 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1 455 * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
460 * @pagebuf: [INTERN] holds the pagenumber which is currently in 456 * @pagebuf: [INTERN] holds the pagenumber which is currently in
461 * data_buf. 457 * data_buf.
458 * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
459 * currently in data_buf.
462 * @subpagesize: [INTERN] holds the subpagesize 460 * @subpagesize: [INTERN] holds the subpagesize
463 * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded), 461 * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
464 * non 0 if ONFI supported. 462 * non 0 if ONFI supported.
@@ -505,7 +503,8 @@ struct nand_chip {
505 int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state, 503 int (*errstat)(struct mtd_info *mtd, struct nand_chip *this, int state,
506 int status, int page); 504 int status, int page);
507 int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip, 505 int (*write_page)(struct mtd_info *mtd, struct nand_chip *chip,
508 const uint8_t *buf, int page, int cached, int raw); 506 const uint8_t *buf, int oob_required, int page,
507 int cached, int raw);
509 508
510 int chip_delay; 509 int chip_delay;
511 unsigned int options; 510 unsigned int options;
@@ -519,6 +518,7 @@ struct nand_chip {
519 uint64_t chipsize; 518 uint64_t chipsize;
520 int pagemask; 519 int pagemask;
521 int pagebuf; 520 int pagebuf;
521 unsigned int pagebuf_bitflips;
522 int subpagesize; 522 int subpagesize;
523 uint8_t cellinfo; 523 uint8_t cellinfo;
524 int badblockpos; 524 int badblockpos;
@@ -654,6 +654,7 @@ struct platform_nand_ctrl {
654 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl); 654 void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
655 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len); 655 void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
656 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len); 656 void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
657 unsigned char (*read_byte)(struct mtd_info *mtd);
657 void *priv; 658 void *priv;
658}; 659};
659 660
diff --git a/include/linux/net.h b/include/linux/net.h
index 2d7510f38934..e9ac2df079ba 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -313,5 +313,8 @@ extern int kernel_sock_shutdown(struct socket *sock,
313 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \ 313 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
314 "-type-" __stringify(type)) 314 "-type-" __stringify(type))
315 315
316#define MODULE_ALIAS_NET_PF_PROTO_NAME(pf, proto, name) \
317 MODULE_ALIAS("net-pf-" __stringify(pf) "-proto-" __stringify(proto) \
318 name)
316#endif /* __KERNEL__ */ 319#endif /* __KERNEL__ */
317#endif /* _LINUX_NET_H */ 320#endif /* _LINUX_NET_H */
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e7fd468f7126..d94cb1431519 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2795,15 +2795,15 @@ do { \
2795#define netif_info(priv, type, dev, fmt, args...) \ 2795#define netif_info(priv, type, dev, fmt, args...) \
2796 netif_level(info, priv, type, dev, fmt, ##args) 2796 netif_level(info, priv, type, dev, fmt, ##args)
2797 2797
2798#if defined(DEBUG) 2798#if defined(CONFIG_DYNAMIC_DEBUG)
2799#define netif_dbg(priv, type, dev, format, args...) \
2800 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2801#elif defined(CONFIG_DYNAMIC_DEBUG)
2802#define netif_dbg(priv, type, netdev, format, args...) \ 2799#define netif_dbg(priv, type, netdev, format, args...) \
2803do { \ 2800do { \
2804 if (netif_msg_##type(priv)) \ 2801 if (netif_msg_##type(priv)) \
2805 dynamic_netdev_dbg(netdev, format, ##args); \ 2802 dynamic_netdev_dbg(netdev, format, ##args); \
2806} while (0) 2803} while (0)
2804#elif defined(DEBUG)
2805#define netif_dbg(priv, type, dev, format, args...) \
2806 netif_printk(priv, type, KERN_DEBUG, dev, format, ##args)
2807#else 2807#else
2808#define netif_dbg(priv, type, dev, format, args...) \ 2808#define netif_dbg(priv, type, dev, format, args...) \
2809({ \ 2809({ \
diff --git a/include/linux/netfilter/xt_HMARK.h b/include/linux/netfilter/xt_HMARK.h
index abb1650940d2..826fc5807577 100644
--- a/include/linux/netfilter/xt_HMARK.h
+++ b/include/linux/netfilter/xt_HMARK.h
@@ -27,7 +27,12 @@ union hmark_ports {
27 __u16 src; 27 __u16 src;
28 __u16 dst; 28 __u16 dst;
29 } p16; 29 } p16;
30 struct {
31 __be16 src;
32 __be16 dst;
33 } b16;
30 __u32 v32; 34 __u32 v32;
35 __be32 b32;
31}; 36};
32 37
33struct xt_hmark_info { 38struct xt_hmark_info {
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index fbb78fb09bd2..f58325a1d8fb 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -25,6 +25,7 @@ struct nfs41_impl_id;
25 */ 25 */
26struct nfs_client { 26struct nfs_client {
27 atomic_t cl_count; 27 atomic_t cl_count;
28 atomic_t cl_mds_count;
28 int cl_cons_state; /* current construction state (-ve: init error) */ 29 int cl_cons_state; /* current construction state (-ve: init error) */
29#define NFS_CS_READY 0 /* ready to be used */ 30#define NFS_CS_READY 0 /* ready to be used */
30#define NFS_CS_INITING 1 /* busy initialising */ 31#define NFS_CS_INITING 1 /* busy initialising */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index d1a7bf51c326..8aadd90b808a 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -348,6 +348,7 @@ struct nfs_openargs {
348 const struct qstr * name; 348 const struct qstr * name;
349 const struct nfs_server *server; /* Needed for ID mapping */ 349 const struct nfs_server *server; /* Needed for ID mapping */
350 const u32 * bitmask; 350 const u32 * bitmask;
351 const u32 * open_bitmap;
351 __u32 claim; 352 __u32 claim;
352 struct nfs4_sequence_args seq_args; 353 struct nfs4_sequence_args seq_args;
353}; 354};
@@ -1236,6 +1237,7 @@ struct nfs_pgio_header {
1236 struct list_head rpc_list; 1237 struct list_head rpc_list;
1237 atomic_t refcnt; 1238 atomic_t refcnt;
1238 struct nfs_page *req; 1239 struct nfs_page *req;
1240 struct nfs_writeverf *verf;
1239 struct pnfs_layout_segment *lseg; 1241 struct pnfs_layout_segment *lseg;
1240 loff_t io_start; 1242 loff_t io_start;
1241 const struct rpc_call_ops *mds_ops; 1243 const struct rpc_call_ops *mds_ops;
@@ -1273,6 +1275,7 @@ struct nfs_write_data {
1273struct nfs_write_header { 1275struct nfs_write_header {
1274 struct nfs_pgio_header header; 1276 struct nfs_pgio_header header;
1275 struct nfs_write_data rpc_data; 1277 struct nfs_write_data rpc_data;
1278 struct nfs_writeverf verf;
1276}; 1279};
1277 1280
1278struct nfs_mds_commit_info { 1281struct nfs_mds_commit_info {
diff --git a/include/linux/nfsd/export.h b/include/linux/nfsd/export.h
index f85308e688fd..e33f747b173c 100644
--- a/include/linux/nfsd/export.h
+++ b/include/linux/nfsd/export.h
@@ -103,6 +103,7 @@ struct svc_export {
103 struct nfsd4_fs_locations ex_fslocs; 103 struct nfsd4_fs_locations ex_fslocs;
104 int ex_nflavors; 104 int ex_nflavors;
105 struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST]; 105 struct exp_flavor_info ex_flavors[MAX_SECINFO_LIST];
106 struct cache_detail *cd;
106}; 107};
107 108
108/* an "export key" (expkey) maps a filehandle fragment to an 109/* an "export key" (expkey) maps a filehandle fragment to an

@@ -129,24 +130,22 @@ __be32 check_nfsd_access(struct svc_export *exp, struct svc_rqst *rqstp);
129/* 130/*
130 * Function declarations 131 * Function declarations
131 */ 132 */
132int nfsd_export_init(void); 133int nfsd_export_init(struct net *);
133void nfsd_export_shutdown(void); 134void nfsd_export_shutdown(struct net *);
134void nfsd_export_flush(void); 135void nfsd_export_flush(struct net *);
135struct svc_export * rqst_exp_get_by_name(struct svc_rqst *, 136struct svc_export * rqst_exp_get_by_name(struct svc_rqst *,
136 struct path *); 137 struct path *);
137struct svc_export * rqst_exp_parent(struct svc_rqst *, 138struct svc_export * rqst_exp_parent(struct svc_rqst *,
138 struct path *); 139 struct path *);
139struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *); 140struct svc_export * rqst_find_fsidzero_export(struct svc_rqst *);
140int exp_rootfh(struct auth_domain *, 141int exp_rootfh(struct net *, struct auth_domain *,
141 char *path, struct knfsd_fh *, int maxsize); 142 char *path, struct knfsd_fh *, int maxsize);
142__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *); 143__be32 exp_pseudoroot(struct svc_rqst *, struct svc_fh *);
143__be32 nfserrno(int errno); 144__be32 nfserrno(int errno);
144 145
145extern struct cache_detail svc_export_cache;
146
147static inline void exp_put(struct svc_export *exp) 146static inline void exp_put(struct svc_export *exp)
148{ 147{
149 cache_put(&exp->h, &svc_export_cache); 148 cache_put(&exp->h, exp->cd);
150} 149}
151 150
152static inline void exp_get(struct svc_export *exp) 151static inline void exp_get(struct svc_export *exp)
diff --git a/include/linux/pata_arasan_cf_data.h b/include/linux/pata_arasan_cf_data.h
index a6ee9aa898bb..a7b4fc386e63 100644
--- a/include/linux/pata_arasan_cf_data.h
+++ b/include/linux/pata_arasan_cf_data.h
@@ -4,7 +4,7 @@
4 * Arasan Compact Flash host controller platform data header file 4 * Arasan Compact Flash host controller platform data header file
5 * 5 *
6 * Copyright (C) 2011 ST Microelectronics 6 * Copyright (C) 2011 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com> 7 * Viresh Kumar <viresh.linux@gmail.com>
8 * 8 *
9 * This file is licensed under the terms of the GNU General Public 9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any 10 * License version 2. This program is licensed "as is" without any
diff --git a/include/linux/pci.h b/include/linux/pci.h
index d8c379dba6ad..fefb4e19bf6a 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -176,6 +176,8 @@ enum pci_dev_flags {
176 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2, 176 PCI_DEV_FLAGS_NO_D3 = (__force pci_dev_flags_t) 2,
177 /* Provide indication device is assigned by a Virtual Machine Manager */ 177 /* Provide indication device is assigned by a Virtual Machine Manager */
178 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4, 178 PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) 4,
179 /* Device causes system crash if in D3 during S3 sleep */
180 PCI_DEV_FLAGS_NO_D3_DURING_SLEEP = (__force pci_dev_flags_t) 8,
179}; 181};
180 182
181enum pci_irq_reroute_variant { 183enum pci_irq_reroute_variant {
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index f32578634d9d..45db49f64bb4 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -555,6 +555,8 @@ enum perf_event_type {
555 PERF_RECORD_MAX, /* non-ABI */ 555 PERF_RECORD_MAX, /* non-ABI */
556}; 556};
557 557
558#define PERF_MAX_STACK_DEPTH 127
559
558enum perf_callchain_context { 560enum perf_callchain_context {
559 PERF_CONTEXT_HV = (__u64)-32, 561 PERF_CONTEXT_HV = (__u64)-32,
560 PERF_CONTEXT_KERNEL = (__u64)-128, 562 PERF_CONTEXT_KERNEL = (__u64)-128,
@@ -609,8 +611,6 @@ struct perf_guest_info_callbacks {
609#include <linux/sysfs.h> 611#include <linux/sysfs.h>
610#include <asm/local.h> 612#include <asm/local.h>
611 613
612#define PERF_MAX_STACK_DEPTH 255
613
614struct perf_callchain_entry { 614struct perf_callchain_entry {
615 __u64 nr; 615 __u64 nr;
616 __u64 ip[PERF_MAX_STACK_DEPTH]; 616 __u64 ip[PERF_MAX_STACK_DEPTH];
diff --git a/include/linux/power/charger-manager.h b/include/linux/power/charger-manager.h
index 4f75e531c112..241065c9ce51 100644
--- a/include/linux/power/charger-manager.h
+++ b/include/linux/power/charger-manager.h
@@ -18,6 +18,8 @@
18#include <linux/power_supply.h> 18#include <linux/power_supply.h>
19 19
20enum data_source { 20enum data_source {
21 CM_BATTERY_PRESENT,
22 CM_NO_BATTERY,
21 CM_FUEL_GAUGE, 23 CM_FUEL_GAUGE,
22 CM_CHARGER_STAT, 24 CM_CHARGER_STAT,
23}; 25};
@@ -29,6 +31,16 @@ enum polling_modes {
29 CM_POLL_CHARGING_ONLY, 31 CM_POLL_CHARGING_ONLY,
30}; 32};
31 33
34enum cm_event_types {
35 CM_EVENT_UNKNOWN = 0,
36 CM_EVENT_BATT_FULL,
37 CM_EVENT_BATT_IN,
38 CM_EVENT_BATT_OUT,
39 CM_EVENT_EXT_PWR_IN_OUT,
40 CM_EVENT_CHG_START_STOP,
41 CM_EVENT_OTHERS,
42};
43
32/** 44/**
33 * struct charger_global_desc 45 * struct charger_global_desc
34 * @rtc_name: the name of RTC used to wake up the system from suspend. 46 * @rtc_name: the name of RTC used to wake up the system from suspend.
@@ -38,11 +50,18 @@ enum polling_modes {
38 * rtc_only_wakeup() returning false. 50 * rtc_only_wakeup() returning false.
39 * If the RTC given to CM is the only wakeup reason, 51 * If the RTC given to CM is the only wakeup reason,
40 * rtc_only_wakeup should return true. 52 * rtc_only_wakeup should return true.
53 * @assume_timer_stops_in_suspend:
54 * Assume that the jiffy timer stops in suspend-to-RAM.
55 * When enabled, CM does not rely on jiffies value in
56 * suspend_again and assumes that jiffies value does not
57 * change during suspend.
41 */ 58 */
42struct charger_global_desc { 59struct charger_global_desc {
43 char *rtc_name; 60 char *rtc_name;
44 61
45 bool (*rtc_only_wakeup)(void); 62 bool (*rtc_only_wakeup)(void);
63
64 bool assume_timer_stops_in_suspend;
46}; 65};
47 66
48/** 67/**
@@ -50,6 +69,11 @@ struct charger_global_desc {
50 * @psy_name: the name of power-supply-class for charger manager 69 * @psy_name: the name of power-supply-class for charger manager
51 * @polling_mode: 70 * @polling_mode:
52 * Determine which polling mode will be used 71 * Determine which polling mode will be used
72 * @fullbatt_vchkdrop_ms:
73 * @fullbatt_vchkdrop_uV:
74 * Check voltage drop after the battery is fully charged.
75 * If it has dropped more than fullbatt_vchkdrop_uV after
76 * fullbatt_vchkdrop_ms, CM will restart charging.
53 * @fullbatt_uV: voltage in microvolt 77 * @fullbatt_uV: voltage in microvolt
54 * If it is not being charged and VBATT >= fullbatt_uV, 78 * If it is not being charged and VBATT >= fullbatt_uV,
55 * it is assumed to be full. 79 * it is assumed to be full.
@@ -76,6 +100,8 @@ struct charger_desc {
76 enum polling_modes polling_mode; 100 enum polling_modes polling_mode;
77 unsigned int polling_interval_ms; 101 unsigned int polling_interval_ms;
78 102
103 unsigned int fullbatt_vchkdrop_ms;
104 unsigned int fullbatt_vchkdrop_uV;
79 unsigned int fullbatt_uV; 105 unsigned int fullbatt_uV;
80 106
81 enum data_source battery_present; 107 enum data_source battery_present;
@@ -101,6 +127,11 @@ struct charger_desc {
101 * @fuel_gauge: power_supply for fuel gauge 127 * @fuel_gauge: power_supply for fuel gauge
102 * @charger_stat: array of power_supply for chargers 128 * @charger_stat: array of power_supply for chargers
103 * @charger_enabled: the state of charger 129 * @charger_enabled: the state of charger
130 * @fullbatt_vchk_jiffies_at:
131 * jiffies at the time full battery check will occur.
132 * @fullbatt_vchk_uV: voltage in microvolt
133 * criteria for full battery
134 * @fullbatt_vchk_work: work queue for full battery check
104 * @emergency_stop: 135 * @emergency_stop:
105 * When setting true, stop charging 136 * When setting true, stop charging
106 * @last_temp_mC: the measured temperature in milli-Celsius 137 * @last_temp_mC: the measured temperature in milli-Celsius
@@ -121,6 +152,10 @@ struct charger_manager {
121 152
122 bool charger_enabled; 153 bool charger_enabled;
123 154
155 unsigned long fullbatt_vchk_jiffies_at;
156 unsigned int fullbatt_vchk_uV;
157 struct delayed_work fullbatt_vchk_work;
158
124 int emergency_stop; 159 int emergency_stop;
125 int last_temp_mC; 160 int last_temp_mC;
126 161
@@ -134,14 +169,13 @@ struct charger_manager {
134#ifdef CONFIG_CHARGER_MANAGER 169#ifdef CONFIG_CHARGER_MANAGER
135extern int setup_charger_manager(struct charger_global_desc *gd); 170extern int setup_charger_manager(struct charger_global_desc *gd);
136extern bool cm_suspend_again(void); 171extern bool cm_suspend_again(void);
172extern void cm_notify_event(struct power_supply *psy,
173 enum cm_event_types type, char *msg);
137#else 174#else
138static void __maybe_unused setup_charger_manager(struct charger_global_desc *gd) 175static inline int setup_charger_manager(struct charger_global_desc *gd)
139{ } 176{ return 0; }
140 177static inline bool cm_suspend_again(void) { return false; }
141static bool __maybe_unused cm_suspend_again(void) 178static inline void cm_notify_event(struct power_supply *psy,
142{ 179 enum cm_event_types type, char *msg) { }
143 return false;
144}
145#endif 180#endif
146
147#endif /* _CHARGER_MANAGER_H */ 181#endif /* _CHARGER_MANAGER_H */
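
cm_notify_event() lets a charger or fuel-gauge driver push events (battery in/out, full, external power change) to the charger manager instead of waiting for the next poll. A sketch of an interrupt handler doing so; struct my_gauge and the IRQ wiring are illustrative:

#include <linux/interrupt.h>
#include <linux/power_supply.h>
#include <linux/power/charger-manager.h>

struct my_gauge {			/* hypothetical driver state */
	struct power_supply psy;
};

static irqreturn_t battery_full_irq(int irq, void *data)
{
	struct my_gauge *gauge = data;

	cm_notify_event(&gauge->psy, CM_EVENT_BATT_FULL,
			"fuel gauge reports full charge");
	return IRQ_HANDLED;
}
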
diff --git a/include/linux/power/max17042_battery.h b/include/linux/power/max17042_battery.h
index e01b167e66f0..89dd84f47c6e 100644
--- a/include/linux/power/max17042_battery.h
+++ b/include/linux/power/max17042_battery.h
@@ -116,6 +116,18 @@ enum max17042_register {
116 MAX17042_VFSOC = 0xFF, 116 MAX17042_VFSOC = 0xFF,
117}; 117};
118 118
119/* Registers specific to max17047/50 */
120enum max17047_register {
121 MAX17047_QRTbl00 = 0x12,
122 MAX17047_FullSOCThr = 0x13,
123 MAX17047_QRTbl10 = 0x22,
124 MAX17047_QRTbl20 = 0x32,
125 MAX17047_V_empty = 0x3A,
126 MAX17047_QRTbl30 = 0x42,
127};
128
129enum max170xx_chip_type {MAX17042, MAX17047};
130
119/* 131/*
120 * used for setting a register to a desired value 132 * used for setting a register to a desired value
121 * addr : address for a register 133 * addr : address for a register
@@ -144,6 +156,7 @@ struct max17042_config_data {
144 u16 shdntimer; /* 0x03F */ 156 u16 shdntimer; /* 0x03F */
145 157
146 /* App data */ 158 /* App data */
159 u16 full_soc_thresh; /* 0x13 */
147 u16 design_cap; /* 0x18 */ 160 u16 design_cap; /* 0x18 */
148 u16 ichgt_term; /* 0x1E */ 161 u16 ichgt_term; /* 0x1E */
149 162
@@ -162,6 +175,10 @@ struct max17042_config_data {
162 u16 lavg_empty; /* 0x36 */ 175 u16 lavg_empty; /* 0x36 */
163 u16 dqacc; /* 0x45 */ 176 u16 dqacc; /* 0x45 */
164 u16 dpacc; /* 0x46 */ 177 u16 dpacc; /* 0x46 */
178 u16 qrtbl00; /* 0x12 */
179 u16 qrtbl10; /* 0x22 */
180 u16 qrtbl20; /* 0x32 */
181 u16 qrtbl30; /* 0x42 */
165 182
166 /* Cell technology from power_supply.h */ 183 /* Cell technology from power_supply.h */
167 u16 cell_technology; 184 u16 cell_technology;
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index c38c13db8832..3b912bee28d1 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -96,6 +96,7 @@ enum power_supply_property {
96 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN, 96 POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN,
97 POWER_SUPPLY_PROP_VOLTAGE_NOW, 97 POWER_SUPPLY_PROP_VOLTAGE_NOW,
98 POWER_SUPPLY_PROP_VOLTAGE_AVG, 98 POWER_SUPPLY_PROP_VOLTAGE_AVG,
99 POWER_SUPPLY_PROP_VOLTAGE_OCV,
99 POWER_SUPPLY_PROP_CURRENT_MAX, 100 POWER_SUPPLY_PROP_CURRENT_MAX,
100 POWER_SUPPLY_PROP_CURRENT_NOW, 101 POWER_SUPPLY_PROP_CURRENT_NOW,
101 POWER_SUPPLY_PROP_CURRENT_AVG, 102 POWER_SUPPLY_PROP_CURRENT_AVG,
@@ -211,7 +212,7 @@ extern void power_supply_changed(struct power_supply *psy);
211extern int power_supply_am_i_supplied(struct power_supply *psy); 212extern int power_supply_am_i_supplied(struct power_supply *psy);
212extern int power_supply_set_battery_charged(struct power_supply *psy); 213extern int power_supply_set_battery_charged(struct power_supply *psy);
213 214
214#if defined(CONFIG_POWER_SUPPLY) || defined(CONFIG_POWER_SUPPLY_MODULE) 215#ifdef CONFIG_POWER_SUPPLY
215extern int power_supply_is_system_supplied(void); 216extern int power_supply_is_system_supplied(void);
216#else 217#else
217static inline int power_supply_is_system_supplied(void) { return -ENOSYS; } 218static inline int power_supply_is_system_supplied(void) { return -ENOSYS; }
@@ -261,6 +262,7 @@ static inline bool power_supply_is_watt_property(enum power_supply_property psp)
261 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN: 262 case POWER_SUPPLY_PROP_VOLTAGE_MIN_DESIGN:
262 case POWER_SUPPLY_PROP_VOLTAGE_NOW: 263 case POWER_SUPPLY_PROP_VOLTAGE_NOW:
263 case POWER_SUPPLY_PROP_VOLTAGE_AVG: 264 case POWER_SUPPLY_PROP_VOLTAGE_AVG:
265 case POWER_SUPPLY_PROP_VOLTAGE_OCV:
264 case POWER_SUPPLY_PROP_POWER_NOW: 266 case POWER_SUPPLY_PROP_POWER_NOW:
265 return 1; 267 return 1;
266 default: 268 default:
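POWER_SUPPLY_PROP_VOLTAGE_OCV gives fuel gauges a dedicated slot for open-circuit voltage and, per the second hunk, is treated as a voltage-scaled "watt" property. A hedged sketch of a driver exposing it through the usual get_property callback (the driver name and OCV helper are illustrative, not from this patch):

	static int my_gauge_get_property(struct power_supply *psy,
					 enum power_supply_property psp,
					 union power_supply_propval *val)
	{
		switch (psp) {
		case POWER_SUPPLY_PROP_VOLTAGE_OCV:
			val->intval = my_gauge_read_ocv_uV();	/* assumed helper, microvolts */
			return 0;
		default:
			return -EINVAL;
		}
	}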
diff --git a/include/linux/prctl.h b/include/linux/prctl.h
index 78b76e24cc7e..3988012255dc 100644
--- a/include/linux/prctl.h
+++ b/include/linux/prctl.h
@@ -113,6 +113,12 @@
113# define PR_SET_MM_START_STACK 5 113# define PR_SET_MM_START_STACK 5
114# define PR_SET_MM_START_BRK 6 114# define PR_SET_MM_START_BRK 6
115# define PR_SET_MM_BRK 7 115# define PR_SET_MM_BRK 7
116# define PR_SET_MM_ARG_START 8
117# define PR_SET_MM_ARG_END 9
118# define PR_SET_MM_ENV_START 10
119# define PR_SET_MM_ENV_END 11
120# define PR_SET_MM_AUXV 12
121# define PR_SET_MM_EXE_FILE 13
116 122
117/* 123/*
118 * Set specific pid that is allowed to ptrace the current task. 124 * Set specific pid that is allowed to ptrace the current task.
@@ -121,8 +127,8 @@
121#define PR_SET_PTRACER 0x59616d61 127#define PR_SET_PTRACER 0x59616d61
122# define PR_SET_PTRACER_ANY ((unsigned long)-1) 128# define PR_SET_PTRACER_ANY ((unsigned long)-1)
123 129
124#define PR_SET_CHILD_SUBREAPER 36 130#define PR_SET_CHILD_SUBREAPER 36
125#define PR_GET_CHILD_SUBREAPER 37 131#define PR_GET_CHILD_SUBREAPER 37
126 132
127/* 133/*
128 * If no_new_privs is set, then operations that grant new privileges (i.e. 134 * If no_new_privs is set, then operations that grant new privileges (i.e.
@@ -136,7 +142,9 @@
136 * asking selinux for a specific new context (e.g. with runcon) will result 142 * asking selinux for a specific new context (e.g. with runcon) will result
137 * in execve returning -EPERM. 143 * in execve returning -EPERM.
138 */ 144 */
139#define PR_SET_NO_NEW_PRIVS 38 145#define PR_SET_NO_NEW_PRIVS 38
140#define PR_GET_NO_NEW_PRIVS 39 146#define PR_GET_NO_NEW_PRIVS 39
147
148#define PR_GET_TID_ADDRESS 40
141 149
142#endif /* _LINUX_PRCTL_H */ 150#endif /* _LINUX_PRCTL_H */
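The new PR_SET_MM_ARG_*/ENV_* options extend the existing PR_SET_MM operation (defined earlier in this header) so checkpoint/restore-style tools can repoint the regions backing /proc/<pid>/cmdline and environ; PR_GET_TID_ADDRESS reads back the clear_child_tid pointer. A hedged userspace sketch, assuming the caller has CAP_SYS_RESOURCE and has already mapped the placeholder addresses it passes in:

	#include <sys/prctl.h>
	#include <stdio.h>

	/* Sketch: repoint the argv window at a caller-prepared buffer. */
	static int set_cmdline_window(unsigned long args_start, unsigned long args_end)
	{
		if (prctl(PR_SET_MM, PR_SET_MM_ARG_START, args_start, 0, 0) ||
		    prctl(PR_SET_MM, PR_SET_MM_ARG_END, args_end, 0, 0)) {
			perror("prctl(PR_SET_MM)");
			return -1;
		}
		return 0;
	}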
diff --git a/include/linux/pstore_ram.h b/include/linux/pstore_ram.h
index 7ed7fd4dba49..3b823d49a85a 100644
--- a/include/linux/pstore_ram.h
+++ b/include/linux/pstore_ram.h
@@ -69,12 +69,14 @@ struct persistent_ram_zone * __init persistent_ram_new(phys_addr_t start,
69 size_t size, 69 size_t size,
70 bool ecc); 70 bool ecc);
71void persistent_ram_free(struct persistent_ram_zone *prz); 71void persistent_ram_free(struct persistent_ram_zone *prz);
72void persistent_ram_zap(struct persistent_ram_zone *prz);
72struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev, 73struct persistent_ram_zone *persistent_ram_init_ringbuffer(struct device *dev,
73 bool ecc); 74 bool ecc);
74 75
75int persistent_ram_write(struct persistent_ram_zone *prz, const void *s, 76int persistent_ram_write(struct persistent_ram_zone *prz, const void *s,
76 unsigned int count); 77 unsigned int count);
77 78
79void persistent_ram_save_old(struct persistent_ram_zone *prz);
78size_t persistent_ram_old_size(struct persistent_ram_zone *prz); 80size_t persistent_ram_old_size(struct persistent_ram_zone *prz);
79void *persistent_ram_old(struct persistent_ram_zone *prz); 81void *persistent_ram_old(struct persistent_ram_zone *prz);
80void persistent_ram_free_old(struct persistent_ram_zone *prz); 82void persistent_ram_free_old(struct persistent_ram_zone *prz);
diff --git a/include/linux/pxa2xx_ssp.h b/include/linux/pxa2xx_ssp.h
index 44835fb39793..f36632061c66 100644
--- a/include/linux/pxa2xx_ssp.h
+++ b/include/linux/pxa2xx_ssp.h
@@ -160,7 +160,9 @@ enum pxa_ssp_type {
160 PXA25x_SSP, /* pxa 210, 250, 255, 26x */ 160 PXA25x_SSP, /* pxa 210, 250, 255, 26x */
161 PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */ 161 PXA25x_NSSP, /* pxa 255, 26x (including ASSP) */
162 PXA27x_SSP, 162 PXA27x_SSP,
163 PXA3xx_SSP,
163 PXA168_SSP, 164 PXA168_SSP,
165 PXA910_SSP,
164 CE4100_SSP, 166 CE4100_SSP,
165}; 167};
166 168
diff --git a/include/linux/radix-tree.h b/include/linux/radix-tree.h
index 0d04cd69ab9b..ffc444c38b0a 100644
--- a/include/linux/radix-tree.h
+++ b/include/linux/radix-tree.h
@@ -368,8 +368,11 @@ radix_tree_next_slot(void **slot, struct radix_tree_iter *iter, unsigned flags)
368 iter->index++; 368 iter->index++;
369 if (likely(*slot)) 369 if (likely(*slot))
370 return slot; 370 return slot;
371 if (flags & RADIX_TREE_ITER_CONTIG) 371 if (flags & RADIX_TREE_ITER_CONTIG) {
372 /* forbid switching to the next chunk */
373 iter->next_index = 0;
372 break; 374 break;
375 }
373 } 376 }
374 } 377 }
375 return NULL; 378 return NULL;
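With next_index zeroed, a contiguous iteration now terminates for good at the first hole instead of resuming in the next populated chunk. A hedged sketch of the call pattern this protects, assuming the standard radix_tree_for_each_contig() iterator and an illustrative tree and callback:

	void **slot;
	struct radix_tree_iter iter;

	/* Visit only the gap-free run of entries starting at 'start'; the
	 * first missing index ends the walk rather than skipping ahead. */
	radix_tree_for_each_contig(slot, &my_tree, &iter, start)
		process_entry(radix_tree_deref_slot(slot));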
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index adb5e5a38cae..854dc4c5c271 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -87,8 +87,9 @@ static inline void kfree_call_rcu(struct rcu_head *head,
87 87
88#ifdef CONFIG_TINY_RCU 88#ifdef CONFIG_TINY_RCU
89 89
90static inline int rcu_needs_cpu(int cpu) 90static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
91{ 91{
92 *delta_jiffies = ULONG_MAX;
92 return 0; 93 return 0;
93} 94}
94 95
@@ -96,8 +97,9 @@ static inline int rcu_needs_cpu(int cpu)
96 97
97int rcu_preempt_needs_cpu(void); 98int rcu_preempt_needs_cpu(void);
98 99
99static inline int rcu_needs_cpu(int cpu) 100static inline int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
100{ 101{
102 *delta_jiffies = ULONG_MAX;
101 return rcu_preempt_needs_cpu(); 103 return rcu_preempt_needs_cpu();
102} 104}
103 105
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 3c6083cde4fc..952b79339304 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -32,7 +32,7 @@
32 32
33extern void rcu_init(void); 33extern void rcu_init(void);
34extern void rcu_note_context_switch(int cpu); 34extern void rcu_note_context_switch(int cpu);
35extern int rcu_needs_cpu(int cpu); 35extern int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies);
36extern void rcu_cpu_stall_reset(void); 36extern void rcu_cpu_stall_reset(void);
37 37
38/* 38/*
diff --git a/include/linux/rio.h b/include/linux/rio.h
index 4d50611112ba..a90ebadd9da0 100644
--- a/include/linux/rio.h
+++ b/include/linux/rio.h
@@ -20,6 +20,9 @@
20#include <linux/errno.h> 20#include <linux/errno.h>
21#include <linux/device.h> 21#include <linux/device.h>
22#include <linux/rio_regs.h> 22#include <linux/rio_regs.h>
23#ifdef CONFIG_RAPIDIO_DMA_ENGINE
24#include <linux/dmaengine.h>
25#endif
23 26
24#define RIO_NO_HOPCOUNT -1 27#define RIO_NO_HOPCOUNT -1
25#define RIO_INVALID_DESTID 0xffff 28#define RIO_INVALID_DESTID 0xffff
@@ -254,6 +257,9 @@ struct rio_mport {
254 u32 phys_efptr; 257 u32 phys_efptr;
255 unsigned char name[40]; 258 unsigned char name[40];
256 void *priv; /* Master port private data */ 259 void *priv; /* Master port private data */
260#ifdef CONFIG_RAPIDIO_DMA_ENGINE
261 struct dma_device dma;
262#endif
257}; 263};
258 264
259/** 265/**
@@ -395,6 +401,47 @@ union rio_pw_msg {
395 u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)]; 401 u32 raw[RIO_PW_MSG_SIZE/sizeof(u32)];
396}; 402};
397 403
404#ifdef CONFIG_RAPIDIO_DMA_ENGINE
405
406/**
407 * enum rio_write_type - RIO write transaction types used in DMA transfers
408 *
409 * Note: RapidIO specification defines write (NWRITE) and
410 * write-with-response (NWRITE_R) data transfer operations.
411 * Existing DMA controllers that service RapidIO may use one of these operations
412 * for entire data transfer or their combination with only the last data packet
413 * requires response.
414 */
415enum rio_write_type {
416 RDW_DEFAULT, /* default method used by DMA driver */
417 RDW_ALL_NWRITE, /* all packets use NWRITE */
418 RDW_ALL_NWRITE_R, /* all packets use NWRITE_R */
419 RDW_LAST_NWRITE_R, /* last packet uses NWRITE_R, others - NWRITE */
420};
421
422struct rio_dma_ext {
423 u16 destid;
424 u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
425 u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
426 enum rio_write_type wr_type; /* preferred RIO write operation type */
427};
428
429struct rio_dma_data {
430 /* Local data (as scatterlist) */
431 struct scatterlist *sg; /* I/O scatter list */
432 unsigned int sg_len; /* size of scatter list */
433 /* Remote device address (flat buffer) */
434 u64 rio_addr; /* low 64-bits of 66-bit RapidIO address */
435 u8 rio_addr_u; /* upper 2-bits of 66-bit RapidIO address */
436 enum rio_write_type wr_type; /* preferred RIO write operation type */
437};
438
439static inline struct rio_mport *dma_to_mport(struct dma_device *ddev)
440{
441 return container_of(ddev, struct rio_mport, dma);
442}
443#endif /* CONFIG_RAPIDIO_DMA_ENGINE */
444
398/* Architecture and hardware-specific functions */ 445/* Architecture and hardware-specific functions */
399extern int rio_register_mport(struct rio_mport *); 446extern int rio_register_mport(struct rio_mport *);
400extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int); 447extern int rio_open_inb_mbox(struct rio_mport *, void *, int, int);
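The rio_dma_ext/rio_dma_data structures carry the NWRITE choice described in the note above down to the DMA driver. A hedged sketch of a descriptor that asks for a response only on the final packet (the scatterlist and target address are placeholders):

	struct rio_dma_data data = {
		.sg		= my_sgl,		/* assumed, already DMA-mapped */
		.sg_len		= my_sgl_len,
		.rio_addr	= 0x10000000ULL,	/* placeholder RapidIO address */
		.rio_addr_u	= 0,
		.wr_type	= RDW_LAST_NWRITE_R,	/* NWRITE_R on the last packet only */
	};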
diff --git a/include/linux/rio_drv.h b/include/linux/rio_drv.h
index 7f07470e1ed9..31ad146be316 100644
--- a/include/linux/rio_drv.h
+++ b/include/linux/rio_drv.h
@@ -377,6 +377,15 @@ void rio_unregister_driver(struct rio_driver *);
377struct rio_dev *rio_dev_get(struct rio_dev *); 377struct rio_dev *rio_dev_get(struct rio_dev *);
378void rio_dev_put(struct rio_dev *); 378void rio_dev_put(struct rio_dev *);
379 379
380#ifdef CONFIG_RAPIDIO_DMA_ENGINE
381extern struct dma_chan *rio_request_dma(struct rio_dev *rdev);
382extern void rio_release_dma(struct dma_chan *dchan);
383extern struct dma_async_tx_descriptor *rio_dma_prep_slave_sg(
384 struct rio_dev *rdev, struct dma_chan *dchan,
385 struct rio_dma_data *data,
386 enum dma_transfer_direction direction, unsigned long flags);
387#endif
388
380/** 389/**
381 * rio_name - Get the unique RIO device identifier 390 * rio_name - Get the unique RIO device identifier
382 * @rdev: RIO device 391 * @rdev: RIO device
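Together with the rio.h additions, this gives RapidIO peers a slave-SG path through the dmaengine API. A hedged outline of the intended flow (rdev, the rio_dma_data from the earlier sketch, and the completion callback are assumptions; error handling is trimmed):

	static int my_rio_start_write(struct rio_dev *rdev, struct rio_dma_data *data)
	{
		struct dma_chan *chan = rio_request_dma(rdev);
		struct dma_async_tx_descriptor *tx;

		if (!chan)
			return -ENODEV;

		tx = rio_dma_prep_slave_sg(rdev, chan, data, DMA_MEM_TO_DEV, 0);
		if (!tx) {
			rio_release_dma(chan);
			return -EIO;
		}

		tx->callback = my_rio_dma_done;		/* assumed completion handler */
		dmaengine_submit(tx);
		dma_async_issue_pending(chan);
		return 0;	/* rio_release_dma() once the transfer completes */
	}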
diff --git a/include/linux/sched.h b/include/linux/sched.h
index f45c0b280b5d..4059c0f33f07 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -145,6 +145,7 @@ extern unsigned long this_cpu_load(void);
145 145
146 146
147extern void calc_global_load(unsigned long ticks); 147extern void calc_global_load(unsigned long ticks);
148extern void update_cpu_load_nohz(void);
148 149
149extern unsigned long get_parent_ip(unsigned long addr); 150extern unsigned long get_parent_ip(unsigned long addr);
150 151
@@ -438,6 +439,7 @@ extern int get_dumpable(struct mm_struct *mm);
438 /* leave room for more dump flags */ 439 /* leave room for more dump flags */
439#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */ 440#define MMF_VM_MERGEABLE 16 /* KSM may merge identical pages */
440#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */ 441#define MMF_VM_HUGEPAGE 17 /* set when VM_HUGEPAGE is set on vma */
442#define MMF_EXE_FILE_CHANGED 18 /* see prctl_set_mm_exe_file() */
441 443
442#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) 444#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK)
443 445
@@ -875,6 +877,8 @@ struct sched_group_power {
875 * Number of busy cpus in this group. 877 * Number of busy cpus in this group.
876 */ 878 */
877 atomic_t nr_busy_cpus; 879 atomic_t nr_busy_cpus;
880
881 unsigned long cpumask[0]; /* iteration mask */
878}; 882};
879 883
880struct sched_group { 884struct sched_group {
@@ -899,6 +903,15 @@ static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
899 return to_cpumask(sg->cpumask); 903 return to_cpumask(sg->cpumask);
900} 904}
901 905
906/*
907 * cpumask masking which cpus in the group are allowed to iterate up the domain
908 * tree.
909 */
910static inline struct cpumask *sched_group_mask(struct sched_group *sg)
911{
912 return to_cpumask(sg->sgp->cpumask);
913}
914
902/** 915/**
903 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group. 916 * group_first_cpu - Returns the first cpu in the cpumask of a sched_group.
904 * @group: The group whose first cpu is to be returned. 917 * @group: The group whose first cpu is to be returned.
@@ -1187,7 +1200,6 @@ struct sched_rt_entity {
1187 struct list_head run_list; 1200 struct list_head run_list;
1188 unsigned long timeout; 1201 unsigned long timeout;
1189 unsigned int time_slice; 1202 unsigned int time_slice;
1190 int nr_cpus_allowed;
1191 1203
1192 struct sched_rt_entity *back; 1204 struct sched_rt_entity *back;
1193#ifdef CONFIG_RT_GROUP_SCHED 1205#ifdef CONFIG_RT_GROUP_SCHED
@@ -1252,6 +1264,7 @@ struct task_struct {
1252#endif 1264#endif
1253 1265
1254 unsigned int policy; 1266 unsigned int policy;
1267 int nr_cpus_allowed;
1255 cpumask_t cpus_allowed; 1268 cpumask_t cpus_allowed;
1256 1269
1257#ifdef CONFIG_PREEMPT_RCU 1270#ifdef CONFIG_PREEMPT_RCU
@@ -1301,11 +1314,6 @@ struct task_struct {
1301 unsigned sched_reset_on_fork:1; 1314 unsigned sched_reset_on_fork:1;
1302 unsigned sched_contributes_to_load:1; 1315 unsigned sched_contributes_to_load:1;
1303 1316
1304#ifdef CONFIG_GENERIC_HARDIRQS
1305 /* IRQ handler threads */
1306 unsigned irq_thread:1;
1307#endif
1308
1309 pid_t pid; 1317 pid_t pid;
1310 pid_t tgid; 1318 pid_t tgid;
1311 1319
@@ -1313,10 +1321,9 @@ struct task_struct {
1313 /* Canary value for the -fstack-protector gcc feature */ 1321 /* Canary value for the -fstack-protector gcc feature */
1314 unsigned long stack_canary; 1322 unsigned long stack_canary;
1315#endif 1323#endif
1316 1324 /*
1317 /*
1318 * pointers to (original) parent process, youngest child, younger sibling, 1325 * pointers to (original) parent process, youngest child, younger sibling,
1319 * older sibling, respectively. (p->father can be replaced with 1326 * older sibling, respectively. (p->father can be replaced with
1320 * p->real_parent->pid) 1327 * p->real_parent->pid)
1321 */ 1328 */
1322 struct task_struct __rcu *real_parent; /* real parent process */ 1329 struct task_struct __rcu *real_parent; /* real parent process */
@@ -1363,8 +1370,6 @@ struct task_struct {
1363 * credentials (COW) */ 1370 * credentials (COW) */
1364 const struct cred __rcu *cred; /* effective (overridable) subjective task 1371 const struct cred __rcu *cred; /* effective (overridable) subjective task
1365 * credentials (COW) */ 1372 * credentials (COW) */
1366 struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */
1367
1368 char comm[TASK_COMM_LEN]; /* executable name excluding path 1373 char comm[TASK_COMM_LEN]; /* executable name excluding path
1369 - access with [gs]et_task_comm (which lock 1374 - access with [gs]et_task_comm (which lock
1370 it with task_lock()) 1375 it with task_lock())
@@ -1400,6 +1405,8 @@ struct task_struct {
1400 int (*notifier)(void *priv); 1405 int (*notifier)(void *priv);
1401 void *notifier_data; 1406 void *notifier_data;
1402 sigset_t *notifier_mask; 1407 sigset_t *notifier_mask;
1408 struct hlist_head task_works;
1409
1403 struct audit_context *audit_context; 1410 struct audit_context *audit_context;
1404#ifdef CONFIG_AUDITSYSCALL 1411#ifdef CONFIG_AUDITSYSCALL
1405 uid_t loginuid; 1412 uid_t loginuid;
@@ -2213,6 +2220,20 @@ extern int send_sigqueue(struct sigqueue *, struct task_struct *, int group);
2213extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *); 2220extern int do_sigaction(int, struct k_sigaction *, struct k_sigaction *);
2214extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long); 2221extern int do_sigaltstack(const stack_t __user *, stack_t __user *, unsigned long);
2215 2222
2223static inline void restore_saved_sigmask(void)
2224{
2225 if (test_and_clear_restore_sigmask())
2226 __set_current_blocked(&current->saved_sigmask);
2227}
2228
2229static inline sigset_t *sigmask_to_save(void)
2230{
2231 sigset_t *res = &current->blocked;
2232 if (unlikely(test_restore_sigmask()))
2233 res = &current->saved_sigmask;
2234 return res;
2235}
2236
2216static inline int kill_cad_pid(int sig, int priv) 2237static inline int kill_cad_pid(int sig, int priv)
2217{ 2238{
2218 return kill_pid(cad_pid, sig, priv); 2239 return kill_pid(cad_pid, sig, priv);
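restore_saved_sigmask() and sigmask_to_save() fold the TIF_RESTORE_SIGMASK bookkeeping (see the thread_info.h hunk later in this series) into two helpers, paired with the new signal_delivered() from signal.h. A hedged sketch of the pattern an architecture's signal code is expected to follow (function names are illustrative, not lifted from any one arch):

	static void my_arch_handle_signal(int sig, siginfo_t *info,
					  struct k_sigaction *ka, struct pt_regs *regs)
	{
		sigset_t *oldset = sigmask_to_save();	/* blocked or saved_sigmask */

		if (my_arch_setup_frame(sig, ka, oldset, regs) == 0)
			signal_delivered(sig, info, ka, regs, 0);
	}

	static void my_arch_do_signal(struct pt_regs *regs)
	{
		/* ... no signal to deliver on this return to userspace ... */
		restore_saved_sigmask();
	}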
diff --git a/include/linux/security.h b/include/linux/security.h
index ab0e091ce5fa..4e5a73cdbbef 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -86,9 +86,9 @@ extern int cap_inode_setxattr(struct dentry *dentry, const char *name,
86extern int cap_inode_removexattr(struct dentry *dentry, const char *name); 86extern int cap_inode_removexattr(struct dentry *dentry, const char *name);
87extern int cap_inode_need_killpriv(struct dentry *dentry); 87extern int cap_inode_need_killpriv(struct dentry *dentry);
88extern int cap_inode_killpriv(struct dentry *dentry); 88extern int cap_inode_killpriv(struct dentry *dentry);
89extern int cap_file_mmap(struct file *file, unsigned long reqprot, 89extern int cap_mmap_addr(unsigned long addr);
90 unsigned long prot, unsigned long flags, 90extern int cap_mmap_file(struct file *file, unsigned long reqprot,
91 unsigned long addr, unsigned long addr_only); 91 unsigned long prot, unsigned long flags);
92extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags); 92extern int cap_task_fix_setuid(struct cred *new, const struct cred *old, int flags);
93extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3, 93extern int cap_task_prctl(int option, unsigned long arg2, unsigned long arg3,
94 unsigned long arg4, unsigned long arg5); 94 unsigned long arg4, unsigned long arg5);
@@ -586,15 +586,17 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
586 * simple integer value. When @arg represents a user space pointer, it 586 * simple integer value. When @arg represents a user space pointer, it
587 * should never be used by the security module. 587 * should never be used by the security module.
588 * Return 0 if permission is granted. 588 * Return 0 if permission is granted.
589 * @file_mmap : 589 * @mmap_addr :
590 * Check permissions for a mmap operation at @addr.
591 * @addr contains virtual address that will be used for the operation.
592 * Return 0 if permission is granted.
593 * @mmap_file :
590 * Check permissions for a mmap operation. The @file may be NULL, e.g. 594 * Check permissions for a mmap operation. The @file may be NULL, e.g.
591 * if mapping anonymous memory. 595 * if mapping anonymous memory.
592 * @file contains the file structure for file to map (may be NULL). 596 * @file contains the file structure for file to map (may be NULL).
593 * @reqprot contains the protection requested by the application. 597 * @reqprot contains the protection requested by the application.
594 * @prot contains the protection that will be applied by the kernel. 598 * @prot contains the protection that will be applied by the kernel.
595 * @flags contains the operational flags. 599 * @flags contains the operational flags.
596 * @addr contains virtual address that will be used for the operation.
597 * @addr_only contains a boolean: 0 if file-backed VMA, otherwise 1.
598 * Return 0 if permission is granted. 600 * Return 0 if permission is granted.
599 * @file_mprotect: 601 * @file_mprotect:
600 * Check permissions before changing memory access permissions. 602 * Check permissions before changing memory access permissions.
@@ -1481,10 +1483,10 @@ struct security_operations {
1481 void (*file_free_security) (struct file *file); 1483 void (*file_free_security) (struct file *file);
1482 int (*file_ioctl) (struct file *file, unsigned int cmd, 1484 int (*file_ioctl) (struct file *file, unsigned int cmd,
1483 unsigned long arg); 1485 unsigned long arg);
1484 int (*file_mmap) (struct file *file, 1486 int (*mmap_addr) (unsigned long addr);
1487 int (*mmap_file) (struct file *file,
1485 unsigned long reqprot, unsigned long prot, 1488 unsigned long reqprot, unsigned long prot,
1486 unsigned long flags, unsigned long addr, 1489 unsigned long flags);
1487 unsigned long addr_only);
1488 int (*file_mprotect) (struct vm_area_struct *vma, 1490 int (*file_mprotect) (struct vm_area_struct *vma,
1489 unsigned long reqprot, 1491 unsigned long reqprot,
1490 unsigned long prot); 1492 unsigned long prot);
@@ -1743,9 +1745,9 @@ int security_file_permission(struct file *file, int mask);
1743int security_file_alloc(struct file *file); 1745int security_file_alloc(struct file *file);
1744void security_file_free(struct file *file); 1746void security_file_free(struct file *file);
1745int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg); 1747int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
1746int security_file_mmap(struct file *file, unsigned long reqprot, 1748int security_mmap_file(struct file *file, unsigned long prot,
1747 unsigned long prot, unsigned long flags, 1749 unsigned long flags);
1748 unsigned long addr, unsigned long addr_only); 1750int security_mmap_addr(unsigned long addr);
1749int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, 1751int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
1750 unsigned long prot); 1752 unsigned long prot);
1751int security_file_lock(struct file *file, unsigned int cmd); 1753int security_file_lock(struct file *file, unsigned int cmd);
@@ -2181,13 +2183,15 @@ static inline int security_file_ioctl(struct file *file, unsigned int cmd,
2181 return 0; 2183 return 0;
2182} 2184}
2183 2185
2184static inline int security_file_mmap(struct file *file, unsigned long reqprot, 2186static inline int security_mmap_file(struct file *file, unsigned long prot,
2185 unsigned long prot, 2187 unsigned long flags)
2186 unsigned long flags, 2188{
2187 unsigned long addr, 2189 return 0;
2188 unsigned long addr_only) 2190}
2191
2192static inline int security_mmap_addr(unsigned long addr)
2189{ 2193{
2190 return cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); 2194 return cap_mmap_addr(addr);
2191} 2195}
2192 2196
2193static inline int security_file_mprotect(struct vm_area_struct *vma, 2197static inline int security_file_mprotect(struct vm_area_struct *vma,
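Splitting file_mmap into mmap_addr and mmap_file separates the address policy from the file/protection policy; the !CONFIG_SECURITY fallback above routes the address check straight to cap_mmap_addr(). A hedged sketch of what the narrower address hook looks like in a module (the minimum-address limit is an illustrative module-local value):

	/* Sketch of the new hook shape: only the proposed mapping address matters. */
	static int my_lsm_mmap_addr(unsigned long addr)
	{
		if (addr < my_lsm_mmap_min_addr)	/* assumed module policy */
			return -EPERM;
		return 0;
	}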
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 17046cc484bc..26b424adc842 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -250,12 +250,13 @@ extern long do_sigpending(void __user *, unsigned long);
250extern int do_sigtimedwait(const sigset_t *, siginfo_t *, 250extern int do_sigtimedwait(const sigset_t *, siginfo_t *,
251 const struct timespec *); 251 const struct timespec *);
252extern int sigprocmask(int, sigset_t *, sigset_t *); 252extern int sigprocmask(int, sigset_t *, sigset_t *);
253extern void set_current_blocked(const sigset_t *); 253extern void set_current_blocked(sigset_t *);
254extern void __set_current_blocked(const sigset_t *);
254extern int show_unhandled_signals; 255extern int show_unhandled_signals;
255extern int sigsuspend(sigset_t *); 256extern int sigsuspend(sigset_t *);
256 257
257extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie); 258extern int get_signal_to_deliver(siginfo_t *info, struct k_sigaction *return_ka, struct pt_regs *regs, void *cookie);
258extern void block_sigmask(struct k_sigaction *ka, int signr); 259extern void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka, struct pt_regs *regs, int stepping);
259extern void exit_signals(struct task_struct *tsk); 260extern void exit_signals(struct task_struct *tsk);
260 261
261extern struct kmem_cache *sighand_cachep; 262extern struct kmem_cache *sighand_cachep;
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 0e501714d47f..b534a1be540a 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1896,8 +1896,6 @@ static inline int __skb_cow(struct sk_buff *skb, unsigned int headroom,
1896{ 1896{
1897 int delta = 0; 1897 int delta = 0;
1898 1898
1899 if (headroom < NET_SKB_PAD)
1900 headroom = NET_SKB_PAD;
1901 if (headroom > skb_headroom(skb)) 1899 if (headroom > skb_headroom(skb))
1902 delta = headroom - skb_headroom(skb); 1900 delta = headroom - skb_headroom(skb);
1903 1901
diff --git a/include/linux/slab.h b/include/linux/slab.h
index a595dce6b0c7..67d5d94b783a 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -242,7 +242,7 @@ size_t ksize(const void *);
242 */ 242 */
243static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags) 243static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
244{ 244{
245 if (size != 0 && n > ULONG_MAX / size) 245 if (size != 0 && n > SIZE_MAX / size)
246 return NULL; 246 return NULL;
247 return __kmalloc(n * size, flags); 247 return __kmalloc(n * size, flags);
248} 248}
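Using SIZE_MAX rather than ULONG_MAX makes the overflow guard match the size_t multiplication it protects (the two are the same width on Linux targets, so this is a correctness-of-intent fix). The calling convention is unchanged; for reference, a typical hedged use:

	/* n * sizeof(*tbl) is overflow-checked; NULL comes back if it would wrap. */
	struct my_entry *tbl = kmalloc_array(n, sizeof(*tbl), GFP_KERNEL);
	if (!tbl)
		return -ENOMEM;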
diff --git a/include/linux/spi/pxa2xx_spi.h b/include/linux/spi/pxa2xx_spi.h
index d3e1075f7b60..c73d1445c77e 100644
--- a/include/linux/spi/pxa2xx_spi.h
+++ b/include/linux/spi/pxa2xx_spi.h
@@ -43,7 +43,7 @@ struct pxa2xx_spi_chip {
43 void (*cs_control)(u32 command); 43 void (*cs_control)(u32 command);
44}; 44};
45 45
46#ifdef CONFIG_ARCH_PXA 46#if defined(CONFIG_ARCH_PXA) || defined(CONFIG_ARCH_MMP)
47 47
48#include <linux/clk.h> 48#include <linux/clk.h>
49#include <mach/dma.h> 49#include <mach/dma.h>
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index 51b29ac45a8e..40e0a273faea 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -232,7 +232,6 @@ struct svc_rqst {
232 struct svc_pool * rq_pool; /* thread pool */ 232 struct svc_pool * rq_pool; /* thread pool */
233 struct svc_procedure * rq_procinfo; /* procedure info */ 233 struct svc_procedure * rq_procinfo; /* procedure info */
234 struct auth_ops * rq_authop; /* authentication flavour */ 234 struct auth_ops * rq_authop; /* authentication flavour */
235 u32 rq_flavor; /* pseudoflavor */
236 struct svc_cred rq_cred; /* auth info */ 235 struct svc_cred rq_cred; /* auth info */
237 void * rq_xprt_ctxt; /* transport specific context ptr */ 236 void * rq_xprt_ctxt; /* transport specific context ptr */
238 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */ 237 struct svc_deferred_req*rq_deferred; /* deferred request we are replaying */
@@ -416,6 +415,7 @@ struct svc_procedure {
416 */ 415 */
417int svc_rpcb_setup(struct svc_serv *serv, struct net *net); 416int svc_rpcb_setup(struct svc_serv *serv, struct net *net);
418void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net); 417void svc_rpcb_cleanup(struct svc_serv *serv, struct net *net);
418int svc_bind(struct svc_serv *serv, struct net *net);
419struct svc_serv *svc_create(struct svc_program *, unsigned int, 419struct svc_serv *svc_create(struct svc_program *, unsigned int,
420 void (*shutdown)(struct svc_serv *, struct net *net)); 420 void (*shutdown)(struct svc_serv *, struct net *net));
421struct svc_rqst *svc_prepare_thread(struct svc_serv *serv, 421struct svc_rqst *svc_prepare_thread(struct svc_serv *serv,
diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
index 548790e9113b..dd74084a9799 100644
--- a/include/linux/sunrpc/svcauth.h
+++ b/include/linux/sunrpc/svcauth.h
@@ -15,14 +15,23 @@
15#include <linux/sunrpc/msg_prot.h> 15#include <linux/sunrpc/msg_prot.h>
16#include <linux/sunrpc/cache.h> 16#include <linux/sunrpc/cache.h>
17#include <linux/hash.h> 17#include <linux/hash.h>
18#include <linux/cred.h>
18 19
19#define SVC_CRED_NGROUPS 32
20struct svc_cred { 20struct svc_cred {
21 uid_t cr_uid; 21 uid_t cr_uid;
22 gid_t cr_gid; 22 gid_t cr_gid;
23 struct group_info *cr_group_info; 23 struct group_info *cr_group_info;
24 u32 cr_flavor; /* pseudoflavor */
25 char *cr_principal; /* for gss */
24}; 26};
25 27
28static inline void free_svc_cred(struct svc_cred *cred)
29{
30 if (cred->cr_group_info)
31 put_group_info(cred->cr_group_info);
32 kfree(cred->cr_principal);
33}
34
26struct svc_rqst; /* forward decl */ 35struct svc_rqst; /* forward decl */
27struct in6_addr; 36struct in6_addr;
28 37
@@ -131,7 +140,7 @@ extern struct auth_domain *auth_domain_lookup(char *name, struct auth_domain *ne
131extern struct auth_domain *auth_domain_find(char *name); 140extern struct auth_domain *auth_domain_find(char *name);
132extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr); 141extern struct auth_domain *auth_unix_lookup(struct net *net, struct in6_addr *addr);
133extern int auth_unix_forget_old(struct auth_domain *dom); 142extern int auth_unix_forget_old(struct auth_domain *dom);
134extern void svcauth_unix_purge(void); 143extern void svcauth_unix_purge(struct net *net);
135extern void svcauth_unix_info_release(struct svc_xprt *xpt); 144extern void svcauth_unix_info_release(struct svc_xprt *xpt);
136extern int svcauth_unix_set_client(struct svc_rqst *rqstp); 145extern int svcauth_unix_set_client(struct svc_rqst *rqstp);
137 146
diff --git a/include/linux/sunrpc/svcauth_gss.h b/include/linux/sunrpc/svcauth_gss.h
index 7c32daa025eb..726aff1a5201 100644
--- a/include/linux/sunrpc/svcauth_gss.h
+++ b/include/linux/sunrpc/svcauth_gss.h
@@ -22,7 +22,6 @@ int gss_svc_init_net(struct net *net);
22void gss_svc_shutdown_net(struct net *net); 22void gss_svc_shutdown_net(struct net *net);
23int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name); 23int svcauth_gss_register_pseudoflavor(u32 pseudoflavor, char * name);
24u32 svcauth_gss_flavor(struct auth_domain *dom); 24u32 svcauth_gss_flavor(struct auth_domain *dom);
25char *svc_gss_principal(struct svc_rqst *);
26 25
27#endif /* __KERNEL__ */ 26#endif /* __KERNEL__ */
28#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */ 27#endif /* _LINUX_SUNRPC_SVCAUTH_GSS_H */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b6661933e252..c84ec68eaec9 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -197,6 +197,10 @@ struct swap_info_struct {
197 struct block_device *bdev; /* swap device or bdev of swap file */ 197 struct block_device *bdev; /* swap device or bdev of swap file */
198 struct file *swap_file; /* seldom referenced */ 198 struct file *swap_file; /* seldom referenced */
199 unsigned int old_block_size; /* seldom referenced */ 199 unsigned int old_block_size; /* seldom referenced */
200#ifdef CONFIG_FRONTSWAP
201 unsigned long *frontswap_map; /* frontswap in-use, one bit per page */
202 atomic_t frontswap_pages; /* frontswap pages in-use counter */
203#endif
200}; 204};
201 205
202struct swap_list_t { 206struct swap_list_t {
diff --git a/include/linux/swapfile.h b/include/linux/swapfile.h
new file mode 100644
index 000000000000..e282624e8c10
--- /dev/null
+++ b/include/linux/swapfile.h
@@ -0,0 +1,13 @@
1#ifndef _LINUX_SWAPFILE_H
2#define _LINUX_SWAPFILE_H
3
4/*
5 * these were static in swapfile.c but frontswap.c needs them and we don't
6 * want to expose them to the dozens of source files that include swap.h
7 */
8extern spinlock_t swap_lock;
9extern struct swap_list_t swap_list;
10extern struct swap_info_struct *swap_info[];
11extern int try_to_unuse(unsigned int, bool, unsigned long);
12
13#endif /* _LINUX_SWAPFILE_H */
diff --git a/include/linux/swapops.h b/include/linux/swapops.h
index 792d16d9cbc7..47ead515c811 100644
--- a/include/linux/swapops.h
+++ b/include/linux/swapops.h
@@ -9,13 +9,15 @@
9 * get good packing density in that tree, so the index should be dense in 9 * get good packing density in that tree, so the index should be dense in
10 * the low-order bits. 10 * the low-order bits.
11 * 11 *
12 * We arrange the `type' and `offset' fields so that `type' is at the five 12 * We arrange the `type' and `offset' fields so that `type' is at the seven
13 * high-order bits of the swp_entry_t and `offset' is right-aligned in the 13 * high-order bits of the swp_entry_t and `offset' is right-aligned in the
14 * remaining bits. 14 * remaining bits. Although `type' itself needs only five bits, we allow for
15 * shmem/tmpfs to shift it all up a further two bits: see swp_to_radix_entry().
15 * 16 *
16 * swp_entry_t's are *never* stored anywhere in their arch-dependent format. 17 * swp_entry_t's are *never* stored anywhere in their arch-dependent format.
17 */ 18 */
18#define SWP_TYPE_SHIFT(e) (sizeof(e.val) * 8 - MAX_SWAPFILES_SHIFT) 19#define SWP_TYPE_SHIFT(e) ((sizeof(e.val) * 8) - \
20 (MAX_SWAPFILES_SHIFT + RADIX_TREE_EXCEPTIONAL_SHIFT))
19#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1) 21#define SWP_OFFSET_MASK(e) ((1UL << SWP_TYPE_SHIFT(e)) - 1)
20 22
21/* 23/*
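A worked instance of the new shift may help, taking a 64-bit swp_entry_t with MAX_SWAPFILES_SHIFT of five and RADIX_TREE_EXCEPTIONAL_SHIFT of two, as the updated comment implies:

	/*
	 * SWP_TYPE_SHIFT(e)  = 64 - (5 + 2) = 57
	 * SWP_OFFSET_MASK(e) = (1UL << 57) - 1
	 *
	 * i.e. the swap type lives in the top seven bits (five used, two held
	 * in reserve for shmem's swp_to_radix_entry() shift) and the offset
	 * fills the low 57 bits.
	 */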
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 3de3acb84a95..19439c75c5b2 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -858,4 +858,6 @@ asmlinkage long sys_process_vm_writev(pid_t pid,
858 unsigned long riovcnt, 858 unsigned long riovcnt,
859 unsigned long flags); 859 unsigned long flags);
860 860
861asmlinkage long sys_kcmp(pid_t pid1, pid_t pid2, int type,
862 unsigned long idx1, unsigned long idx2);
861#endif 863#endif
diff --git a/include/linux/task_work.h b/include/linux/task_work.h
new file mode 100644
index 000000000000..294d5d5e90b1
--- /dev/null
+++ b/include/linux/task_work.h
@@ -0,0 +1,33 @@
1#ifndef _LINUX_TASK_WORK_H
2#define _LINUX_TASK_WORK_H
3
4#include <linux/list.h>
5#include <linux/sched.h>
6
7struct task_work;
8typedef void (*task_work_func_t)(struct task_work *);
9
10struct task_work {
11 struct hlist_node hlist;
12 task_work_func_t func;
13 void *data;
14};
15
16static inline void
17init_task_work(struct task_work *twork, task_work_func_t func, void *data)
18{
19 twork->func = func;
20 twork->data = data;
21}
22
23int task_work_add(struct task_struct *task, struct task_work *twork, bool);
24struct task_work *task_work_cancel(struct task_struct *, task_work_func_t);
25void task_work_run(void);
26
27static inline void exit_task_work(struct task_struct *task)
28{
29 if (unlikely(!hlist_empty(&task->task_works)))
30 task_work_run();
31}
32
33#endif /* _LINUX_TASK_WORK_H */
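task_work is the new per-task deferred-callback list hooked into sched.h (task_works) and drained from tracehook_notify_resume() and exit. A hedged sketch of queueing work onto a task (the request structure and helpers are assumptions; the final bool asks task_work_add() to notify the target task):

	struct my_req {
		struct task_work twork;
		/* ... request payload ... */
	};

	static void my_req_complete(struct task_work *twork)
	{
		struct my_req *req = container_of(twork, struct my_req, twork);
		/* runs in the target task's context, from task_work_run() */
		finish_my_req(req);	/* assumed helper */
	}

	static int my_req_queue(struct task_struct *task, struct my_req *req)
	{
		init_task_work(&req->twork, my_req_complete, req);
		return task_work_add(task, &req->twork, true);	/* true == notify task */
	}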
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 4c5b63283377..5f359dbfcdce 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -69,16 +69,16 @@ union tcp_word_hdr {
69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3]) 69#define tcp_flag_word(tp) ( ((union tcp_word_hdr *)(tp))->words [3])
70 70
71enum { 71enum {
72 TCP_FLAG_CWR = __cpu_to_be32(0x00800000), 72 TCP_FLAG_CWR = __constant_cpu_to_be32(0x00800000),
73 TCP_FLAG_ECE = __cpu_to_be32(0x00400000), 73 TCP_FLAG_ECE = __constant_cpu_to_be32(0x00400000),
74 TCP_FLAG_URG = __cpu_to_be32(0x00200000), 74 TCP_FLAG_URG = __constant_cpu_to_be32(0x00200000),
75 TCP_FLAG_ACK = __cpu_to_be32(0x00100000), 75 TCP_FLAG_ACK = __constant_cpu_to_be32(0x00100000),
76 TCP_FLAG_PSH = __cpu_to_be32(0x00080000), 76 TCP_FLAG_PSH = __constant_cpu_to_be32(0x00080000),
77 TCP_FLAG_RST = __cpu_to_be32(0x00040000), 77 TCP_FLAG_RST = __constant_cpu_to_be32(0x00040000),
78 TCP_FLAG_SYN = __cpu_to_be32(0x00020000), 78 TCP_FLAG_SYN = __constant_cpu_to_be32(0x00020000),
79 TCP_FLAG_FIN = __cpu_to_be32(0x00010000), 79 TCP_FLAG_FIN = __constant_cpu_to_be32(0x00010000),
80 TCP_RESERVED_BITS = __cpu_to_be32(0x0F000000), 80 TCP_RESERVED_BITS = __constant_cpu_to_be32(0x0F000000),
81 TCP_DATA_OFFSET = __cpu_to_be32(0xF0000000) 81 TCP_DATA_OFFSET = __constant_cpu_to_be32(0xF0000000)
82}; 82};
83 83
84/* 84/*
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index db78775eff3b..ccc1899bd62e 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -8,6 +8,7 @@
8#define _LINUX_THREAD_INFO_H 8#define _LINUX_THREAD_INFO_H
9 9
10#include <linux/types.h> 10#include <linux/types.h>
11#include <linux/bug.h>
11 12
12struct timespec; 13struct timespec;
13struct compat_timespec; 14struct compat_timespec;
@@ -125,10 +126,26 @@ static inline int test_ti_thread_flag(struct thread_info *ti, int flag)
125static inline void set_restore_sigmask(void) 126static inline void set_restore_sigmask(void)
126{ 127{
127 set_thread_flag(TIF_RESTORE_SIGMASK); 128 set_thread_flag(TIF_RESTORE_SIGMASK);
128 set_thread_flag(TIF_SIGPENDING); 129 WARN_ON(!test_thread_flag(TIF_SIGPENDING));
130}
131static inline void clear_restore_sigmask(void)
132{
133 clear_thread_flag(TIF_RESTORE_SIGMASK);
134}
135static inline bool test_restore_sigmask(void)
136{
137 return test_thread_flag(TIF_RESTORE_SIGMASK);
138}
139static inline bool test_and_clear_restore_sigmask(void)
140{
141 return test_and_clear_thread_flag(TIF_RESTORE_SIGMASK);
129} 142}
130#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */ 143#endif /* TIF_RESTORE_SIGMASK && !HAVE_SET_RESTORE_SIGMASK */
131 144
145#ifndef HAVE_SET_RESTORE_SIGMASK
146#error "no set_restore_sigmask() provided and default one won't work"
147#endif
148
132#endif /* __KERNEL__ */ 149#endif /* __KERNEL__ */
133 150
134#endif /* _LINUX_THREAD_INFO_H */ 151#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/tracehook.h b/include/linux/tracehook.h
index 51bd91d911c3..6a4d82bedb03 100644
--- a/include/linux/tracehook.h
+++ b/include/linux/tracehook.h
@@ -49,6 +49,7 @@
49#include <linux/sched.h> 49#include <linux/sched.h>
50#include <linux/ptrace.h> 50#include <linux/ptrace.h>
51#include <linux/security.h> 51#include <linux/security.h>
52#include <linux/task_work.h>
52struct linux_binprm; 53struct linux_binprm;
53 54
54/* 55/*
@@ -153,7 +154,6 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
153 ptrace_notify(SIGTRAP); 154 ptrace_notify(SIGTRAP);
154} 155}
155 156
156#ifdef TIF_NOTIFY_RESUME
157/** 157/**
158 * set_notify_resume - cause tracehook_notify_resume() to be called 158 * set_notify_resume - cause tracehook_notify_resume() to be called
159 * @task: task that will call tracehook_notify_resume() 159 * @task: task that will call tracehook_notify_resume()
@@ -165,8 +165,10 @@ static inline void tracehook_signal_handler(int sig, siginfo_t *info,
165 */ 165 */
166static inline void set_notify_resume(struct task_struct *task) 166static inline void set_notify_resume(struct task_struct *task)
167{ 167{
168#ifdef TIF_NOTIFY_RESUME
168 if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME)) 169 if (!test_and_set_tsk_thread_flag(task, TIF_NOTIFY_RESUME))
169 kick_process(task); 170 kick_process(task);
171#endif
170} 172}
171 173
172/** 174/**
@@ -184,7 +186,14 @@ static inline void set_notify_resume(struct task_struct *task)
184 */ 186 */
185static inline void tracehook_notify_resume(struct pt_regs *regs) 187static inline void tracehook_notify_resume(struct pt_regs *regs)
186{ 188{
189 /*
190 * The caller just cleared TIF_NOTIFY_RESUME. This barrier
191 * pairs with task_work_add()->set_notify_resume() after
192 * hlist_add_head(task->task_works);
193 */
194 smp_mb__after_clear_bit();
195 if (unlikely(!hlist_empty(&current->task_works)))
196 task_work_run();
187} 197}
188#endif /* TIF_NOTIFY_RESUME */
189 198
190#endif /* <linux/tracehook.h> */ 199#endif /* <linux/tracehook.h> */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 4990ef2b1fb7..9f47ab540f65 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -268,7 +268,6 @@ struct tty_struct {
268 struct mutex ldisc_mutex; 268 struct mutex ldisc_mutex;
269 struct tty_ldisc *ldisc; 269 struct tty_ldisc *ldisc;
270 270
271 struct mutex legacy_mutex;
272 struct mutex termios_mutex; 271 struct mutex termios_mutex;
273 spinlock_t ctrl_lock; 272 spinlock_t ctrl_lock;
274 /* Termios values are protected by the termios mutex */ 273 /* Termios values are protected by the termios mutex */
@@ -606,12 +605,8 @@ extern long vt_compat_ioctl(struct tty_struct *tty,
606 605
607/* tty_mutex.c */ 606/* tty_mutex.c */
608/* functions for preparation of BKL removal */ 607/* functions for preparation of BKL removal */
609extern void __lockfunc tty_lock(struct tty_struct *tty); 608extern void __lockfunc tty_lock(void) __acquires(tty_lock);
610extern void __lockfunc tty_unlock(struct tty_struct *tty); 609extern void __lockfunc tty_unlock(void) __releases(tty_lock);
611extern void __lockfunc tty_lock_pair(struct tty_struct *tty,
612 struct tty_struct *tty2);
613extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
614 struct tty_struct *tty2);
615 610
616/* 611/*
617 * this shall be called only from where BTM is held (like close) 612 * this shall be called only from where BTM is held (like close)
@@ -626,9 +621,9 @@ extern void __lockfunc tty_unlock_pair(struct tty_struct *tty,
626static inline void tty_wait_until_sent_from_close(struct tty_struct *tty, 621static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
627 long timeout) 622 long timeout)
628{ 623{
629 tty_unlock(tty); /* tty->ops->close holds the BTM, drop it while waiting */ 624 tty_unlock(); /* tty->ops->close holds the BTM, drop it while waiting */
630 tty_wait_until_sent(tty, timeout); 625 tty_wait_until_sent(tty, timeout);
631 tty_lock(tty); 626 tty_lock();
632} 627}
633 628
634/* 629/*
@@ -643,16 +638,16 @@ static inline void tty_wait_until_sent_from_close(struct tty_struct *tty,
643 * 638 *
644 * Do not use in new code. 639 * Do not use in new code.
645 */ 640 */
646#define wait_event_interruptible_tty(tty, wq, condition) \ 641#define wait_event_interruptible_tty(wq, condition) \
647({ \ 642({ \
648 int __ret = 0; \ 643 int __ret = 0; \
649 if (!(condition)) { \ 644 if (!(condition)) { \
650 __wait_event_interruptible_tty(tty, wq, condition, __ret); \ 645 __wait_event_interruptible_tty(wq, condition, __ret); \
651 } \ 646 } \
652 __ret; \ 647 __ret; \
653}) 648})
654 649
655#define __wait_event_interruptible_tty(tty, wq, condition, ret) \ 650#define __wait_event_interruptible_tty(wq, condition, ret) \
656do { \ 651do { \
657 DEFINE_WAIT(__wait); \ 652 DEFINE_WAIT(__wait); \
658 \ 653 \
@@ -661,9 +656,9 @@ do { \
661 if (condition) \ 656 if (condition) \
662 break; \ 657 break; \
663 if (!signal_pending(current)) { \ 658 if (!signal_pending(current)) { \
664 tty_unlock(tty); \ 659 tty_unlock(); \
665 schedule(); \ 660 schedule(); \
666 tty_lock(tty); \ 661 tty_lock(); \
667 continue; \ 662 continue; \
668 } \ 663 } \
669 ret = -ERESTARTSYS; \ 664 ret = -ERESTARTSYS; \
diff --git a/include/linux/types.h b/include/linux/types.h
index 7f480db60231..9c1bd539ea70 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -25,7 +25,7 @@ typedef __kernel_dev_t dev_t;
25typedef __kernel_ino_t ino_t; 25typedef __kernel_ino_t ino_t;
26typedef __kernel_mode_t mode_t; 26typedef __kernel_mode_t mode_t;
27typedef unsigned short umode_t; 27typedef unsigned short umode_t;
28typedef __kernel_nlink_t nlink_t; 28typedef __u32 nlink_t;
29typedef __kernel_off_t off_t; 29typedef __kernel_off_t off_t;
30typedef __kernel_pid_t pid_t; 30typedef __kernel_pid_t pid_t;
31typedef __kernel_daddr_t daddr_t; 31typedef __kernel_daddr_t daddr_t;
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 7f855d50cdf5..49b3ac29726a 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -126,8 +126,6 @@ struct usb_hcd {
126 unsigned wireless:1; /* Wireless USB HCD */ 126 unsigned wireless:1; /* Wireless USB HCD */
127 unsigned authorized_default:1; 127 unsigned authorized_default:1;
128 unsigned has_tt:1; /* Integrated TT in root hub */ 128 unsigned has_tt:1; /* Integrated TT in root hub */
129 unsigned broken_pci_sleep:1; /* Don't put the
130 controller in PCI-D3 for system sleep */
131 129
132 unsigned int irq; /* irq allocated */ 130 unsigned int irq; /* irq allocated */
133 void __iomem *regs; /* device memory/io */ 131 void __iomem *regs; /* device memory/io */
diff --git a/include/linux/vga_switcheroo.h b/include/linux/vga_switcheroo.h
index b455c7c212eb..ddb419cf4530 100644
--- a/include/linux/vga_switcheroo.h
+++ b/include/linux/vga_switcheroo.h
@@ -7,11 +7,19 @@
7 * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs 7 * vga_switcheroo.h - Support for laptop with dual GPU using one set of outputs
8 */ 8 */
9 9
10#ifndef _LINUX_VGA_SWITCHEROO_H_
11#define _LINUX_VGA_SWITCHEROO_H_
12
10#include <linux/fb.h> 13#include <linux/fb.h>
11 14
15struct pci_dev;
16
12enum vga_switcheroo_state { 17enum vga_switcheroo_state {
13 VGA_SWITCHEROO_OFF, 18 VGA_SWITCHEROO_OFF,
14 VGA_SWITCHEROO_ON, 19 VGA_SWITCHEROO_ON,
20 /* below are referred only from vga_switcheroo_get_client_state() */
21 VGA_SWITCHEROO_INIT,
22 VGA_SWITCHEROO_NOT_FOUND,
15}; 23};
16 24
17enum vga_switcheroo_client_id { 25enum vga_switcheroo_client_id {
@@ -50,6 +58,8 @@ void vga_switcheroo_unregister_handler(void);
50 58
51int vga_switcheroo_process_delayed_switch(void); 59int vga_switcheroo_process_delayed_switch(void);
52 60
61int vga_switcheroo_get_client_state(struct pci_dev *dev);
62
53#else 63#else
54 64
55static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {} 65static inline void vga_switcheroo_unregister_client(struct pci_dev *dev) {}
@@ -62,5 +72,8 @@ static inline int vga_switcheroo_register_audio_client(struct pci_dev *pdev,
62 int id, bool active) { return 0; } 72 int id, bool active) { return 0; }
63static inline void vga_switcheroo_unregister_handler(void) {} 73static inline void vga_switcheroo_unregister_handler(void) {}
64static inline int vga_switcheroo_process_delayed_switch(void) { return 0; } 74static inline int vga_switcheroo_process_delayed_switch(void) { return 0; }
75static inline int vga_switcheroo_get_client_state(struct pci_dev *dev) { return VGA_SWITCHEROO_ON; }
76
65 77
66#endif 78#endif
79#endif /* _LINUX_VGA_SWITCHEROO_H_ */
diff --git a/include/net/cipso_ipv4.h b/include/net/cipso_ipv4.h
index 9808877c2ab9..a7a683e30b64 100644
--- a/include/net/cipso_ipv4.h
+++ b/include/net/cipso_ipv4.h
@@ -42,6 +42,7 @@
42#include <net/netlabel.h> 42#include <net/netlabel.h>
43#include <net/request_sock.h> 43#include <net/request_sock.h>
44#include <linux/atomic.h> 44#include <linux/atomic.h>
45#include <asm/unaligned.h>
45 46
46/* known doi values */ 47/* known doi values */
47#define CIPSO_V4_DOI_UNKNOWN 0x00000000 48#define CIPSO_V4_DOI_UNKNOWN 0x00000000
@@ -285,7 +286,33 @@ static inline int cipso_v4_skbuff_getattr(const struct sk_buff *skb,
285static inline int cipso_v4_validate(const struct sk_buff *skb, 286static inline int cipso_v4_validate(const struct sk_buff *skb,
286 unsigned char **option) 287 unsigned char **option)
287{ 288{
288 return -ENOSYS; 289 unsigned char *opt = *option;
290 unsigned char err_offset = 0;
291 u8 opt_len = opt[1];
292 u8 opt_iter;
293
294 if (opt_len < 8) {
295 err_offset = 1;
296 goto out;
297 }
298
299 if (get_unaligned_be32(&opt[2]) == 0) {
300 err_offset = 2;
301 goto out;
302 }
303
304 for (opt_iter = 6; opt_iter < opt_len;) {
305 if (opt[opt_iter + 1] > (opt_len - opt_iter)) {
306 err_offset = opt_iter + 1;
307 goto out;
308 }
309 opt_iter += opt[opt_iter + 1];
310 }
311
312out:
313 *option = opt + err_offset;
314 return err_offset;
315
289} 316}
290#endif /* CONFIG_NETLABEL */ 317#endif /* CONFIG_NETLABEL */
291 318
diff --git a/include/net/dst.h b/include/net/dst.h
index bed833d9796a..8197eadca819 100644
--- a/include/net/dst.h
+++ b/include/net/dst.h
@@ -60,6 +60,7 @@ struct dst_entry {
60#define DST_NOCOUNT 0x0020 60#define DST_NOCOUNT 0x0020
61#define DST_NOPEER 0x0040 61#define DST_NOPEER 0x0040
62#define DST_FAKE_RTABLE 0x0080 62#define DST_FAKE_RTABLE 0x0080
63#define DST_XFRM_TUNNEL 0x0100
63 64
64 short error; 65 short error;
65 short obsolete; 66 short obsolete;
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index b94765e38e80..2040bff945d4 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -40,7 +40,10 @@ struct inet_peer {
40 u32 pmtu_orig; 40 u32 pmtu_orig;
41 u32 pmtu_learned; 41 u32 pmtu_learned;
42 struct inetpeer_addr_base redirect_learned; 42 struct inetpeer_addr_base redirect_learned;
43 struct list_head gc_list; 43 union {
44 struct list_head gc_list;
45 struct rcu_head gc_rcu;
46 };
44 /* 47 /*
45 * Once inet_peer is queued for deletion (refcnt == -1), following fields 48 * Once inet_peer is queued for deletion (refcnt == -1), following fields
46 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp 49 * are not available: rid, ip_id_count, tcp_ts, tcp_ts_stamp
diff --git a/include/net/route.h b/include/net/route.h
index ed2b78e2375d..98705468ac03 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -130,9 +130,9 @@ static inline struct rtable *ip_route_output(struct net *net, __be32 daddr,
130{ 130{
131 struct flowi4 fl4 = { 131 struct flowi4 fl4 = {
132 .flowi4_oif = oif, 132 .flowi4_oif = oif,
133 .flowi4_tos = tos,
133 .daddr = daddr, 134 .daddr = daddr,
134 .saddr = saddr, 135 .saddr = saddr,
135 .flowi4_tos = tos,
136 }; 136 };
137 return ip_route_output_key(net, &fl4); 137 return ip_route_output_key(net, &fl4);
138} 138}
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index 55ce96b53b09..9d7d54a00e63 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -220,13 +220,16 @@ struct tcf_proto {
220 220
221struct qdisc_skb_cb { 221struct qdisc_skb_cb {
222 unsigned int pkt_len; 222 unsigned int pkt_len;
223 unsigned char data[24]; 223 u16 bond_queue_mapping;
224 u16 _pad;
225 unsigned char data[20];
224}; 226};
225 227
226static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) 228static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz)
227{ 229{
228 struct qdisc_skb_cb *qcb; 230 struct qdisc_skb_cb *qcb;
229 BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); 231
232 BUILD_BUG_ON(sizeof(skb->cb) < offsetof(struct qdisc_skb_cb, data) + sz);
230 BUILD_BUG_ON(sizeof(qcb->data) < sz); 233 BUILD_BUG_ON(sizeof(qcb->data) < sz);
231} 234}
232 235
diff --git a/include/scsi/fcoe_sysfs.h b/include/scsi/fcoe_sysfs.h
new file mode 100644
index 000000000000..604cb9bb3e76
--- /dev/null
+++ b/include/scsi/fcoe_sysfs.h
@@ -0,0 +1,124 @@
1/*
2 * Copyright (c) 2011-2012 Intel Corporation. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 *
13 * You should have received a copy of the GNU General Public License along with
14 * this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
16 *
17 * Maintained at www.Open-FCoE.org
18 */
19
20#ifndef FCOE_SYSFS
21#define FCOE_SYSFS
22
23#include <linux/if_ether.h>
24#include <linux/device.h>
25#include <scsi/fc/fc_fcoe.h>
26
27struct fcoe_ctlr_device;
28struct fcoe_fcf_device;
29
30struct fcoe_sysfs_function_template {
31 void (*get_fcoe_ctlr_link_fail)(struct fcoe_ctlr_device *);
32 void (*get_fcoe_ctlr_vlink_fail)(struct fcoe_ctlr_device *);
33 void (*get_fcoe_ctlr_miss_fka)(struct fcoe_ctlr_device *);
34 void (*get_fcoe_ctlr_symb_err)(struct fcoe_ctlr_device *);
35 void (*get_fcoe_ctlr_err_block)(struct fcoe_ctlr_device *);
36 void (*get_fcoe_ctlr_fcs_error)(struct fcoe_ctlr_device *);
37 void (*get_fcoe_ctlr_mode)(struct fcoe_ctlr_device *);
38 void (*get_fcoe_fcf_selected)(struct fcoe_fcf_device *);
39 void (*get_fcoe_fcf_vlan_id)(struct fcoe_fcf_device *);
40};
41
42#define dev_to_ctlr(d) \
43 container_of((d), struct fcoe_ctlr_device, dev)
44
45enum fip_conn_type {
46 FIP_CONN_TYPE_UNKNOWN,
47 FIP_CONN_TYPE_FABRIC,
48 FIP_CONN_TYPE_VN2VN,
49};
50
51struct fcoe_ctlr_device {
52 u32 id;
53
54 struct device dev;
55 struct fcoe_sysfs_function_template *f;
56
57 struct list_head fcfs;
58 char work_q_name[20];
59 struct workqueue_struct *work_q;
60 char devloss_work_q_name[20];
61 struct workqueue_struct *devloss_work_q;
62 struct mutex lock;
63
64 int fcf_dev_loss_tmo;
65 enum fip_conn_type mode;
66
67 /* expected in host order for displaying */
68 struct fcoe_fc_els_lesb lesb;
69};
70
71static inline void *fcoe_ctlr_device_priv(const struct fcoe_ctlr_device *ctlr)
72{
73 return (void *)(ctlr + 1);
74}
75
76/* fcf states */
77enum fcf_state {
78 FCOE_FCF_STATE_UNKNOWN,
79 FCOE_FCF_STATE_DISCONNECTED,
80 FCOE_FCF_STATE_CONNECTED,
81 FCOE_FCF_STATE_DELETED,
82};
83
84struct fcoe_fcf_device {
85 u32 id;
86 struct device dev;
87 struct list_head peers;
88 struct work_struct delete_work;
89 struct delayed_work dev_loss_work;
90 u32 dev_loss_tmo;
91 void *priv;
92 enum fcf_state state;
93
94 u64 fabric_name;
95 u64 switch_name;
96 u32 fc_map;
97 u16 vfid;
98 u8 mac[ETH_ALEN];
99 u8 priority;
100 u32 fka_period;
101 u8 selected;
102 u16 vlan_id;
103};
104
105#define dev_to_fcf(d) \
106 container_of((d), struct fcoe_fcf_device, dev)
107/* parentage should never be missing */
108#define fcoe_fcf_dev_to_ctlr_dev(x) \
109 dev_to_ctlr((x)->dev.parent)
110#define fcoe_fcf_device_priv(x) \
111 ((x)->priv)
112
113struct fcoe_ctlr_device *fcoe_ctlr_device_add(struct device *parent,
114 struct fcoe_sysfs_function_template *f,
115 int priv_size);
116void fcoe_ctlr_device_delete(struct fcoe_ctlr_device *);
117struct fcoe_fcf_device *fcoe_fcf_device_add(struct fcoe_ctlr_device *,
118 struct fcoe_fcf_device *);
119void fcoe_fcf_device_delete(struct fcoe_fcf_device *);
120
121int __init fcoe_sysfs_setup(void);
122void __exit fcoe_sysfs_teardown(void);
123
124#endif /* FCOE_SYSFS */
diff --git a/include/scsi/libfcoe.h b/include/scsi/libfcoe.h
index cfdb55f0937e..22b07cc99808 100644
--- a/include/scsi/libfcoe.h
+++ b/include/scsi/libfcoe.h
@@ -29,6 +29,7 @@
29#include <linux/random.h> 29#include <linux/random.h>
30#include <scsi/fc/fc_fcoe.h> 30#include <scsi/fc/fc_fcoe.h>
31#include <scsi/libfc.h> 31#include <scsi/libfc.h>
32#include <scsi/fcoe_sysfs.h>
32 33
33#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */ 34#define FCOE_MAX_CMD_LEN 16 /* Supported CDB length */
34 35
@@ -159,8 +160,24 @@ struct fcoe_ctlr {
159}; 160};
160 161
161/** 162/**
163 * fcoe_ctlr_priv() - Return the private data from a fcoe_ctlr
164 * @cltr: The fcoe_ctlr whose private data will be returned
165 */
166static inline void *fcoe_ctlr_priv(const struct fcoe_ctlr *ctlr)
167{
168 return (void *)(ctlr + 1);
169}
170
171#define fcoe_ctlr_to_ctlr_dev(x) \
172 (struct fcoe_ctlr_device *)(((struct fcoe_ctlr_device *)(x)) - 1)
173
174/**
162 * struct fcoe_fcf - Fibre-Channel Forwarder 175 * struct fcoe_fcf - Fibre-Channel Forwarder
163 * @list: list linkage 176 * @list: list linkage
177 * @event_work: Work for FC Transport actions queue
178 * @event: The event to be processed
179 * @fip: The controller that the FCF was discovered on
180 * @fcf_dev: The associated fcoe_fcf_device instance
164 * @time: system time (jiffies) when an advertisement was last received 181 * @time: system time (jiffies) when an advertisement was last received
165 * @switch_name: WWN of switch from advertisement 182 * @switch_name: WWN of switch from advertisement
166 * @fabric_name: WWN of fabric from advertisement 183 * @fabric_name: WWN of fabric from advertisement
@@ -182,6 +199,9 @@ struct fcoe_ctlr {
182 */ 199 */
183struct fcoe_fcf { 200struct fcoe_fcf {
184 struct list_head list; 201 struct list_head list;
202 struct work_struct event_work;
203 struct fcoe_ctlr *fip;
204 struct fcoe_fcf_device *fcf_dev;
185 unsigned long time; 205 unsigned long time;
186 206
187 u64 switch_name; 207 u64 switch_name;
@@ -198,6 +218,9 @@ struct fcoe_fcf {
198 u8 fd_flags:1; 218 u8 fd_flags:1;
199}; 219};
200 220
221#define fcoe_fcf_to_fcf_dev(x) \
222 ((x)->fcf_dev)
223
201/** 224/**
202 * struct fcoe_rport - VN2VN remote port 225 * struct fcoe_rport - VN2VN remote port
203 * @time: time of create or last beacon packet received from node 226 * @time: time of create or last beacon packet received from node
@@ -333,6 +356,10 @@ void fcoe_queue_timer(ulong lport);
333int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen, 356int fcoe_get_paged_crc_eof(struct sk_buff *skb, int tlen,
334 struct fcoe_percpu_s *fps); 357 struct fcoe_percpu_s *fps);
335 358
359/* FCoE Sysfs helpers */
360void fcoe_fcf_get_selected(struct fcoe_fcf_device *);
361void fcoe_ctlr_get_fip_mode(struct fcoe_ctlr_device *);
362
336/** 363/**
337 * struct netdev_list 364 * struct netdev_list
338 * A mapping from netdevice to fcoe_transport 365 * A mapping from netdevice to fcoe_transport
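
fcoe_ctlr_priv() and fcoe_ctlr_to_ctlr_dev() only make sense if the objects are laid out back to back in a single allocation: the fcoe_ctlr is assumed to sit immediately after its fcoe_ctlr_device (hence the pointer subtraction), and the LLD-private area of priv_size bytes immediately after the fcoe_ctlr (hence ctlr + 1). A rough userspace sketch of that layout, with illustrative stand-in types:

#include <stdio.h>
#include <stdlib.h>

struct ctlr_dev  { int id; };        /* stand-in for fcoe_ctlr_device */
struct ctlr      { int fip_mode; };  /* stand-in for fcoe_ctlr        */

/* one allocation: [ctlr_dev][ctlr][priv bytes] */
static struct ctlr_dev *ctlr_dev_add(size_t priv_size)
{
	return calloc(1, sizeof(struct ctlr_dev) + sizeof(struct ctlr) + priv_size);
}

static struct ctlr *dev_to_ctlr(struct ctlr_dev *d) { return (struct ctlr *)(d + 1); }
static struct ctlr_dev *ctlr_to_dev(struct ctlr *c) { return (struct ctlr_dev *)c - 1; }
static void *ctlr_priv(struct ctlr *c)              { return c + 1; }

int main(void)
{
	struct ctlr_dev *dev = ctlr_dev_add(sizeof(long));
	struct ctlr *ctlr = dev_to_ctlr(dev);

	*(long *)ctlr_priv(ctlr) = 0x1234;               /* LLD-private data */
	printf("round trip ok: %d\n", ctlr_to_dev(ctlr) == dev);
	free(dev);
	return 0;
}
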
diff --git a/include/target/target_core_fabric.h b/include/target/target_core_fabric.h
index 116959933f46..c78a23333c4f 100644
--- a/include/target/target_core_fabric.h
+++ b/include/target/target_core_fabric.h
@@ -47,6 +47,7 @@ struct target_core_fabric_ops {
47 */ 47 */
48 int (*check_stop_free)(struct se_cmd *); 48 int (*check_stop_free)(struct se_cmd *);
49 void (*release_cmd)(struct se_cmd *); 49 void (*release_cmd)(struct se_cmd *);
50 void (*put_session)(struct se_session *);
50 /* 51 /*
51 * Called with spin_lock_bh(struct se_portal_group->session_lock held. 52 * Called with spin_lock_bh(struct se_portal_group->session_lock held.
52 */ 53 */
diff --git a/include/trace/events/rcu.h b/include/trace/events/rcu.h
index 1480900c511c..d274734b2aa4 100644
--- a/include/trace/events/rcu.h
+++ b/include/trace/events/rcu.h
@@ -289,6 +289,7 @@ TRACE_EVENT(rcu_dyntick,
289 * "In holdoff": Nothing to do, holding off after unsuccessful attempt. 289 * "In holdoff": Nothing to do, holding off after unsuccessful attempt.
290 * "Begin holdoff": Attempt failed, don't retry until next jiffy. 290 * "Begin holdoff": Attempt failed, don't retry until next jiffy.
291 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks. 291 * "Dyntick with callbacks": Entering dyntick-idle despite callbacks.
292 * "Dyntick with lazy callbacks": Entering dyntick-idle w/lazy callbacks.
292 * "More callbacks": Still more callbacks, try again to clear them out. 293 * "More callbacks": Still more callbacks, try again to clear them out.
293 * "Callbacks drained": All callbacks processed, off to dyntick idle! 294 * "Callbacks drained": All callbacks processed, off to dyntick idle!
294 * "Timer": Timer fired to cause CPU to continue processing callbacks. 295 * "Timer": Timer fired to cause CPU to continue processing callbacks.
diff --git a/include/video/auo_k190xfb.h b/include/video/auo_k190xfb.h
new file mode 100644
index 000000000000..609efe8c686e
--- /dev/null
+++ b/include/video/auo_k190xfb.h
@@ -0,0 +1,106 @@
1/*
2 * Definitions for AUO-K190X framebuffer drivers
3 *
4 * Copyright (C) 2012 Heiko Stuebner <heiko@sntech.de>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifndef _LINUX_VIDEO_AUO_K190XFB_H_
12#define _LINUX_VIDEO_AUO_K190XFB_H_
13
14/* Controller standby command needs a param */
15#define AUOK190X_QUIRK_STANDBYPARAM (1 << 0)
16
17/* Controller standby is completely broken */
18#define AUOK190X_QUIRK_STANDBYBROKEN (1 << 1)
19
20/*
21 * Resolutions for the displays
22 */
23#define AUOK190X_RESOLUTION_800_600 0
24#define AUOK190X_RESOLUTION_1024_768 1
25
26/*
27 * struct used by auok190x. board specific stuff comes from *board
28 */
29struct auok190xfb_par {
30 struct fb_info *info;
31 struct auok190x_board *board;
32
33 struct regulator *regulator;
34
35 struct mutex io_lock;
36 struct delayed_work work;
37 wait_queue_head_t waitq;
38 int resolution;
39 int rotation;
40 int consecutive_threshold;
41 int update_cnt;
42
43 /* panel and controller informations */
44 int epd_type;
45 int panel_size_int;
46 int panel_size_float;
47 int panel_model;
48 int tcon_version;
49 int lut_version;
50
51 /* individual controller callbacks */
52 void (*update_partial)(struct auok190xfb_par *par, u16 y1, u16 y2);
53 void (*update_all)(struct auok190xfb_par *par);
54 bool (*need_refresh)(struct auok190xfb_par *par);
55 void (*init)(struct auok190xfb_par *par);
56 void (*recover)(struct auok190xfb_par *par);
57
58 int update_mode; /* mode to use for updates */
59 int last_mode; /* update mode last used */
60 int flash;
61
62 /* power management */
63 int autosuspend_delay;
64 bool standby;
65 bool manual_standby;
66};
67
68/**
69 * Board specific platform-data
70 * @init: initialize the controller interface
71 * @cleanup: cleanup the controller interface
72 * @wait_for_rdy: wait until the controller is not busy anymore
73 * @set_ctl: change an interface control
74 * @set_hdb: write a value to the data register
75 * @get_hdb: read a value from the data register
76 * @setup_irq: method to setup the irq handling on the busy gpio
77 * @gpio_nsleep: sleep gpio
78 * @gpio_nrst: reset gpio
79 * @gpio_nbusy: busy gpio
80 * @resolution: one of the AUOK190X_RESOLUTION constants
81 * @rotation: rotation of the framebuffer
82 * @quirks: controller quirks to honor
83 * @fps: frames per second for defio
84 */
85struct auok190x_board {
86 int (*init)(struct auok190xfb_par *);
87 void (*cleanup)(struct auok190xfb_par *);
88 int (*wait_for_rdy)(struct auok190xfb_par *);
89
90 void (*set_ctl)(struct auok190xfb_par *, unsigned char, u8);
91 void (*set_hdb)(struct auok190xfb_par *, u16);
92 u16 (*get_hdb)(struct auok190xfb_par *);
93
94 int (*setup_irq)(struct fb_info *);
95
96 int gpio_nsleep;
97 int gpio_nrst;
98 int gpio_nbusy;
99
100 int resolution;
101 int rotation;
102 int quirks;
103 int fps;
104};
105
106#endif
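
struct auok190x_board is the usual platform-data arrangement: the board file fills in interface callbacks, GPIO numbers and quirk bits, and the generic driver consults them at runtime. A small, self-contained sketch of that callback-plus-quirks pattern (illustrative names only, not the real framebuffer driver):

#include <stdio.h>

#define QUIRK_STANDBYPARAM  (1 << 0)   /* same flag style as above, values illustrative */
#define QUIRK_STANDBYBROKEN (1 << 1)

struct board_ops {                     /* stand-in for struct auok190x_board */
	int  (*init)(void);
	void (*set_ctl)(unsigned char reg, unsigned char val);
	int  quirks;
};

static int  demo_init(void)                                { puts("board init"); return 0; }
static void demo_set_ctl(unsigned char r, unsigned char v) { printf("ctl[%d]=%d\n", r, v); }

static void driver_standby(const struct board_ops *b)
{
	if (b->quirks & QUIRK_STANDBYBROKEN) {     /* honor the quirk, skip standby */
		puts("standby broken on this board, skipping");
		return;
	}
	puts("entering standby");
}

int main(void)
{
	struct board_ops board = {
		.init    = demo_init,
		.set_ctl = demo_set_ctl,
		.quirks  = QUIRK_STANDBYBROKEN,
	};

	board.init();
	board.set_ctl(1, 0x20);
	driver_standby(&board);
	return 0;
}
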
diff --git a/include/video/exynos_dp.h b/include/video/exynos_dp.h
index 8847a9d6dd42..bd8cabd344db 100644
--- a/include/video/exynos_dp.h
+++ b/include/video/exynos_dp.h
@@ -14,7 +14,7 @@
14 14
15#define DP_TIMEOUT_LOOP_COUNT 100 15#define DP_TIMEOUT_LOOP_COUNT 100
16#define MAX_CR_LOOP 5 16#define MAX_CR_LOOP 5
17#define MAX_EQ_LOOP 4 17#define MAX_EQ_LOOP 5
18 18
19enum link_rate_type { 19enum link_rate_type {
20 LINK_RATE_1_62GBPS = 0x06, 20 LINK_RATE_1_62GBPS = 0x06,
diff --git a/include/video/exynos_mipi_dsim.h b/include/video/exynos_mipi_dsim.h
index 772c770535f1..83ce5e667d47 100644
--- a/include/video/exynos_mipi_dsim.h
+++ b/include/video/exynos_mipi_dsim.h
@@ -315,6 +315,7 @@ struct mipi_dsim_lcd_device {
315 int id; 315 int id;
316 int bus_id; 316 int bus_id;
317 int irq; 317 int irq;
318 int panel_reverse;
318 319
319 struct mipi_dsim_device *master; 320 struct mipi_dsim_device *master;
320 void *platform_data; 321 void *platform_data;
diff --git a/include/video/omapdss.h b/include/video/omapdss.h
index 1c46a14341dd..c8e59b4a3364 100644
--- a/include/video/omapdss.h
+++ b/include/video/omapdss.h
@@ -51,6 +51,8 @@
51 51
52struct omap_dss_device; 52struct omap_dss_device;
53struct omap_overlay_manager; 53struct omap_overlay_manager;
54struct snd_aes_iec958;
55struct snd_cea_861_aud_if;
54 56
55enum omap_display_type { 57enum omap_display_type {
56 OMAP_DISPLAY_TYPE_NONE = 0, 58 OMAP_DISPLAY_TYPE_NONE = 0,
@@ -158,6 +160,13 @@ enum omap_dss_display_state {
158 OMAP_DSS_DISPLAY_SUSPENDED, 160 OMAP_DSS_DISPLAY_SUSPENDED,
159}; 161};
160 162
163enum omap_dss_audio_state {
164 OMAP_DSS_AUDIO_DISABLED = 0,
165 OMAP_DSS_AUDIO_ENABLED,
166 OMAP_DSS_AUDIO_CONFIGURED,
167 OMAP_DSS_AUDIO_PLAYING,
168};
169
161/* XXX perhaps this should be removed */ 170/* XXX perhaps this should be removed */
162enum omap_dss_overlay_managers { 171enum omap_dss_overlay_managers {
163 OMAP_DSS_OVL_MGR_LCD, 172 OMAP_DSS_OVL_MGR_LCD,
@@ -166,8 +175,9 @@ enum omap_dss_overlay_managers {
166}; 175};
167 176
168enum omap_dss_rotation_type { 177enum omap_dss_rotation_type {
169 OMAP_DSS_ROT_DMA = 0, 178 OMAP_DSS_ROT_DMA = 1 << 0,
170 OMAP_DSS_ROT_VRFB = 1, 179 OMAP_DSS_ROT_VRFB = 1 << 1,
180 OMAP_DSS_ROT_TILER = 1 << 2,
171}; 181};
172 182
173/* clockwise rotation angle */ 183/* clockwise rotation angle */
@@ -309,6 +319,7 @@ struct omap_dss_board_info {
309 struct omap_dss_device *default_device; 319 struct omap_dss_device *default_device;
310 int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask); 320 int (*dsi_enable_pads)(int dsi_id, unsigned lane_mask);
311 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask); 321 void (*dsi_disable_pads)(int dsi_id, unsigned lane_mask);
322 int (*set_min_bus_tput)(struct device *dev, unsigned long r);
312}; 323};
313 324
314/* Init with the board info */ 325/* Init with the board info */
@@ -316,11 +327,6 @@ extern int omap_display_init(struct omap_dss_board_info *board_data);
316/* HDMI mux init*/ 327/* HDMI mux init*/
317extern int omap_hdmi_init(enum omap_hdmi_flags flags); 328extern int omap_hdmi_init(enum omap_hdmi_flags flags);
318 329
319struct omap_display_platform_data {
320 struct omap_dss_board_info *board_data;
321 /* TODO: Additional members to be added when PM is considered */
322};
323
324struct omap_video_timings { 330struct omap_video_timings {
325 /* Unit: pixels */ 331 /* Unit: pixels */
326 u16 x_res; 332 u16 x_res;
@@ -587,6 +593,8 @@ struct omap_dss_device {
587 593
588 enum omap_dss_display_state state; 594 enum omap_dss_display_state state;
589 595
596 enum omap_dss_audio_state audio_state;
597
590 /* platform specific */ 598 /* platform specific */
591 int (*platform_enable)(struct omap_dss_device *dssdev); 599 int (*platform_enable)(struct omap_dss_device *dssdev);
592 void (*platform_disable)(struct omap_dss_device *dssdev); 600 void (*platform_disable)(struct omap_dss_device *dssdev);
@@ -599,6 +607,11 @@ struct omap_dss_hdmi_data
599 int hpd_gpio; 607 int hpd_gpio;
600}; 608};
601 609
610struct omap_dss_audio {
611 struct snd_aes_iec958 *iec;
612 struct snd_cea_861_aud_if *cea;
613};
614
602struct omap_dss_driver { 615struct omap_dss_driver {
603 struct device_driver driver; 616 struct device_driver driver;
604 617
@@ -646,6 +659,24 @@ struct omap_dss_driver {
646 659
647 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len); 660 int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
648 bool (*detect)(struct omap_dss_device *dssdev); 661 bool (*detect)(struct omap_dss_device *dssdev);
662
663 /*
664 * For display drivers that support audio. This encompasses
665 * HDMI and DisplayPort at the moment.
666 */
667 /*
668 * Note: These functions might sleep. Do not call while
669 * holding a spinlock/readlock.
670 */
671 int (*audio_enable)(struct omap_dss_device *dssdev);
672 void (*audio_disable)(struct omap_dss_device *dssdev);
673 bool (*audio_supported)(struct omap_dss_device *dssdev);
674 int (*audio_config)(struct omap_dss_device *dssdev,
675 struct omap_dss_audio *audio);
676 /* Note: These functions may not sleep */
677 int (*audio_start)(struct omap_dss_device *dssdev);
678 void (*audio_stop)(struct omap_dss_device *dssdev);
679
649}; 680};
650 681
651int omap_dss_register_driver(struct omap_dss_driver *); 682int omap_dss_register_driver(struct omap_dss_driver *);
@@ -670,6 +701,8 @@ struct omap_overlay *omap_dss_get_overlay(int num);
670void omapdss_default_get_resolution(struct omap_dss_device *dssdev, 701void omapdss_default_get_resolution(struct omap_dss_device *dssdev,
671 u16 *xres, u16 *yres); 702 u16 *xres, u16 *yres);
672int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev); 703int omapdss_default_get_recommended_bpp(struct omap_dss_device *dssdev);
704void omapdss_default_get_timings(struct omap_dss_device *dssdev,
705 struct omap_video_timings *timings);
673 706
674typedef void (*omap_dispc_isr_t) (void *arg, u32 mask); 707typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
675int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask); 708int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
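
The new audio hooks split into two groups: audio_enable/audio_disable/audio_supported/audio_config may sleep, while audio_start/audio_stop must not, and panels without audio can simply leave the callbacks unset. A hedged userspace sketch of how a caller might drive such an optional callback set (stand-in types, not the real omapdss API):

#include <stdio.h>
#include <stdbool.h>

/* illustrative stand-in for the audio part of struct omap_dss_driver */
struct dss_audio_ops {
	bool (*supported)(void);
	int  (*enable)(void);      /* may sleep in the real API      */
	int  (*start)(void);       /* must not sleep in the real API */
	void (*stop)(void);
	void (*disable)(void);
};

static bool hdmi_audio_supported(void) { return true; }
static int  hdmi_audio_enable(void)    { puts("audio enabled");  return 0; }
static int  hdmi_audio_start(void)     { puts("audio playing");  return 0; }
static void hdmi_audio_stop(void)      { puts("audio stopped");  }
static void hdmi_audio_disable(void)   { puts("audio disabled"); }

static int play(const struct dss_audio_ops *ops)
{
	/* panels without audio simply leave the ops unset */
	if (!ops->supported || !ops->supported())
		return -1;
	if (ops->enable())
		return -1;
	ops->start();
	ops->stop();
	ops->disable();
	return 0;
}

int main(void)
{
	struct dss_audio_ops hdmi = {
		.supported = hdmi_audio_supported,
		.enable    = hdmi_audio_enable,
		.start     = hdmi_audio_start,
		.stop      = hdmi_audio_stop,
		.disable   = hdmi_audio_disable,
	};
	return play(&hdmi);
}
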
diff --git a/include/video/sh_mobile_hdmi.h b/include/video/sh_mobile_hdmi.h
index 728f9de9c258..63d20efa254a 100644
--- a/include/video/sh_mobile_hdmi.h
+++ b/include/video/sh_mobile_hdmi.h
@@ -18,9 +18,11 @@ struct clk;
18/* 18/*
19 * flags format 19 * flags format
20 * 20 *
21 * 0x0000000A 21 * 0x00000CBA
22 * 22 *
23 * A: Audio source select 23 * A: Audio source select
24 * B: Int output option
25 * C: Chip specific option
24 */ 26 */
25 27
26/* Audio source select */ 28/* Audio source select */
@@ -30,6 +32,14 @@ struct clk;
30#define HDMI_SND_SRC_DSD (2 << 0) 32#define HDMI_SND_SRC_DSD (2 << 0)
31#define HDMI_SND_SRC_HBR (3 << 0) 33#define HDMI_SND_SRC_HBR (3 << 0)
32 34
35/* Int output option */
36#define HDMI_OUTPUT_PUSH_PULL (1 << 4) /* System control : output mode */
37#define HDMI_OUTPUT_POLARITY_HI (1 << 5) /* System control : output polarity */
38
39/* Chip specific option */
40#define HDMI_32BIT_REG (1 << 8)
41#define HDMI_HAS_HTOP1 (1 << 9)
42
33struct sh_mobile_hdmi_info { 43struct sh_mobile_hdmi_info {
34 unsigned int flags; 44 unsigned int flags;
35 long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq, 45 long (*clk_optimize_parent)(unsigned long target, unsigned long *best_freq,
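
The flags word is documented as three packed nibbles, 0x00000CBA: the audio source select in the low nibble, the interrupt-output options at bits 4-5 and the chip-specific options at bits 8-9. A short decode sketch; the HDMI_SND_SRC_MASK define is an assumption of mine (the header only lists the individual source values), the other constants match the hunk above:

#include <stdio.h>

#define HDMI_SND_SRC_MASK       (0xf << 0)   /* nibble A, mask assumed */
#define HDMI_SND_SRC_DSD        (2 << 0)
#define HDMI_OUTPUT_PUSH_PULL   (1 << 4)     /* nibble B */
#define HDMI_OUTPUT_POLARITY_HI (1 << 5)
#define HDMI_32BIT_REG          (1 << 8)     /* nibble C */
#define HDMI_HAS_HTOP1          (1 << 9)

int main(void)
{
	unsigned int flags = HDMI_SND_SRC_DSD | HDMI_OUTPUT_PUSH_PULL | HDMI_HAS_HTOP1;

	printf("audio source : %u\n", flags & HDMI_SND_SRC_MASK);
	printf("push-pull    : %s\n", (flags & HDMI_OUTPUT_PUSH_PULL) ? "yes" : "no");
	printf("polarity hi  : %s\n", (flags & HDMI_OUTPUT_POLARITY_HI) ? "yes" : "no");
	printf("32bit regs   : %s\n", (flags & HDMI_32BIT_REG) ? "yes" : "no");
	printf("has HTOP1    : %s\n", (flags & HDMI_HAS_HTOP1) ? "yes" : "no");
	return 0;
}
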
diff --git a/init/Kconfig b/init/Kconfig
index 1e004d057468..d07dcf9fc8a9 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -167,7 +167,7 @@ config KERNEL_BZIP2
167 depends on HAVE_KERNEL_BZIP2 167 depends on HAVE_KERNEL_BZIP2
168 help 168 help
169 Its compression ratio and speed is intermediate. 169 Its compression ratio and speed is intermediate.
170 Decompression speed is slowest among the three. The kernel 170 Decompression speed is slowest among the choices. The kernel
171 size is about 10% smaller with bzip2, in comparison to gzip. 171 size is about 10% smaller with bzip2, in comparison to gzip.
172 Bzip2 uses a large amount of memory. For modern kernels you 172 Bzip2 uses a large amount of memory. For modern kernels you
173 will need at least 8MB RAM or more for booting. 173 will need at least 8MB RAM or more for booting.
@@ -176,10 +176,9 @@ config KERNEL_LZMA
176 bool "LZMA" 176 bool "LZMA"
177 depends on HAVE_KERNEL_LZMA 177 depends on HAVE_KERNEL_LZMA
178 help 178 help
179 The most recent compression algorithm. 179 This compression algorithm's ratio is best. Decompression speed
180 Its ratio is best, decompression speed is between the other 180 is between gzip and bzip2. Compression is slowest.
181 two. Compression is slowest. The kernel size is about 33% 181 The kernel size is about 33% smaller with LZMA in comparison to gzip.
182 smaller with LZMA in comparison to gzip.
183 182
184config KERNEL_XZ 183config KERNEL_XZ
185 bool "XZ" 184 bool "XZ"
@@ -200,7 +199,7 @@ config KERNEL_LZO
200 bool "LZO" 199 bool "LZO"
201 depends on HAVE_KERNEL_LZO 200 depends on HAVE_KERNEL_LZO
202 help 201 help
203 Its compression ratio is the poorest among the 4. The kernel 202 Its compression ratio is the poorest among the choices. The kernel
204 size is about 10% bigger than gzip; however its speed 203 size is about 10% bigger than gzip; however its speed
205 (both compression and decompression) is the fastest. 204 (both compression and decompression) is the fastest.
206 205
diff --git a/init/do_mounts.c b/init/do_mounts.c
index 42b0707c3481..d3f0aeed2d39 100644
--- a/init/do_mounts.c
+++ b/init/do_mounts.c
@@ -1,3 +1,13 @@
1/*
2 * Many of the syscalls used in this file expect some of the arguments
3 * to be __user pointers not __kernel pointers. To limit the sparse
4 * noise, turn off sparse checking for this file.
5 */
6#ifdef __CHECKER__
7#undef __CHECKER__
8#warning "Sparse checking disabled for this file"
9#endif
10
1#include <linux/module.h> 11#include <linux/module.h>
2#include <linux/sched.h> 12#include <linux/sched.h>
3#include <linux/ctype.h> 13#include <linux/ctype.h>
@@ -330,7 +340,7 @@ static int __init do_mount_root(char *name, char *fs, int flags, void *data)
330 if (err) 340 if (err)
331 return err; 341 return err;
332 342
333 sys_chdir((const char __user __force *)"/root"); 343 sys_chdir("/root");
334 s = current->fs->pwd.dentry->d_sb; 344 s = current->fs->pwd.dentry->d_sb;
335 ROOT_DEV = s->s_dev; 345 ROOT_DEV = s->s_dev;
336 printk(KERN_INFO 346 printk(KERN_INFO
@@ -556,5 +566,5 @@ void __init prepare_namespace(void)
556out: 566out:
557 devtmpfs_mount("dev"); 567 devtmpfs_mount("dev");
558 sys_mount(".", "/", NULL, MS_MOVE, NULL); 568 sys_mount(".", "/", NULL, MS_MOVE, NULL);
559 sys_chroot((const char __user __force *)"."); 569 sys_chroot(".");
560} 570}
diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
index 9047330c73e9..135959a276be 100644
--- a/init/do_mounts_initrd.c
+++ b/init/do_mounts_initrd.c
@@ -1,3 +1,13 @@
1/*
2 * Many of the syscalls used in this file expect some of the arguments
3 * to be __user pointers not __kernel pointers. To limit the sparse
4 * noise, turn off sparse checking for this file.
5 */
6#ifdef __CHECKER__
7#undef __CHECKER__
8#warning "Sparse checking disabled for this file"
9#endif
10
1#include <linux/unistd.h> 11#include <linux/unistd.h>
2#include <linux/kernel.h> 12#include <linux/kernel.h>
3#include <linux/fs.h> 13#include <linux/fs.h>
diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
index 32c4799b8c91..8cb6db54285b 100644
--- a/init/do_mounts_md.c
+++ b/init/do_mounts_md.c
@@ -1,3 +1,13 @@
1/*
2 * Many of the syscalls used in this file expect some of the arguments
3 * to be __user pointers not __kernel pointers. To limit the sparse
4 * noise, turn off sparse checking for this file.
5 */
6#ifdef __CHECKER__
7#undef __CHECKER__
8#warning "Sparse checking disabled for this file"
9#endif
10
1#include <linux/delay.h> 11#include <linux/delay.h>
2#include <linux/raid/md_u.h> 12#include <linux/raid/md_u.h>
3#include <linux/raid/md_p.h> 13#include <linux/raid/md_p.h>
@@ -283,7 +293,7 @@ static void __init autodetect_raid(void)
283 293
284 wait_for_device_probe(); 294 wait_for_device_probe();
285 295
286 fd = sys_open((const char __user __force *) "/dev/md0", 0, 0); 296 fd = sys_open("/dev/md0", 0, 0);
287 if (fd >= 0) { 297 if (fd >= 0) {
288 sys_ioctl(fd, RAID_AUTORUN, raid_autopart); 298 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
289 sys_close(fd); 299 sys_close(fd);
diff --git a/init/do_mounts_rd.c b/init/do_mounts_rd.c
index 6212586df29a..6be2879cca66 100644
--- a/init/do_mounts_rd.c
+++ b/init/do_mounts_rd.c
@@ -1,3 +1,12 @@
1/*
2 * Many of the syscalls used in this file expect some of the arguments
3 * to be __user pointers not __kernel pointers. To limit the sparse
4 * noise, turn off sparse checking for this file.
5 */
6#ifdef __CHECKER__
7#undef __CHECKER__
8#warning "Sparse checking disabled for this file"
9#endif
1 10
2#include <linux/kernel.h> 11#include <linux/kernel.h>
3#include <linux/fs.h> 12#include <linux/fs.h>
@@ -181,7 +190,7 @@ int __init rd_load_image(char *from)
181 char rotator[4] = { '|' , '/' , '-' , '\\' }; 190 char rotator[4] = { '|' , '/' , '-' , '\\' };
182#endif 191#endif
183 192
184 out_fd = sys_open((const char __user __force *) "/dev/ram", O_RDWR, 0); 193 out_fd = sys_open("/dev/ram", O_RDWR, 0);
185 if (out_fd < 0) 194 if (out_fd < 0)
186 goto out; 195 goto out;
187 196
@@ -280,7 +289,7 @@ noclose_input:
280 sys_close(out_fd); 289 sys_close(out_fd);
281out: 290out:
282 kfree(buf); 291 kfree(buf);
283 sys_unlink((const char __user __force *) "/dev/ram"); 292 sys_unlink("/dev/ram");
284 return res; 293 return res;
285} 294}
286 295
diff --git a/init/initramfs.c b/init/initramfs.c
index 8216c303b082..84c6bf111300 100644
--- a/init/initramfs.c
+++ b/init/initramfs.c
@@ -1,3 +1,13 @@
1/*
2 * Many of the syscalls used in this file expect some of the arguments
3 * to be __user pointers not __kernel pointers. To limit the sparse
4 * noise, turn off sparse checking for this file.
5 */
6#ifdef __CHECKER__
7#undef __CHECKER__
8#warning "Sparse checking disabled for this file"
9#endif
10
1#include <linux/init.h> 11#include <linux/init.h>
2#include <linux/fs.h> 12#include <linux/fs.h>
3#include <linux/slab.h> 13#include <linux/slab.h>
@@ -74,7 +84,7 @@ static void __init free_hash(void)
74 } 84 }
75} 85}
76 86
77static long __init do_utime(char __user *filename, time_t mtime) 87static long __init do_utime(char *filename, time_t mtime)
78{ 88{
79 struct timespec t[2]; 89 struct timespec t[2];
80 90
@@ -529,7 +539,7 @@ static void __init clean_rootfs(void)
529 struct linux_dirent64 *dirp; 539 struct linux_dirent64 *dirp;
530 int num; 540 int num;
531 541
532 fd = sys_open((const char __user __force *) "/", O_RDONLY, 0); 542 fd = sys_open("/", O_RDONLY, 0);
533 WARN_ON(fd < 0); 543 WARN_ON(fd < 0);
534 if (fd < 0) 544 if (fd < 0)
535 return; 545 return;
@@ -589,7 +599,7 @@ static int __init populate_rootfs(void)
589 } 599 }
590 printk(KERN_INFO "rootfs image is not initramfs (%s)" 600 printk(KERN_INFO "rootfs image is not initramfs (%s)"
591 "; looks like an initrd\n", err); 601 "; looks like an initrd\n", err);
592 fd = sys_open((const char __user __force *) "/initrd.image", 602 fd = sys_open("/initrd.image",
593 O_WRONLY|O_CREAT, 0700); 603 O_WRONLY|O_CREAT, 0700);
594 if (fd >= 0) { 604 if (fd >= 0) {
595 sys_write(fd, (char *)initrd_start, 605 sys_write(fd, (char *)initrd_start,
diff --git a/init/main.c b/init/main.c
index 1ca6b32c4828..b5cc0a7c4708 100644
--- a/init/main.c
+++ b/init/main.c
@@ -508,7 +508,7 @@ asmlinkage void __init start_kernel(void)
508 parse_early_param(); 508 parse_early_param();
509 parse_args("Booting kernel", static_command_line, __start___param, 509 parse_args("Booting kernel", static_command_line, __start___param,
510 __stop___param - __start___param, 510 __stop___param - __start___param,
511 0, 0, &unknown_bootoption); 511 -1, -1, &unknown_bootoption);
512 512
513 jump_label_init(); 513 jump_label_init();
514 514
@@ -755,13 +755,8 @@ static void __init do_initcalls(void)
755{ 755{
756 int level; 756 int level;
757 757
758 for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++) { 758 for (level = 0; level < ARRAY_SIZE(initcall_levels) - 1; level++)
759 pr_info("initlevel:%d=%s, %d registered initcalls\n",
760 level, initcall_level_names[level],
761 (int) (initcall_levels[level+1]
762 - initcall_levels[level]));
763 do_initcall_level(level); 759 do_initcall_level(level);
764 }
765} 760}
766 761
767/* 762/*
diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
index 0c09366b96f3..383d638340b8 100644
--- a/ipc/mq_sysctl.c
+++ b/ipc/mq_sysctl.c
@@ -13,15 +13,6 @@
13#include <linux/ipc_namespace.h> 13#include <linux/ipc_namespace.h>
14#include <linux/sysctl.h> 14#include <linux/sysctl.h>
15 15
16/*
17 * Define the ranges various user-specified maximum values can
18 * be set to.
19 */
20#define MIN_MSGMAX 1 /* min value for msg_max */
21#define MAX_MSGMAX HARD_MSGMAX /* max value for msg_max */
22#define MIN_MSGSIZEMAX 128 /* min value for msgsize_max */
23#define MAX_MSGSIZEMAX (8192*128) /* max value for msgsize_max */
24
25#ifdef CONFIG_PROC_SYSCTL 16#ifdef CONFIG_PROC_SYSCTL
26static void *get_mq(ctl_table *table) 17static void *get_mq(ctl_table *table)
27{ 18{
@@ -31,16 +22,6 @@ static void *get_mq(ctl_table *table)
31 return which; 22 return which;
32} 23}
33 24
34static int proc_mq_dointvec(ctl_table *table, int write,
35 void __user *buffer, size_t *lenp, loff_t *ppos)
36{
37 struct ctl_table mq_table;
38 memcpy(&mq_table, table, sizeof(mq_table));
39 mq_table.data = get_mq(table);
40
41 return proc_dointvec(&mq_table, write, buffer, lenp, ppos);
42}
43
44static int proc_mq_dointvec_minmax(ctl_table *table, int write, 25static int proc_mq_dointvec_minmax(ctl_table *table, int write,
45 void __user *buffer, size_t *lenp, loff_t *ppos) 26 void __user *buffer, size_t *lenp, loff_t *ppos)
46{ 27{
@@ -52,15 +33,17 @@ static int proc_mq_dointvec_minmax(ctl_table *table, int write,
52 lenp, ppos); 33 lenp, ppos);
53} 34}
54#else 35#else
55#define proc_mq_dointvec NULL
56#define proc_mq_dointvec_minmax NULL 36#define proc_mq_dointvec_minmax NULL
57#endif 37#endif
58 38
39static int msg_queues_limit_min = MIN_QUEUESMAX;
40static int msg_queues_limit_max = HARD_QUEUESMAX;
41
59static int msg_max_limit_min = MIN_MSGMAX; 42static int msg_max_limit_min = MIN_MSGMAX;
60static int msg_max_limit_max = MAX_MSGMAX; 43static int msg_max_limit_max = HARD_MSGMAX;
61 44
62static int msg_maxsize_limit_min = MIN_MSGSIZEMAX; 45static int msg_maxsize_limit_min = MIN_MSGSIZEMAX;
63static int msg_maxsize_limit_max = MAX_MSGSIZEMAX; 46static int msg_maxsize_limit_max = HARD_MSGSIZEMAX;
64 47
65static ctl_table mq_sysctls[] = { 48static ctl_table mq_sysctls[] = {
66 { 49 {
@@ -68,7 +51,9 @@ static ctl_table mq_sysctls[] = {
68 .data = &init_ipc_ns.mq_queues_max, 51 .data = &init_ipc_ns.mq_queues_max,
69 .maxlen = sizeof(int), 52 .maxlen = sizeof(int),
70 .mode = 0644, 53 .mode = 0644,
71 .proc_handler = proc_mq_dointvec, 54 .proc_handler = proc_mq_dointvec_minmax,
55 .extra1 = &msg_queues_limit_min,
56 .extra2 = &msg_queues_limit_max,
72 }, 57 },
73 { 58 {
74 .procname = "msg_max", 59 .procname = "msg_max",
@@ -88,6 +73,24 @@ static ctl_table mq_sysctls[] = {
88 .extra1 = &msg_maxsize_limit_min, 73 .extra1 = &msg_maxsize_limit_min,
89 .extra2 = &msg_maxsize_limit_max, 74 .extra2 = &msg_maxsize_limit_max,
90 }, 75 },
76 {
77 .procname = "msg_default",
78 .data = &init_ipc_ns.mq_msg_default,
79 .maxlen = sizeof(int),
80 .mode = 0644,
81 .proc_handler = proc_mq_dointvec_minmax,
82 .extra1 = &msg_max_limit_min,
83 .extra2 = &msg_max_limit_max,
84 },
85 {
86 .procname = "msgsize_default",
87 .data = &init_ipc_ns.mq_msgsize_default,
88 .maxlen = sizeof(int),
89 .mode = 0644,
90 .proc_handler = proc_mq_dointvec_minmax,
91 .extra1 = &msg_maxsize_limit_min,
92 .extra2 = &msg_maxsize_limit_max,
93 },
91 {} 94 {}
92}; 95};
93 96
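
mq_queues_max now goes through proc_mq_dointvec_minmax with extra1/extra2 bounds, so out-of-range writes are rejected rather than silently accepted. A toy model of that min/max handler behaviour (the 1 and 1024 limits below are stand-ins, not the real MIN_QUEUESMAX/HARD_QUEUESMAX values):

#include <stdio.h>
#include <errno.h>

/* crude userspace model of proc_dointvec_minmax(): reject writes outside [min, max] */
static int dointvec_minmax(int *data, int new_val, int min, int max)
{
	if (new_val < min || new_val > max)
		return -EINVAL;
	*data = new_val;
	return 0;
}

int main(void)
{
	int queues_max = 256;
	int min = 1, max = 1024;          /* stand-in limits */

	printf("write 512  -> %d (queues_max=%d)\n",
	       dointvec_minmax(&queues_max, 512, min, max), queues_max);
	printf("write 9999 -> %d (queues_max=%d)\n",
	       dointvec_minmax(&queues_max, 9999, min, max), queues_max);
	return 0;
}
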
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index a2757d4ab773..8ce57691e7b6 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -24,6 +24,7 @@
24#include <linux/mqueue.h> 24#include <linux/mqueue.h>
25#include <linux/msg.h> 25#include <linux/msg.h>
26#include <linux/skbuff.h> 26#include <linux/skbuff.h>
27#include <linux/vmalloc.h>
27#include <linux/netlink.h> 28#include <linux/netlink.h>
28#include <linux/syscalls.h> 29#include <linux/syscalls.h>
29#include <linux/audit.h> 30#include <linux/audit.h>
@@ -49,6 +50,12 @@
49#define STATE_PENDING 1 50#define STATE_PENDING 1
50#define STATE_READY 2 51#define STATE_READY 2
51 52
53struct posix_msg_tree_node {
54 struct rb_node rb_node;
55 struct list_head msg_list;
56 int priority;
57};
58
52struct ext_wait_queue { /* queue of sleeping tasks */ 59struct ext_wait_queue { /* queue of sleeping tasks */
53 struct task_struct *task; 60 struct task_struct *task;
54 struct list_head list; 61 struct list_head list;
@@ -61,7 +68,8 @@ struct mqueue_inode_info {
61 struct inode vfs_inode; 68 struct inode vfs_inode;
62 wait_queue_head_t wait_q; 69 wait_queue_head_t wait_q;
63 70
64 struct msg_msg **messages; 71 struct rb_root msg_tree;
72 struct posix_msg_tree_node *node_cache;
65 struct mq_attr attr; 73 struct mq_attr attr;
66 74
67 struct sigevent notify; 75 struct sigevent notify;
@@ -109,6 +117,103 @@ static struct ipc_namespace *get_ns_from_inode(struct inode *inode)
109 return ns; 117 return ns;
110} 118}
111 119
120/* Auxiliary functions to manipulate messages' list */
121static int msg_insert(struct msg_msg *msg, struct mqueue_inode_info *info)
122{
123 struct rb_node **p, *parent = NULL;
124 struct posix_msg_tree_node *leaf;
125
126 p = &info->msg_tree.rb_node;
127 while (*p) {
128 parent = *p;
129 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
130
131 if (likely(leaf->priority == msg->m_type))
132 goto insert_msg;
133 else if (msg->m_type < leaf->priority)
134 p = &(*p)->rb_left;
135 else
136 p = &(*p)->rb_right;
137 }
138 if (info->node_cache) {
139 leaf = info->node_cache;
140 info->node_cache = NULL;
141 } else {
142 leaf = kmalloc(sizeof(*leaf), GFP_ATOMIC);
143 if (!leaf)
144 return -ENOMEM;
145 rb_init_node(&leaf->rb_node);
146 INIT_LIST_HEAD(&leaf->msg_list);
147 info->qsize += sizeof(*leaf);
148 }
149 leaf->priority = msg->m_type;
150 rb_link_node(&leaf->rb_node, parent, p);
151 rb_insert_color(&leaf->rb_node, &info->msg_tree);
152insert_msg:
153 info->attr.mq_curmsgs++;
154 info->qsize += msg->m_ts;
155 list_add_tail(&msg->m_list, &leaf->msg_list);
156 return 0;
157}
158
159static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
160{
161 struct rb_node **p, *parent = NULL;
162 struct posix_msg_tree_node *leaf;
163 struct msg_msg *msg;
164
165try_again:
166 p = &info->msg_tree.rb_node;
167 while (*p) {
168 parent = *p;
169 /*
170 * During insert, low priorities go to the left and high to the
171 * right. On receive, we want the highest priorities first, so
172 * walk all the way to the right.
173 */
174 p = &(*p)->rb_right;
175 }
176 if (!parent) {
177 if (info->attr.mq_curmsgs) {
178 pr_warn_once("Inconsistency in POSIX message queue, "
179 "no tree element, but supposedly messages "
180 "should exist!\n");
181 info->attr.mq_curmsgs = 0;
182 }
183 return NULL;
184 }
185 leaf = rb_entry(parent, struct posix_msg_tree_node, rb_node);
186 if (unlikely(list_empty(&leaf->msg_list))) {
187 pr_warn_once("Inconsistency in POSIX message queue, "
188 "empty leaf node but we haven't implemented "
189 "lazy leaf delete!\n");
190 rb_erase(&leaf->rb_node, &info->msg_tree);
191 if (info->node_cache) {
192 info->qsize -= sizeof(*leaf);
193 kfree(leaf);
194 } else {
195 info->node_cache = leaf;
196 }
197 goto try_again;
198 } else {
199 msg = list_first_entry(&leaf->msg_list,
200 struct msg_msg, m_list);
201 list_del(&msg->m_list);
202 if (list_empty(&leaf->msg_list)) {
203 rb_erase(&leaf->rb_node, &info->msg_tree);
204 if (info->node_cache) {
205 info->qsize -= sizeof(*leaf);
206 kfree(leaf);
207 } else {
208 info->node_cache = leaf;
209 }
210 }
211 }
212 info->attr.mq_curmsgs--;
213 info->qsize -= msg->m_ts;
214 return msg;
215}
216
112static struct inode *mqueue_get_inode(struct super_block *sb, 217static struct inode *mqueue_get_inode(struct super_block *sb,
113 struct ipc_namespace *ipc_ns, umode_t mode, 218 struct ipc_namespace *ipc_ns, umode_t mode,
114 struct mq_attr *attr) 219 struct mq_attr *attr)
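
The new msg_insert()/msg_get() pair keeps one tree node per message priority, each holding a FIFO list of messages, and receive always walks to the highest priority. A simplified, self-contained userspace sketch of the same idea, using a sorted list of priority buckets where the kernel uses an rbtree:

#include <stdio.h>
#include <stdlib.h>

/* One bucket per priority (sorted list standing in for the kernel's rbtree),
 * a FIFO of messages inside each bucket. Receive takes from the highest
 * priority bucket, oldest message first. */
struct msg    { int prio; const char *text; struct msg *next; };
struct bucket { int prio; struct msg *head, *tail; struct bucket *next; };

static struct bucket *buckets;   /* sorted, highest priority first */

static void send_msg(int prio, const char *text)
{
	struct bucket **pp = &buckets, *b;
	struct msg *m = malloc(sizeof(*m));

	m->prio = prio; m->text = text; m->next = NULL;
	while (*pp && (*pp)->prio > prio)
		pp = &(*pp)->next;
	if (!*pp || (*pp)->prio != prio) {          /* new priority: new bucket */
		b = calloc(1, sizeof(*b));
		b->prio = prio;
		b->next = *pp;
		*pp = b;
	}
	b = *pp;
	if (b->tail) b->tail->next = m; else b->head = m;
	b->tail = m;
}

static struct msg *recv_msg(void)
{
	struct bucket *b = buckets;
	struct msg *m;

	if (!b) return NULL;
	m = b->head;
	b->head = m->next;
	if (!b->head) {                             /* bucket empty: drop it */
		buckets = b->next;
		free(b);
	}
	return m;
}

int main(void)
{
	struct msg *m;

	send_msg(1, "low-1");  send_msg(5, "high-1");
	send_msg(5, "high-2"); send_msg(1, "low-2");
	while ((m = recv_msg())) {
		printf("prio %d: %s\n", m->prio, m->text);
		free(m);
	}
	return 0;
}
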
@@ -129,7 +234,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
129 234
130 if (S_ISREG(mode)) { 235 if (S_ISREG(mode)) {
131 struct mqueue_inode_info *info; 236 struct mqueue_inode_info *info;
132 unsigned long mq_bytes, mq_msg_tblsz; 237 unsigned long mq_bytes, mq_treesize;
133 238
134 inode->i_fop = &mqueue_file_operations; 239 inode->i_fop = &mqueue_file_operations;
135 inode->i_size = FILENT_SIZE; 240 inode->i_size = FILENT_SIZE;
@@ -143,20 +248,36 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
143 info->notify_user_ns = NULL; 248 info->notify_user_ns = NULL;
144 info->qsize = 0; 249 info->qsize = 0;
145 info->user = NULL; /* set when all is ok */ 250 info->user = NULL; /* set when all is ok */
251 info->msg_tree = RB_ROOT;
252 info->node_cache = NULL;
146 memset(&info->attr, 0, sizeof(info->attr)); 253 memset(&info->attr, 0, sizeof(info->attr));
147 info->attr.mq_maxmsg = ipc_ns->mq_msg_max; 254 info->attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
148 info->attr.mq_msgsize = ipc_ns->mq_msgsize_max; 255 ipc_ns->mq_msg_default);
256 info->attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
257 ipc_ns->mq_msgsize_default);
149 if (attr) { 258 if (attr) {
150 info->attr.mq_maxmsg = attr->mq_maxmsg; 259 info->attr.mq_maxmsg = attr->mq_maxmsg;
151 info->attr.mq_msgsize = attr->mq_msgsize; 260 info->attr.mq_msgsize = attr->mq_msgsize;
152 } 261 }
153 mq_msg_tblsz = info->attr.mq_maxmsg * sizeof(struct msg_msg *); 262 /*
154 info->messages = kmalloc(mq_msg_tblsz, GFP_KERNEL); 263 * We used to allocate a static array of pointers and account
155 if (!info->messages) 264 * the size of that array as well as one msg_msg struct per
156 goto out_inode; 265 * possible message into the queue size. That's no longer
266 * accurate as the queue is now an rbtree and will grow and
267 * shrink depending on usage patterns. We can, however, still
268 * account one msg_msg struct per message, but the nodes are
269 * allocated depending on priority usage, and most programs
270 * only use one, or a handful, of priorities. However, since
271 * this is pinned memory, we need to assume worst case, so
272 * that means the min(mq_maxmsg, max_priorities) * struct
273 * posix_msg_tree_node.
274 */
275 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
276 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
277 sizeof(struct posix_msg_tree_node);
157 278
158 mq_bytes = (mq_msg_tblsz + 279 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
159 (info->attr.mq_maxmsg * info->attr.mq_msgsize)); 280 info->attr.mq_msgsize);
160 281
161 spin_lock(&mq_lock); 282 spin_lock(&mq_lock);
162 if (u->mq_bytes + mq_bytes < u->mq_bytes || 283 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
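
The accounting comment above boils down to a worst-case formula: one struct msg_msg per possible message, one tree node per possible priority (capped at MQ_PRIO_MAX), plus the message payload itself. A small arithmetic sketch with stand-in structure sizes (the real sizes depend on the build):

#include <stdio.h>

/* Worked example of the pinned-memory accounting. The structure sizes and
 * MQ_PRIO_MAX here are stand-in values, not taken from a real build. */
#define MQ_PRIO_MAX       32768
#define SIZEOF_MSG_MSG    48UL
#define SIZEOF_TREE_NODE  40UL

static unsigned long min_ul(unsigned long a, unsigned long b) { return a < b ? a : b; }

int main(void)
{
	unsigned long mq_maxmsg = 10, mq_msgsize = 8192;   /* typical default attrs */

	unsigned long mq_treesize = mq_maxmsg * SIZEOF_MSG_MSG +
		min_ul(mq_maxmsg, MQ_PRIO_MAX) * SIZEOF_TREE_NODE;
	unsigned long mq_bytes = mq_treesize + mq_maxmsg * mq_msgsize;

	/* 10*48 + 10*40 = 880 bytes of overhead, plus 10*8192 = 81920 of payload */
	printf("mq_treesize = %lu, mq_bytes = %lu\n", mq_treesize, mq_bytes);
	return 0;
}
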
@@ -247,9 +368,9 @@ static void mqueue_evict_inode(struct inode *inode)
247{ 368{
248 struct mqueue_inode_info *info; 369 struct mqueue_inode_info *info;
249 struct user_struct *user; 370 struct user_struct *user;
250 unsigned long mq_bytes; 371 unsigned long mq_bytes, mq_treesize;
251 int i;
252 struct ipc_namespace *ipc_ns; 372 struct ipc_namespace *ipc_ns;
373 struct msg_msg *msg;
253 374
254 clear_inode(inode); 375 clear_inode(inode);
255 376
@@ -259,14 +380,19 @@ static void mqueue_evict_inode(struct inode *inode)
259 ipc_ns = get_ns_from_inode(inode); 380 ipc_ns = get_ns_from_inode(inode);
260 info = MQUEUE_I(inode); 381 info = MQUEUE_I(inode);
261 spin_lock(&info->lock); 382 spin_lock(&info->lock);
262 for (i = 0; i < info->attr.mq_curmsgs; i++) 383 while ((msg = msg_get(info)) != NULL)
263 free_msg(info->messages[i]); 384 free_msg(msg);
264 kfree(info->messages); 385 kfree(info->node_cache);
265 spin_unlock(&info->lock); 386 spin_unlock(&info->lock);
266 387
267 /* Total amount of bytes accounted for the mqueue */ 388 /* Total amount of bytes accounted for the mqueue */
268 mq_bytes = info->attr.mq_maxmsg * (sizeof(struct msg_msg *) 389 mq_treesize = info->attr.mq_maxmsg * sizeof(struct msg_msg) +
269 + info->attr.mq_msgsize); 390 min_t(unsigned int, info->attr.mq_maxmsg, MQ_PRIO_MAX) *
391 sizeof(struct posix_msg_tree_node);
392
393 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
394 info->attr.mq_msgsize);
395
270 user = info->user; 396 user = info->user;
271 if (user) { 397 if (user) {
272 spin_lock(&mq_lock); 398 spin_lock(&mq_lock);
@@ -300,8 +426,9 @@ static int mqueue_create(struct inode *dir, struct dentry *dentry,
300 error = -EACCES; 426 error = -EACCES;
301 goto out_unlock; 427 goto out_unlock;
302 } 428 }
303 if (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max && 429 if (ipc_ns->mq_queues_count >= HARD_QUEUESMAX ||
304 !capable(CAP_SYS_RESOURCE)) { 430 (ipc_ns->mq_queues_count >= ipc_ns->mq_queues_max &&
431 !capable(CAP_SYS_RESOURCE))) {
305 error = -ENOSPC; 432 error = -ENOSPC;
306 goto out_unlock; 433 goto out_unlock;
307 } 434 }
@@ -485,26 +612,6 @@ static struct ext_wait_queue *wq_get_first_waiter(
485 return list_entry(ptr, struct ext_wait_queue, list); 612 return list_entry(ptr, struct ext_wait_queue, list);
486} 613}
487 614
488/* Auxiliary functions to manipulate messages' list */
489static void msg_insert(struct msg_msg *ptr, struct mqueue_inode_info *info)
490{
491 int k;
492
493 k = info->attr.mq_curmsgs - 1;
494 while (k >= 0 && info->messages[k]->m_type >= ptr->m_type) {
495 info->messages[k + 1] = info->messages[k];
496 k--;
497 }
498 info->attr.mq_curmsgs++;
499 info->qsize += ptr->m_ts;
500 info->messages[k + 1] = ptr;
501}
502
503static inline struct msg_msg *msg_get(struct mqueue_inode_info *info)
504{
505 info->qsize -= info->messages[--info->attr.mq_curmsgs]->m_ts;
506 return info->messages[info->attr.mq_curmsgs];
507}
508 615
509static inline void set_cookie(struct sk_buff *skb, char code) 616static inline void set_cookie(struct sk_buff *skb, char code)
510{ 617{
@@ -585,24 +692,30 @@ static void remove_notification(struct mqueue_inode_info *info)
585 692
586static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr) 693static int mq_attr_ok(struct ipc_namespace *ipc_ns, struct mq_attr *attr)
587{ 694{
695 int mq_treesize;
696 unsigned long total_size;
697
588 if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0) 698 if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
589 return 0; 699 return -EINVAL;
590 if (capable(CAP_SYS_RESOURCE)) { 700 if (capable(CAP_SYS_RESOURCE)) {
591 if (attr->mq_maxmsg > HARD_MSGMAX) 701 if (attr->mq_maxmsg > HARD_MSGMAX ||
592 return 0; 702 attr->mq_msgsize > HARD_MSGSIZEMAX)
703 return -EINVAL;
593 } else { 704 } else {
594 if (attr->mq_maxmsg > ipc_ns->mq_msg_max || 705 if (attr->mq_maxmsg > ipc_ns->mq_msg_max ||
595 attr->mq_msgsize > ipc_ns->mq_msgsize_max) 706 attr->mq_msgsize > ipc_ns->mq_msgsize_max)
596 return 0; 707 return -EINVAL;
597 } 708 }
598 /* check for overflow */ 709 /* check for overflow */
599 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg) 710 if (attr->mq_msgsize > ULONG_MAX/attr->mq_maxmsg)
600 return 0; 711 return -EOVERFLOW;
601 if ((unsigned long)(attr->mq_maxmsg * (attr->mq_msgsize 712 mq_treesize = attr->mq_maxmsg * sizeof(struct msg_msg) +
602 + sizeof (struct msg_msg *))) < 713 min_t(unsigned int, attr->mq_maxmsg, MQ_PRIO_MAX) *
603 (unsigned long)(attr->mq_maxmsg * attr->mq_msgsize)) 714 sizeof(struct posix_msg_tree_node);
604 return 0; 715 total_size = attr->mq_maxmsg * attr->mq_msgsize;
605 return 1; 716 if (total_size + mq_treesize < total_size)
717 return -EOVERFLOW;
718 return 0;
606} 719}
607 720
608/* 721/*
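
mq_attr_ok() now returns distinct error codes and guards both the multiplication and the addition against wrap-around: the product mq_maxmsg * mq_msgsize is safe only if mq_msgsize does not exceed ULONG_MAX / mq_maxmsg, and the later sum is safe only if it does not come out smaller than one of its addends. The checks in isolation, as a runnable sketch:

#include <stdio.h>
#include <limits.h>

static int mul_overflows(unsigned long a, unsigned long b)
{
	return b && a > ULONG_MAX / b;      /* a * b wraps iff a > ULONG_MAX / b */
}

static int add_overflows(unsigned long a, unsigned long b)
{
	return a + b < a;                   /* unsigned sum wraps below an addend */
}

int main(void)
{
	printf("8192 * 10 overflows    : %d\n", mul_overflows(8192, 10));
	printf("huge * 3 overflows     : %d\n", mul_overflows(ULONG_MAX / 2, 3));
	printf("huge + 10 sum wraps    : %d\n", add_overflows(ULONG_MAX - 1, 10));
	return 0;
}
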
@@ -617,12 +730,21 @@ static struct file *do_create(struct ipc_namespace *ipc_ns, struct dentry *dir,
617 int ret; 730 int ret;
618 731
619 if (attr) { 732 if (attr) {
620 if (!mq_attr_ok(ipc_ns, attr)) { 733 ret = mq_attr_ok(ipc_ns, attr);
621 ret = -EINVAL; 734 if (ret)
622 goto out; 735 goto out;
623 }
624 /* store for use during create */ 736 /* store for use during create */
625 dentry->d_fsdata = attr; 737 dentry->d_fsdata = attr;
738 } else {
739 struct mq_attr def_attr;
740
741 def_attr.mq_maxmsg = min(ipc_ns->mq_msg_max,
742 ipc_ns->mq_msg_default);
743 def_attr.mq_msgsize = min(ipc_ns->mq_msgsize_max,
744 ipc_ns->mq_msgsize_default);
745 ret = mq_attr_ok(ipc_ns, &def_attr);
746 if (ret)
747 goto out;
626 } 748 }
627 749
628 mode &= ~current_umask(); 750 mode &= ~current_umask();
@@ -837,7 +959,8 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
837 wake_up_interruptible(&info->wait_q); 959 wake_up_interruptible(&info->wait_q);
838 return; 960 return;
839 } 961 }
840 msg_insert(sender->msg, info); 962 if (msg_insert(sender->msg, info))
963 return;
841 list_del(&sender->list); 964 list_del(&sender->list);
842 sender->state = STATE_PENDING; 965 sender->state = STATE_PENDING;
843 wake_up_process(sender->task); 966 wake_up_process(sender->task);
@@ -857,7 +980,8 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
857 struct mqueue_inode_info *info; 980 struct mqueue_inode_info *info;
858 ktime_t expires, *timeout = NULL; 981 ktime_t expires, *timeout = NULL;
859 struct timespec ts; 982 struct timespec ts;
860 int ret; 983 struct posix_msg_tree_node *new_leaf = NULL;
984 int ret = 0;
861 985
862 if (u_abs_timeout) { 986 if (u_abs_timeout) {
863 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 987 int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -905,34 +1029,60 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
905 msg_ptr->m_ts = msg_len; 1029 msg_ptr->m_ts = msg_len;
906 msg_ptr->m_type = msg_prio; 1030 msg_ptr->m_type = msg_prio;
907 1031
1032 /*
1033 * msg_insert really wants us to have a valid, spare node struct so
1034 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1035 * fall back to that if necessary.
1036 */
1037 if (!info->node_cache)
1038 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1039
908 spin_lock(&info->lock); 1040 spin_lock(&info->lock);
909 1041
1042 if (!info->node_cache && new_leaf) {
1043 /* Save our speculative allocation into the cache */
1044 rb_init_node(&new_leaf->rb_node);
1045 INIT_LIST_HEAD(&new_leaf->msg_list);
1046 info->node_cache = new_leaf;
1047 info->qsize += sizeof(*new_leaf);
1048 new_leaf = NULL;
1049 } else {
1050 kfree(new_leaf);
1051 }
1052
910 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) { 1053 if (info->attr.mq_curmsgs == info->attr.mq_maxmsg) {
911 if (filp->f_flags & O_NONBLOCK) { 1054 if (filp->f_flags & O_NONBLOCK) {
912 spin_unlock(&info->lock);
913 ret = -EAGAIN; 1055 ret = -EAGAIN;
914 } else { 1056 } else {
915 wait.task = current; 1057 wait.task = current;
916 wait.msg = (void *) msg_ptr; 1058 wait.msg = (void *) msg_ptr;
917 wait.state = STATE_NONE; 1059 wait.state = STATE_NONE;
918 ret = wq_sleep(info, SEND, timeout, &wait); 1060 ret = wq_sleep(info, SEND, timeout, &wait);
1061 /*
1062 * wq_sleep must be called with info->lock held, and
1063 * returns with the lock released
1064 */
1065 goto out_free;
919 } 1066 }
920 if (ret < 0)
921 free_msg(msg_ptr);
922 } else { 1067 } else {
923 receiver = wq_get_first_waiter(info, RECV); 1068 receiver = wq_get_first_waiter(info, RECV);
924 if (receiver) { 1069 if (receiver) {
925 pipelined_send(info, msg_ptr, receiver); 1070 pipelined_send(info, msg_ptr, receiver);
926 } else { 1071 } else {
927 /* adds message to the queue */ 1072 /* adds message to the queue */
928 msg_insert(msg_ptr, info); 1073 ret = msg_insert(msg_ptr, info);
1074 if (ret)
1075 goto out_unlock;
929 __do_notify(info); 1076 __do_notify(info);
930 } 1077 }
931 inode->i_atime = inode->i_mtime = inode->i_ctime = 1078 inode->i_atime = inode->i_mtime = inode->i_ctime =
932 CURRENT_TIME; 1079 CURRENT_TIME;
933 spin_unlock(&info->lock);
934 ret = 0;
935 } 1080 }
1081out_unlock:
1082 spin_unlock(&info->lock);
1083out_free:
1084 if (ret)
1085 free_msg(msg_ptr);
936out_fput: 1086out_fput:
937 fput(filp); 1087 fput(filp);
938out: 1088out:
@@ -951,6 +1101,7 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
951 struct ext_wait_queue wait; 1101 struct ext_wait_queue wait;
952 ktime_t expires, *timeout = NULL; 1102 ktime_t expires, *timeout = NULL;
953 struct timespec ts; 1103 struct timespec ts;
1104 struct posix_msg_tree_node *new_leaf = NULL;
954 1105
955 if (u_abs_timeout) { 1106 if (u_abs_timeout) {
956 int res = prepare_timeout(u_abs_timeout, &expires, &ts); 1107 int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -986,7 +1137,26 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
986 goto out_fput; 1137 goto out_fput;
987 } 1138 }
988 1139
1140 /*
1141 * msg_insert really wants us to have a valid, spare node struct so
1142 * it doesn't have to kmalloc a GFP_ATOMIC allocation, but it will
1143 * fall back to that if necessary.
1144 */
1145 if (!info->node_cache)
1146 new_leaf = kmalloc(sizeof(*new_leaf), GFP_KERNEL);
1147
989 spin_lock(&info->lock); 1148 spin_lock(&info->lock);
1149
1150 if (!info->node_cache && new_leaf) {
1151 /* Save our speculative allocation into the cache */
1152 rb_init_node(&new_leaf->rb_node);
1153 INIT_LIST_HEAD(&new_leaf->msg_list);
1154 info->node_cache = new_leaf;
1155 info->qsize += sizeof(*new_leaf);
1156 } else {
1157 kfree(new_leaf);
1158 }
1159
990 if (info->attr.mq_curmsgs == 0) { 1160 if (info->attr.mq_curmsgs == 0) {
991 if (filp->f_flags & O_NONBLOCK) { 1161 if (filp->f_flags & O_NONBLOCK) {
992 spin_unlock(&info->lock); 1162 spin_unlock(&info->lock);
@@ -1251,6 +1421,8 @@ int mq_init_ns(struct ipc_namespace *ns)
1251 ns->mq_queues_max = DFLT_QUEUESMAX; 1421 ns->mq_queues_max = DFLT_QUEUESMAX;
1252 ns->mq_msg_max = DFLT_MSGMAX; 1422 ns->mq_msg_max = DFLT_MSGMAX;
1253 ns->mq_msgsize_max = DFLT_MSGSIZEMAX; 1423 ns->mq_msgsize_max = DFLT_MSGSIZEMAX;
1424 ns->mq_msg_default = DFLT_MSG;
1425 ns->mq_msgsize_default = DFLT_MSGSIZE;
1254 1426
1255 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns); 1427 ns->mq_mnt = kern_mount_data(&mqueue_fs_type, ns);
1256 if (IS_ERR(ns->mq_mnt)) { 1428 if (IS_ERR(ns->mq_mnt)) {
diff --git a/ipc/shm.c b/ipc/shm.c
index 406c5b208193..41c1285d697a 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -393,6 +393,16 @@ static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
393 return sfd->file->f_op->fsync(sfd->file, start, end, datasync); 393 return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
394} 394}
395 395
396static long shm_fallocate(struct file *file, int mode, loff_t offset,
397 loff_t len)
398{
399 struct shm_file_data *sfd = shm_file_data(file);
400
401 if (!sfd->file->f_op->fallocate)
402 return -EOPNOTSUPP;
403 return sfd->file->f_op->fallocate(file, mode, offset, len);
404}
405
396static unsigned long shm_get_unmapped_area(struct file *file, 406static unsigned long shm_get_unmapped_area(struct file *file,
397 unsigned long addr, unsigned long len, unsigned long pgoff, 407 unsigned long addr, unsigned long len, unsigned long pgoff,
398 unsigned long flags) 408 unsigned long flags)
@@ -410,6 +420,7 @@ static const struct file_operations shm_file_operations = {
410 .get_unmapped_area = shm_get_unmapped_area, 420 .get_unmapped_area = shm_get_unmapped_area,
411#endif 421#endif
412 .llseek = noop_llseek, 422 .llseek = noop_llseek,
423 .fallocate = shm_fallocate,
413}; 424};
414 425
415static const struct file_operations shm_file_operations_huge = { 426static const struct file_operations shm_file_operations_huge = {
@@ -418,6 +429,7 @@ static const struct file_operations shm_file_operations_huge = {
418 .release = shm_release, 429 .release = shm_release,
419 .get_unmapped_area = shm_get_unmapped_area, 430 .get_unmapped_area = shm_get_unmapped_area,
420 .llseek = noop_llseek, 431 .llseek = noop_llseek,
432 .fallocate = shm_fallocate,
421}; 433};
422 434
423int is_file_shm_hugepages(struct file *file) 435int is_file_shm_hugepages(struct file *file)
@@ -1036,6 +1048,10 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
1036 sfd->file = shp->shm_file; 1048 sfd->file = shp->shm_file;
1037 sfd->vm_ops = NULL; 1049 sfd->vm_ops = NULL;
1038 1050
1051 err = security_mmap_file(file, prot, flags);
1052 if (err)
1053 goto out_fput;
1054
1039 down_write(&current->mm->mmap_sem); 1055 down_write(&current->mm->mmap_sem);
1040 if (addr && !(shmflg & SHM_REMAP)) { 1056 if (addr && !(shmflg & SHM_REMAP)) {
1041 err = -EINVAL; 1057 err = -EINVAL;
@@ -1050,7 +1066,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
1050 goto invalid; 1066 goto invalid;
1051 } 1067 }
1052 1068
1053 user_addr = do_mmap (file, addr, size, prot, flags, 0); 1069 user_addr = do_mmap_pgoff(file, addr, size, prot, flags, 0);
1054 *raddr = user_addr; 1070 *raddr = user_addr;
1055 err = 0; 1071 err = 0;
1056 if (IS_ERR_VALUE(user_addr)) 1072 if (IS_ERR_VALUE(user_addr))
@@ -1058,6 +1074,7 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
1058invalid: 1074invalid:
1059 up_write(&current->mm->mmap_sem); 1075 up_write(&current->mm->mmap_sem);
1060 1076
1077out_fput:
1061 fput(file); 1078 fput(file);
1062 1079
1063out_nattch: 1080out_nattch:
diff --git a/kernel/Makefile b/kernel/Makefile
index 6c07f30fa9b7..c0cc67ad764c 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,12 +5,12 @@
5obj-y = fork.o exec_domain.o panic.o printk.o \ 5obj-y = fork.o exec_domain.o panic.o printk.o \
6 cpu.o exit.o itimer.o time.o softirq.o resource.o \ 6 cpu.o exit.o itimer.o time.o softirq.o resource.o \
7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \ 7 sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
8 signal.o sys.o kmod.o workqueue.o pid.o \ 8 signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
9 rcupdate.o extable.o params.o posix-timers.o \ 9 rcupdate.o extable.o params.o posix-timers.o \
10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \ 10 kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \ 11 hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
12 notifier.o ksysfs.o cred.o \ 12 notifier.o ksysfs.o cred.o \
13 async.o range.o groups.o 13 async.o range.o groups.o lglock.o
14 14
15ifdef CONFIG_FUNCTION_TRACER 15ifdef CONFIG_FUNCTION_TRACER
16# Do not trace debug files and internal ftrace files 16# Do not trace debug files and internal ftrace files
@@ -25,6 +25,9 @@ endif
25obj-y += sched/ 25obj-y += sched/
26obj-y += power/ 26obj-y += power/
27 27
28ifeq ($(CONFIG_CHECKPOINT_RESTORE),y)
29obj-$(CONFIG_X86) += kcmp.o
30endif
28obj-$(CONFIG_FREEZER) += freezer.o 31obj-$(CONFIG_FREEZER) += freezer.o
29obj-$(CONFIG_PROFILING) += profile.o 32obj-$(CONFIG_PROFILING) += profile.o
30obj-$(CONFIG_STACKTRACE) += stacktrace.o 33obj-$(CONFIG_STACKTRACE) += stacktrace.o
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 0f3527d6184a..2097684cf194 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -255,12 +255,17 @@ int cgroup_lock_is_held(void)
255 255
256EXPORT_SYMBOL_GPL(cgroup_lock_is_held); 256EXPORT_SYMBOL_GPL(cgroup_lock_is_held);
257 257
258static int css_unbias_refcnt(int refcnt)
259{
260 return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
261}
262
258/* the current nr of refs, always >= 0 whether @css is deactivated or not */ 263/* the current nr of refs, always >= 0 whether @css is deactivated or not */
259static int css_refcnt(struct cgroup_subsys_state *css) 264static int css_refcnt(struct cgroup_subsys_state *css)
260{ 265{
261 int v = atomic_read(&css->refcnt); 266 int v = atomic_read(&css->refcnt);
262 267
263 return v >= 0 ? v : v - CSS_DEACT_BIAS; 268 return css_unbias_refcnt(v);
264} 269}
265 270
266/* convenient tests for these bits */ 271/* convenient tests for these bits */
@@ -896,10 +901,13 @@ static void cgroup_diput(struct dentry *dentry, struct inode *inode)
896 mutex_unlock(&cgroup_mutex); 901 mutex_unlock(&cgroup_mutex);
897 902
898 /* 903 /*
899 * Drop the active superblock reference that we took when we 904 * We want to drop the active superblock reference from the
900 * created the cgroup 905 * cgroup creation after all the dentry refs are gone -
906 * kill_sb gets mighty unhappy otherwise. Mark
907 * dentry->d_fsdata with cgroup_diput() to tell
908 * cgroup_d_release() to call deactivate_super().
901 */ 909 */
902 deactivate_super(cgrp->root->sb); 910 dentry->d_fsdata = cgroup_diput;
903 911
904 /* 912 /*
905 * if we're getting rid of the cgroup, refcount should ensure 913 * if we're getting rid of the cgroup, refcount should ensure
@@ -925,6 +933,13 @@ static int cgroup_delete(const struct dentry *d)
925 return 1; 933 return 1;
926} 934}
927 935
936static void cgroup_d_release(struct dentry *dentry)
937{
938 /* did cgroup_diput() tell me to deactivate super? */
939 if (dentry->d_fsdata == cgroup_diput)
940 deactivate_super(dentry->d_sb);
941}
942
928static void remove_dir(struct dentry *d) 943static void remove_dir(struct dentry *d)
929{ 944{
930 struct dentry *parent = dget(d->d_parent); 945 struct dentry *parent = dget(d->d_parent);
@@ -1532,6 +1547,7 @@ static int cgroup_get_rootdir(struct super_block *sb)
1532 static const struct dentry_operations cgroup_dops = { 1547 static const struct dentry_operations cgroup_dops = {
1533 .d_iput = cgroup_diput, 1548 .d_iput = cgroup_diput,
1534 .d_delete = cgroup_delete, 1549 .d_delete = cgroup_delete,
1550 .d_release = cgroup_d_release,
1535 }; 1551 };
1536 1552
1537 struct inode *inode = 1553 struct inode *inode =
@@ -4971,10 +4987,12 @@ EXPORT_SYMBOL_GPL(__css_tryget);
4971void __css_put(struct cgroup_subsys_state *css) 4987void __css_put(struct cgroup_subsys_state *css)
4972{ 4988{
4973 struct cgroup *cgrp = css->cgroup; 4989 struct cgroup *cgrp = css->cgroup;
4990 int v;
4974 4991
4975 rcu_read_lock(); 4992 rcu_read_lock();
4976 atomic_dec(&css->refcnt); 4993 v = css_unbias_refcnt(atomic_dec_return(&css->refcnt));
4977 switch (css_refcnt(css)) { 4994
4995 switch (v) {
4978 case 1: 4996 case 1:
4979 if (notify_on_release(cgrp)) { 4997 if (notify_on_release(cgrp)) {
4980 set_bit(CGRP_RELEASABLE, &cgrp->flags); 4998 set_bit(CGRP_RELEASABLE, &cgrp->flags);
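
css_unbias_refcnt() recovers the true reference count from a counter that may have been pushed negative when the css was deactivated: non-negative values are returned as-is, negative ones have the deactivation bias subtracted back out. A tiny demonstration with a stand-in bias value (the real CSS_DEACT_BIAS is not visible in this hunk):

#include <stdio.h>

#define CSS_DEACT_BIAS (-1000000)     /* stand-in: a large negative constant */

static int unbias(int refcnt)
{
	return refcnt >= 0 ? refcnt : refcnt - CSS_DEACT_BIAS;
}

int main(void)
{
	int refcnt = 3;                       /* live css, three refs   */
	printf("live:        %d -> %d\n", refcnt, unbias(refcnt));

	refcnt += CSS_DEACT_BIAS;             /* css marked deactivated */
	printf("deactivated: %d -> %d\n", refcnt, unbias(refcnt));

	refcnt--;                             /* a reference is dropped */
	printf("after put:   %d -> %d\n", refcnt, unbias(refcnt));
	return 0;
}
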
diff --git a/kernel/cpu.c b/kernel/cpu.c
index 0e6353cf147a..a4eb5227a19e 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -10,7 +10,10 @@
10#include <linux/sched.h> 10#include <linux/sched.h>
11#include <linux/unistd.h> 11#include <linux/unistd.h>
12#include <linux/cpu.h> 12#include <linux/cpu.h>
13#include <linux/oom.h>
14#include <linux/rcupdate.h>
13#include <linux/export.h> 15#include <linux/export.h>
16#include <linux/bug.h>
14#include <linux/kthread.h> 17#include <linux/kthread.h>
15#include <linux/stop_machine.h> 18#include <linux/stop_machine.h>
16#include <linux/mutex.h> 19#include <linux/mutex.h>
@@ -173,6 +176,47 @@ void __ref unregister_cpu_notifier(struct notifier_block *nb)
173} 176}
174EXPORT_SYMBOL(unregister_cpu_notifier); 177EXPORT_SYMBOL(unregister_cpu_notifier);
175 178
179/**
180 * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
181 * @cpu: a CPU id
182 *
183 * This function walks all processes, finds a valid mm struct for each one and
184 * then clears a corresponding bit in mm's cpumask. While this all sounds
185 * trivial, there are various non-obvious corner cases, which this function
186 * tries to solve in a safe manner.
187 *
188 * Also note that the function uses a somewhat relaxed locking scheme, so it may
189 * be called only for an already offlined CPU.
190 */
191void clear_tasks_mm_cpumask(int cpu)
192{
193 struct task_struct *p;
194
195 /*
196 * This function is called after the cpu is taken down and marked
197 * offline, so its not like new tasks will ever get this cpu set in
198 * their mm mask. -- Peter Zijlstra
199 * Thus, we may use rcu_read_lock() here, instead of grabbing
200 * full-fledged tasklist_lock.
201 */
202 WARN_ON(cpu_online(cpu));
203 rcu_read_lock();
204 for_each_process(p) {
205 struct task_struct *t;
206
207 /*
208 * Main thread might exit, but other threads may still have
209 * a valid mm. Find one.
210 */
211 t = find_lock_task_mm(p);
212 if (!t)
213 continue;
214 cpumask_clear_cpu(cpu, mm_cpumask(t->mm));
215 task_unlock(t);
216 }
217 rcu_read_unlock();
218}
219
176static inline void check_for_tasks(int cpu) 220static inline void check_for_tasks(int cpu)
177{ 221{
178 struct task_struct *p; 222 struct task_struct *p;
diff --git a/kernel/cpu_pm.c b/kernel/cpu_pm.c
index 249152e15308..9656a3c36503 100644
--- a/kernel/cpu_pm.c
+++ b/kernel/cpu_pm.c
@@ -81,7 +81,7 @@ int cpu_pm_unregister_notifier(struct notifier_block *nb)
81EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier); 81EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
82 82
83/** 83/**
84 * cpm_pm_enter - CPU low power entry notifier 84 * cpu_pm_enter - CPU low power entry notifier
85 * 85 *
86 * Notifies listeners that a single CPU is entering a low power state that may 86 * Notifies listeners that a single CPU is entering a low power state that may
87 * cause some blocks in the same power domain as the cpu to reset. 87 * cause some blocks in the same power domain as the cpu to reset.
@@ -89,7 +89,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_unregister_notifier);
89 * Must be called on the affected CPU with interrupts disabled. Platform is 89 * Must be called on the affected CPU with interrupts disabled. Platform is
90 * responsible for ensuring that cpu_pm_enter is not called twice on the same 90 * responsible for ensuring that cpu_pm_enter is not called twice on the same
91 * CPU before cpu_pm_exit is called. Notified drivers can include VFP 91 * CPU before cpu_pm_exit is called. Notified drivers can include VFP
92 * co-processor, interrupt controller and it's PM extensions, local CPU 92 * co-processor, interrupt controller and its PM extensions, local CPU
93 * timers context save/restore which shouldn't be interrupted. Hence it 93 * timers context save/restore which shouldn't be interrupted. Hence it
94 * must be called with interrupts disabled. 94 * must be called with interrupts disabled.
95 * 95 *
@@ -115,13 +115,13 @@ int cpu_pm_enter(void)
115EXPORT_SYMBOL_GPL(cpu_pm_enter); 115EXPORT_SYMBOL_GPL(cpu_pm_enter);
116 116
117/** 117/**
118 * cpm_pm_exit - CPU low power exit notifier 118 * cpu_pm_exit - CPU low power exit notifier
119 * 119 *
120 * Notifies listeners that a single CPU is exiting a low power state that may 120 * Notifies listeners that a single CPU is exiting a low power state that may
121 * have caused some blocks in the same power domain as the cpu to reset. 121 * have caused some blocks in the same power domain as the cpu to reset.
122 * 122 *
123 * Notified drivers can include VFP co-processor, interrupt controller 123 * Notified drivers can include VFP co-processor, interrupt controller
124 * and it's PM extensions, local CPU timers context save/restore which 124 * and its PM extensions, local CPU timers context save/restore which
125 * shouldn't be interrupted. Hence it must be called with interrupts disabled. 125 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
126 * 126 *
127 * Return conditions are same as __raw_notifier_call_chain. 127 * Return conditions are same as __raw_notifier_call_chain.
@@ -139,7 +139,7 @@ int cpu_pm_exit(void)
139EXPORT_SYMBOL_GPL(cpu_pm_exit); 139EXPORT_SYMBOL_GPL(cpu_pm_exit);
140 140
141/** 141/**
142 * cpm_cluster_pm_enter - CPU cluster low power entry notifier 142 * cpu_cluster_pm_enter - CPU cluster low power entry notifier
143 * 143 *
144 * Notifies listeners that all cpus in a power domain are entering a low power 144 * Notifies listeners that all cpus in a power domain are entering a low power
145 * state that may cause some blocks in the same power domain to reset. 145 * state that may cause some blocks in the same power domain to reset.
@@ -147,7 +147,7 @@ EXPORT_SYMBOL_GPL(cpu_pm_exit);
147 * Must be called after cpu_pm_enter has been called on all cpus in the power 147 * Must be called after cpu_pm_enter has been called on all cpus in the power
148 * domain, and before cpu_pm_exit has been called on any cpu in the power 148 * domain, and before cpu_pm_exit has been called on any cpu in the power
149 * domain. Notified drivers can include VFP co-processor, interrupt controller 149 * domain. Notified drivers can include VFP co-processor, interrupt controller
150 * and it's PM extensions, local CPU timers context save/restore which 150 * and its PM extensions, local CPU timers context save/restore which
151 * shouldn't be interrupted. Hence it must be called with interrupts disabled. 151 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
152 * 152 *
153 * Must be called with interrupts disabled. 153 * Must be called with interrupts disabled.
@@ -174,7 +174,7 @@ int cpu_cluster_pm_enter(void)
174EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter); 174EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
175 175
176/** 176/**
177 * cpm_cluster_pm_exit - CPU cluster low power exit notifier 177 * cpu_cluster_pm_exit - CPU cluster low power exit notifier
178 * 178 *
 179 * Notifies listeners that all cpus in a power domain are exiting from a 179 * Notifies listeners that all cpus in a power domain are exiting from a
180 * low power state that may have caused some blocks in the same power domain 180 * low power state that may have caused some blocks in the same power domain
@@ -183,7 +183,7 @@ EXPORT_SYMBOL_GPL(cpu_cluster_pm_enter);
183 * Must be called after cpu_pm_exit has been called on all cpus in the power 183 * Must be called after cpu_pm_exit has been called on all cpus in the power
184 * domain, and before cpu_pm_exit has been called on any cpu in the power 184 * domain, and before cpu_pm_exit has been called on any cpu in the power
185 * domain. Notified drivers can include VFP co-processor, interrupt controller 185 * domain. Notified drivers can include VFP co-processor, interrupt controller
186 * and it's PM extensions, local CPU timers context save/restore which 186 * and its PM extensions, local CPU timers context save/restore which
187 * shouldn't be interrupted. Hence it must be called with interrupts disabled. 187 * shouldn't be interrupted. Hence it must be called with interrupts disabled.
188 * 188 *
189 * Return conditions are same as __raw_notifier_call_chain. 189 * Return conditions are same as __raw_notifier_call_chain.
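The kerneldoc fixes above concern the cpu_pm notifier hooks. As a rough, hedged illustration (not part of the patch), a driver that must save and restore per-CPU hardware state around a low power transition could subscribe to these events along the following lines; the foo_* names are invented:

static int foo_cpu_pm_notify(struct notifier_block *nb,
                             unsigned long action, void *data)
{
        switch (action) {
        case CPU_PM_ENTER:
                /* save state that the low power entry may destroy */
                break;
        case CPU_PM_EXIT:
        case CPU_PM_ENTER_FAILED:
                /* restore the saved state */
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block foo_cpu_pm_nb = {
        .notifier_call = foo_cpu_pm_notify,
};

/* from the driver's init path, with <linux/cpu_pm.h> included */
cpu_pm_register_notifier(&foo_cpu_pm_nb);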
diff --git a/kernel/cred.c b/kernel/cred.c
index 430557ea488f..de728ac50d82 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -207,13 +207,6 @@ void exit_creds(struct task_struct *tsk)
207 validate_creds(cred); 207 validate_creds(cred);
208 alter_cred_subscribers(cred, -1); 208 alter_cred_subscribers(cred, -1);
209 put_cred(cred); 209 put_cred(cred);
210
211 cred = (struct cred *) tsk->replacement_session_keyring;
212 if (cred) {
213 tsk->replacement_session_keyring = NULL;
214 validate_creds(cred);
215 put_cred(cred);
216 }
217} 210}
218 211
219/** 212/**
@@ -396,8 +389,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
396 struct cred *new; 389 struct cred *new;
397 int ret; 390 int ret;
398 391
399 p->replacement_session_keyring = NULL;
400
401 if ( 392 if (
402#ifdef CONFIG_KEYS 393#ifdef CONFIG_KEYS
403 !p->cred->thread_keyring && 394 !p->cred->thread_keyring &&
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5b06cbbf6931..d7d71d6ec972 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -253,9 +253,9 @@ perf_cgroup_match(struct perf_event *event)
253 return !event->cgrp || event->cgrp == cpuctx->cgrp; 253 return !event->cgrp || event->cgrp == cpuctx->cgrp;
254} 254}
255 255
256static inline void perf_get_cgroup(struct perf_event *event) 256static inline bool perf_tryget_cgroup(struct perf_event *event)
257{ 257{
258 css_get(&event->cgrp->css); 258 return css_tryget(&event->cgrp->css);
259} 259}
260 260
261static inline void perf_put_cgroup(struct perf_event *event) 261static inline void perf_put_cgroup(struct perf_event *event)
@@ -484,7 +484,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event,
484 event->cgrp = cgrp; 484 event->cgrp = cgrp;
485 485
486 /* must be done before we fput() the file */ 486 /* must be done before we fput() the file */
487 perf_get_cgroup(event); 487 if (!perf_tryget_cgroup(event)) {
488 event->cgrp = NULL;
489 ret = -ENOENT;
490 goto out;
491 }
488 492
489 /* 493 /*
490 * all events in a group must monitor 494 * all events in a group must monitor
@@ -3181,7 +3185,6 @@ static void perf_event_for_each(struct perf_event *event,
3181 event = event->group_leader; 3185 event = event->group_leader;
3182 3186
3183 perf_event_for_each_child(event, func); 3187 perf_event_for_each_child(event, func);
3184 func(event);
3185 list_for_each_entry(sibling, &event->sibling_list, group_entry) 3188 list_for_each_entry(sibling, &event->sibling_list, group_entry)
3186 perf_event_for_each_child(sibling, func); 3189 perf_event_for_each_child(sibling, func);
3187 mutex_unlock(&ctx->mutex); 3190 mutex_unlock(&ctx->mutex);
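The perf change above swaps an unconditional css_get() for css_tryget() so that connecting an event to a cgroup that is already being torn down fails with -ENOENT instead of taking a reference on a dying css. The underlying "take a reference only if the object is still live" idiom, sketched here with a plain atomic counter and invented names, looks like this:

struct obj {
        atomic_t refcount;              /* reaches zero when the object dies */
};

static bool obj_tryget(struct obj *o)
{
        /* succeeds only while at least one reference is still held */
        return atomic_inc_not_zero(&o->refcount);
}

static int attach_to_obj(struct obj *o)
{
        if (!obj_tryget(o))
                return -ENOENT;         /* object already on its way out */
        /* ... o is safe to use until the matching put ... */
        return 0;
}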
diff --git a/kernel/exit.c b/kernel/exit.c
index 910a0716e17a..2f59cc334516 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
72 list_del_rcu(&p->tasks); 72 list_del_rcu(&p->tasks);
73 list_del_init(&p->sibling); 73 list_del_init(&p->sibling);
74 __this_cpu_dec(process_counts); 74 __this_cpu_dec(process_counts);
75 /*
76 * If we are the last child process in a pid namespace to be
 77 * reaped, notify the reaper sleeping in zap_pid_ns_processes().
78 */
79 if (IS_ENABLED(CONFIG_PID_NS)) {
80 struct task_struct *parent = p->real_parent;
81
82 if ((task_active_pid_ns(parent)->child_reaper == parent) &&
83 list_empty(&parent->children) &&
84 (parent->flags & PF_EXITING))
85 wake_up_process(parent);
86 }
75 } 87 }
76 list_del_rcu(&p->thread_group); 88 list_del_rcu(&p->thread_group);
77} 89}
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk)
643 mm_release(tsk, mm); 655 mm_release(tsk, mm);
644 if (!mm) 656 if (!mm)
645 return; 657 return;
658 sync_mm_rss(mm);
646 /* 659 /*
647 * Serialize with any possible pending coredump. 660 * Serialize with any possible pending coredump.
648 * We must hold mmap_sem around checking core_state 661 * We must hold mmap_sem around checking core_state
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
719 732
720 zap_pid_ns_processes(pid_ns); 733 zap_pid_ns_processes(pid_ns);
721 write_lock_irq(&tasklist_lock); 734 write_lock_irq(&tasklist_lock);
722 /*
723 * We can not clear ->child_reaper or leave it alone.
724 * There may by stealth EXIT_DEAD tasks on ->children,
725 * forget_original_parent() must move them somewhere.
726 */
727 pid_ns->child_reaper = init_pid_ns.child_reaper;
728 } else if (father->signal->has_child_subreaper) { 735 } else if (father->signal->has_child_subreaper) {
729 struct task_struct *reaper; 736 struct task_struct *reaper;
730 737
@@ -884,9 +891,9 @@ static void check_stack_usage(void)
884 891
885 spin_lock(&low_water_lock); 892 spin_lock(&low_water_lock);
886 if (free < lowest_to_date) { 893 if (free < lowest_to_date) {
887 printk(KERN_WARNING "%s used greatest stack depth: %lu bytes " 894 printk(KERN_WARNING "%s (%d) used greatest stack depth: "
888 "left\n", 895 "%lu bytes left\n",
889 current->comm, free); 896 current->comm, task_pid_nr(current), free);
890 lowest_to_date = free; 897 lowest_to_date = free;
891 } 898 }
892 spin_unlock(&low_water_lock); 899 spin_unlock(&low_water_lock);
@@ -946,12 +953,13 @@ void do_exit(long code)
946 exit_signals(tsk); /* sets PF_EXITING */ 953 exit_signals(tsk); /* sets PF_EXITING */
947 /* 954 /*
948 * tsk->flags are checked in the futex code to protect against 955 * tsk->flags are checked in the futex code to protect against
949 * an exiting task cleaning up the robust pi futexes. 956 * an exiting task cleaning up the robust pi futexes, and in
957 * task_work_add() to avoid the race with exit_task_work().
950 */ 958 */
951 smp_mb(); 959 smp_mb();
952 raw_spin_unlock_wait(&tsk->pi_lock); 960 raw_spin_unlock_wait(&tsk->pi_lock);
953 961
954 exit_irq_thread(); 962 exit_task_work(tsk);
955 963
956 if (unlikely(in_atomic())) 964 if (unlikely(in_atomic()))
957 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n", 965 printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
@@ -1214,7 +1222,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
1214 unsigned long state; 1222 unsigned long state;
1215 int retval, status, traced; 1223 int retval, status, traced;
1216 pid_t pid = task_pid_vnr(p); 1224 pid_t pid = task_pid_vnr(p);
1217 uid_t uid = from_kuid_munged(current_user_ns(), __task_cred(p)->uid); 1225 uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
1218 struct siginfo __user *infop; 1226 struct siginfo __user *infop;
1219 1227
1220 if (!likely(wo->wo_flags & WEXITED)) 1228 if (!likely(wo->wo_flags & WEXITED))
diff --git a/kernel/fork.c b/kernel/fork.c
index 31a32c7dd169..ab5211b9e622 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -787,9 +787,6 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
787 /* Get rid of any cached register state */ 787 /* Get rid of any cached register state */
788 deactivate_mm(tsk, mm); 788 deactivate_mm(tsk, mm);
789 789
790 if (tsk->vfork_done)
791 complete_vfork_done(tsk);
792
793 /* 790 /*
794 * If we're exiting normally, clear a user-space tid field if 791 * If we're exiting normally, clear a user-space tid field if
795 * requested. We leave this alone when dying by signal, to leave 792 * requested. We leave this alone when dying by signal, to leave
@@ -810,6 +807,13 @@ void mm_release(struct task_struct *tsk, struct mm_struct *mm)
810 } 807 }
811 tsk->clear_child_tid = NULL; 808 tsk->clear_child_tid = NULL;
812 } 809 }
810
811 /*
 812 * All done, finally we can wake up the parent and return this mm to it.
813 * Also kthread_stop() uses this completion for synchronization.
814 */
815 if (tsk->vfork_done)
816 complete_vfork_done(tsk);
813} 817}
814 818
815/* 819/*
@@ -1411,6 +1415,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
1411 */ 1415 */
1412 p->group_leader = p; 1416 p->group_leader = p;
1413 INIT_LIST_HEAD(&p->thread_group); 1417 INIT_LIST_HEAD(&p->thread_group);
1418 INIT_HLIST_HEAD(&p->task_works);
1414 1419
1415 /* Now that the task is set up, run cgroup callbacks if 1420 /* Now that the task is set up, run cgroup callbacks if
1416 * necessary. We need to run them before the task is visible 1421 * necessary. We need to run them before the task is visible
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index fc275e4f629b..eebd6d5cfb44 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -275,8 +275,10 @@ void handle_nested_irq(unsigned int irq)
275 kstat_incr_irqs_this_cpu(irq, desc); 275 kstat_incr_irqs_this_cpu(irq, desc);
276 276
277 action = desc->action; 277 action = desc->action;
278 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) 278 if (unlikely(!action || irqd_irq_disabled(&desc->irq_data))) {
279 desc->istate |= IRQS_PENDING;
279 goto out_unlock; 280 goto out_unlock;
281 }
280 282
281 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS); 283 irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
282 raw_spin_unlock_irq(&desc->lock); 284 raw_spin_unlock_irq(&desc->lock);
@@ -324,8 +326,10 @@ handle_simple_irq(unsigned int irq, struct irq_desc *desc)
324 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING); 326 desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
325 kstat_incr_irqs_this_cpu(irq, desc); 327 kstat_incr_irqs_this_cpu(irq, desc);
326 328
327 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) 329 if (unlikely(!desc->action || irqd_irq_disabled(&desc->irq_data))) {
330 desc->istate |= IRQS_PENDING;
328 goto out_unlock; 331 goto out_unlock;
332 }
329 333
330 handle_irq_event(desc); 334 handle_irq_event(desc);
331 335
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index 8e5c56b3b7d9..001fa5bab490 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -101,6 +101,9 @@ extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
101 101
102extern void irq_set_thread_affinity(struct irq_desc *desc); 102extern void irq_set_thread_affinity(struct irq_desc *desc);
103 103
104extern int irq_do_set_affinity(struct irq_data *data,
105 const struct cpumask *dest, bool force);
106
104/* Inline functions for support of irq chips on slow busses */ 107/* Inline functions for support of irq chips on slow busses */
105static inline void chip_bus_lock(struct irq_desc *desc) 108static inline void chip_bus_lock(struct irq_desc *desc)
106{ 109{
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326afe87..8c548232ba39 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -7,6 +7,8 @@
7 * This file contains driver APIs to the irq subsystem. 7 * This file contains driver APIs to the irq subsystem.
8 */ 8 */
9 9
10#define pr_fmt(fmt) "genirq: " fmt
11
10#include <linux/irq.h> 12#include <linux/irq.h>
11#include <linux/kthread.h> 13#include <linux/kthread.h>
12#include <linux/module.h> 14#include <linux/module.h>
@@ -14,6 +16,7 @@
14#include <linux/interrupt.h> 16#include <linux/interrupt.h>
15#include <linux/slab.h> 17#include <linux/slab.h>
16#include <linux/sched.h> 18#include <linux/sched.h>
19#include <linux/task_work.h>
17 20
18#include "internals.h" 21#include "internals.h"
19 22
@@ -139,6 +142,25 @@ static inline void
139irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { } 142irq_get_pending(struct cpumask *mask, struct irq_desc *desc) { }
140#endif 143#endif
141 144
145int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
146 bool force)
147{
148 struct irq_desc *desc = irq_data_to_desc(data);
149 struct irq_chip *chip = irq_data_get_irq_chip(data);
150 int ret;
151
152 ret = chip->irq_set_affinity(data, mask, false);
153 switch (ret) {
154 case IRQ_SET_MASK_OK:
155 cpumask_copy(data->affinity, mask);
156 case IRQ_SET_MASK_OK_NOCOPY:
157 irq_set_thread_affinity(desc);
158 ret = 0;
159 }
160
161 return ret;
162}
163
142int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask) 164int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
143{ 165{
144 struct irq_chip *chip = irq_data_get_irq_chip(data); 166 struct irq_chip *chip = irq_data_get_irq_chip(data);
@@ -149,14 +171,7 @@ int __irq_set_affinity_locked(struct irq_data *data, const struct cpumask *mask)
149 return -EINVAL; 171 return -EINVAL;
150 172
151 if (irq_can_move_pcntxt(data)) { 173 if (irq_can_move_pcntxt(data)) {
152 ret = chip->irq_set_affinity(data, mask, false); 174 ret = irq_do_set_affinity(data, mask, false);
153 switch (ret) {
154 case IRQ_SET_MASK_OK:
155 cpumask_copy(data->affinity, mask);
156 case IRQ_SET_MASK_OK_NOCOPY:
157 irq_set_thread_affinity(desc);
158 ret = 0;
159 }
160 } else { 175 } else {
161 irqd_set_move_pending(data); 176 irqd_set_move_pending(data);
162 irq_copy_pending(desc, mask); 177 irq_copy_pending(desc, mask);
@@ -280,9 +295,8 @@ EXPORT_SYMBOL_GPL(irq_set_affinity_notifier);
280static int 295static int
281setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask) 296setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
282{ 297{
283 struct irq_chip *chip = irq_desc_get_chip(desc);
284 struct cpumask *set = irq_default_affinity; 298 struct cpumask *set = irq_default_affinity;
285 int ret, node = desc->irq_data.node; 299 int node = desc->irq_data.node;
286 300
287 /* Excludes PER_CPU and NO_BALANCE interrupts */ 301 /* Excludes PER_CPU and NO_BALANCE interrupts */
288 if (!irq_can_set_affinity(irq)) 302 if (!irq_can_set_affinity(irq))
@@ -308,13 +322,7 @@ setup_affinity(unsigned int irq, struct irq_desc *desc, struct cpumask *mask)
308 if (cpumask_intersects(mask, nodemask)) 322 if (cpumask_intersects(mask, nodemask))
309 cpumask_and(mask, mask, nodemask); 323 cpumask_and(mask, mask, nodemask);
310 } 324 }
311 ret = chip->irq_set_affinity(&desc->irq_data, mask, false); 325 irq_do_set_affinity(&desc->irq_data, mask, false);
312 switch (ret) {
313 case IRQ_SET_MASK_OK:
314 cpumask_copy(desc->irq_data.affinity, mask);
315 case IRQ_SET_MASK_OK_NOCOPY:
316 irq_set_thread_affinity(desc);
317 }
318 return 0; 326 return 0;
319} 327}
320#else 328#else
@@ -565,7 +573,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
565 * IRQF_TRIGGER_* but the PIC does not support multiple 573 * IRQF_TRIGGER_* but the PIC does not support multiple
566 * flow-types? 574 * flow-types?
567 */ 575 */
568 pr_debug("genirq: No set_type function for IRQ %d (%s)\n", irq, 576 pr_debug("No set_type function for IRQ %d (%s)\n", irq,
569 chip ? (chip->name ? : "unknown") : "unknown"); 577 chip ? (chip->name ? : "unknown") : "unknown");
570 return 0; 578 return 0;
571 } 579 }
@@ -600,7 +608,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
600 ret = 0; 608 ret = 0;
601 break; 609 break;
602 default: 610 default:
603 pr_err("genirq: Setting trigger mode %lu for irq %u failed (%pF)\n", 611 pr_err("Setting trigger mode %lu for irq %u failed (%pF)\n",
604 flags, irq, chip->irq_set_type); 612 flags, irq, chip->irq_set_type);
605 } 613 }
606 if (unmask) 614 if (unmask)
@@ -773,11 +781,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
773 wake_up(&desc->wait_for_threads); 781 wake_up(&desc->wait_for_threads);
774} 782}
775 783
784static void irq_thread_dtor(struct task_work *unused)
785{
786 struct task_struct *tsk = current;
787 struct irq_desc *desc;
788 struct irqaction *action;
789
790 if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
791 return;
792
793 action = kthread_data(tsk);
794
795 pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
796 tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
797
798
799 desc = irq_to_desc(action->irq);
800 /*
801 * If IRQTF_RUNTHREAD is set, we need to decrement
802 * desc->threads_active and wake possible waiters.
803 */
804 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
805 wake_threads_waitq(desc);
806
807 /* Prevent a stale desc->threads_oneshot */
808 irq_finalize_oneshot(desc, action);
809}
810
776/* 811/*
777 * Interrupt handler thread 812 * Interrupt handler thread
778 */ 813 */
779static int irq_thread(void *data) 814static int irq_thread(void *data)
780{ 815{
816 struct task_work on_exit_work;
781 static const struct sched_param param = { 817 static const struct sched_param param = {
782 .sched_priority = MAX_USER_RT_PRIO/2, 818 .sched_priority = MAX_USER_RT_PRIO/2,
783 }; 819 };
@@ -793,7 +829,9 @@ static int irq_thread(void *data)
793 handler_fn = irq_thread_fn; 829 handler_fn = irq_thread_fn;
794 830
795 sched_setscheduler(current, SCHED_FIFO, &param); 831 sched_setscheduler(current, SCHED_FIFO, &param);
796 current->irq_thread = 1; 832
833 init_task_work(&on_exit_work, irq_thread_dtor, NULL);
834 task_work_add(current, &on_exit_work, false);
797 835
798 while (!irq_wait_for_interrupt(action)) { 836 while (!irq_wait_for_interrupt(action)) {
799 irqreturn_t action_ret; 837 irqreturn_t action_ret;
@@ -815,44 +853,11 @@ static int irq_thread(void *data)
815 * cannot touch the oneshot mask at this point anymore as 853 * cannot touch the oneshot mask at this point anymore as
816 * __setup_irq() might have given out currents thread_mask 854 * __setup_irq() might have given out currents thread_mask
817 * again. 855 * again.
818 *
819 * Clear irq_thread. Otherwise exit_irq_thread() would make
820 * fuzz about an active irq thread going into nirvana.
821 */ 856 */
822 current->irq_thread = 0; 857 task_work_cancel(current, irq_thread_dtor);
823 return 0; 858 return 0;
824} 859}
825 860
826/*
827 * Called from do_exit()
828 */
829void exit_irq_thread(void)
830{
831 struct task_struct *tsk = current;
832 struct irq_desc *desc;
833 struct irqaction *action;
834
835 if (!tsk->irq_thread)
836 return;
837
838 action = kthread_data(tsk);
839
840 pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
841 tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
842
843 desc = irq_to_desc(action->irq);
844
845 /*
846 * If IRQTF_RUNTHREAD is set, we need to decrement
847 * desc->threads_active and wake possible waiters.
848 */
849 if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
850 wake_threads_waitq(desc);
851
852 /* Prevent a stale desc->threads_oneshot */
853 irq_finalize_oneshot(desc, action);
854}
855
856static void irq_setup_forced_threading(struct irqaction *new) 861static void irq_setup_forced_threading(struct irqaction *new)
857{ 862{
858 if (!force_irqthreads) 863 if (!force_irqthreads)
@@ -1044,7 +1049,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1044 * has. The type flags are unreliable as the 1049 * has. The type flags are unreliable as the
1045 * underlying chip implementation can override them. 1050 * underlying chip implementation can override them.
1046 */ 1051 */
1047 pr_err("genirq: Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n", 1052 pr_err("Threaded irq requested with handler=NULL and !ONESHOT for irq %d\n",
1048 irq); 1053 irq);
1049 ret = -EINVAL; 1054 ret = -EINVAL;
1050 goto out_mask; 1055 goto out_mask;
@@ -1095,7 +1100,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1095 1100
1096 if (nmsk != omsk) 1101 if (nmsk != omsk)
1097 /* hope the handler works with current trigger mode */ 1102 /* hope the handler works with current trigger mode */
1098 pr_warning("genirq: irq %d uses trigger mode %u; requested %u\n", 1103 pr_warning("irq %d uses trigger mode %u; requested %u\n",
1099 irq, nmsk, omsk); 1104 irq, nmsk, omsk);
1100 } 1105 }
1101 1106
@@ -1133,7 +1138,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
1133 1138
1134mismatch: 1139mismatch:
1135 if (!(new->flags & IRQF_PROBE_SHARED)) { 1140 if (!(new->flags & IRQF_PROBE_SHARED)) {
1136 pr_err("genirq: Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n", 1141 pr_err("Flags mismatch irq %d. %08x (%s) vs. %08x (%s)\n",
1137 irq, new->flags, new->name, old->flags, old->name); 1142 irq, new->flags, new->name, old->flags, old->name);
1138#ifdef CONFIG_DEBUG_SHIRQ 1143#ifdef CONFIG_DEBUG_SHIRQ
1139 dump_stack(); 1144 dump_stack();
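The hunks above drop the dedicated exit_irq_thread() hook and instead queue a destructor through the generic task_work mechanism, which do_exit() now runs via exit_task_work(). A minimal sketch of the same pattern, using the calls exactly as they appear in the hunk (the example_* names are invented):

static void example_dtor(struct task_work *unused)
{
        /* runs from exit_task_work() if the task dies unexpectedly */
        pr_err("task %d exited with cleanup still pending\n", current->pid);
}

static int example_thread(void *data)
{
        struct task_work on_exit_work;

        init_task_work(&on_exit_work, example_dtor, NULL);
        task_work_add(current, &on_exit_work, false);

        /* ... normal thread main loop ... */

        /* normal exit: make sure the destructor can no longer fire */
        task_work_cancel(current, example_dtor);
        return 0;
}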
diff --git a/kernel/irq/migration.c b/kernel/irq/migration.c
index c3c89751b327..ca3f4aaff707 100644
--- a/kernel/irq/migration.c
+++ b/kernel/irq/migration.c
@@ -42,17 +42,8 @@ void irq_move_masked_irq(struct irq_data *idata)
42 * For correct operation this depends on the caller 42 * For correct operation this depends on the caller
43 * masking the irqs. 43 * masking the irqs.
44 */ 44 */
45 if (likely(cpumask_any_and(desc->pending_mask, cpu_online_mask) 45 if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
46 < nr_cpu_ids)) { 46 irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);
47 int ret = chip->irq_set_affinity(&desc->irq_data,
48 desc->pending_mask, false);
49 switch (ret) {
50 case IRQ_SET_MASK_OK:
51 cpumask_copy(desc->irq_data.affinity, desc->pending_mask);
52 case IRQ_SET_MASK_OK_NOCOPY:
53 irq_set_thread_affinity(desc);
54 }
55 }
56 47
57 cpumask_clear(desc->pending_mask); 48 cpumask_clear(desc->pending_mask);
58} 49}
diff --git a/kernel/kcmp.c b/kernel/kcmp.c
new file mode 100644
index 000000000000..30b7b225306c
--- /dev/null
+++ b/kernel/kcmp.c
@@ -0,0 +1,196 @@
1#include <linux/kernel.h>
2#include <linux/syscalls.h>
3#include <linux/fdtable.h>
4#include <linux/string.h>
5#include <linux/random.h>
6#include <linux/module.h>
7#include <linux/init.h>
8#include <linux/errno.h>
9#include <linux/cache.h>
10#include <linux/bug.h>
11#include <linux/err.h>
12#include <linux/kcmp.h>
13
14#include <asm/unistd.h>
15
16/*
17 * We don't expose the real in-memory order of objects for security reasons.
 18 * But still the comparison results should be suitable for sorting. So we
 19 * obfuscate kernel pointer values and compare the products instead.
 20 *
 21 * The obfuscation is done in two steps. First we xor the kernel pointer with
 22 * a random value, which puts the pointer into a new position in a reordered space.
 23 * Secondly we multiply the xor product with a large odd random number to
 24 * permute its bits even more (the odd multiplier guarantees that the product
 25 * is unique even after the high bits are truncated, since any odd number is
 26 * relatively prime to 2^n).
27 *
28 * Note also that the obfuscation itself is invisible to userspace and if needed
29 * it can be changed to an alternate scheme.
30 */
31static unsigned long cookies[KCMP_TYPES][2] __read_mostly;
32
33static long kptr_obfuscate(long v, int type)
34{
35 return (v ^ cookies[type][0]) * cookies[type][1];
36}
37
38/*
39 * 0 - equal, i.e. v1 = v2
40 * 1 - less than, i.e. v1 < v2
41 * 2 - greater than, i.e. v1 > v2
42 * 3 - not equal but ordering unavailable (reserved for future)
43 */
44static int kcmp_ptr(void *v1, void *v2, enum kcmp_type type)
45{
46 long ret;
47
48 ret = kptr_obfuscate((long)v1, type) - kptr_obfuscate((long)v2, type);
49
50 return (ret < 0) | ((ret > 0) << 1);
51}
52
53/* The caller must have pinned the task */
54static struct file *
55get_file_raw_ptr(struct task_struct *task, unsigned int idx)
56{
57 struct file *file = NULL;
58
59 task_lock(task);
60 rcu_read_lock();
61
62 if (task->files)
63 file = fcheck_files(task->files, idx);
64
65 rcu_read_unlock();
66 task_unlock(task);
67
68 return file;
69}
70
71static void kcmp_unlock(struct mutex *m1, struct mutex *m2)
72{
73 if (likely(m2 != m1))
74 mutex_unlock(m2);
75 mutex_unlock(m1);
76}
77
78static int kcmp_lock(struct mutex *m1, struct mutex *m2)
79{
80 int err;
81
82 if (m2 > m1)
83 swap(m1, m2);
84
85 err = mutex_lock_killable(m1);
86 if (!err && likely(m1 != m2)) {
87 err = mutex_lock_killable_nested(m2, SINGLE_DEPTH_NESTING);
88 if (err)
89 mutex_unlock(m1);
90 }
91
92 return err;
93}
94
95SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
96 unsigned long, idx1, unsigned long, idx2)
97{
98 struct task_struct *task1, *task2;
99 int ret;
100
101 rcu_read_lock();
102
103 /*
104 * Tasks are looked up in caller's PID namespace only.
105 */
106 task1 = find_task_by_vpid(pid1);
107 task2 = find_task_by_vpid(pid2);
108 if (!task1 || !task2)
109 goto err_no_task;
110
111 get_task_struct(task1);
112 get_task_struct(task2);
113
114 rcu_read_unlock();
115
116 /*
117 * One should have enough rights to inspect task details.
118 */
119 ret = kcmp_lock(&task1->signal->cred_guard_mutex,
120 &task2->signal->cred_guard_mutex);
121 if (ret)
122 goto err;
123 if (!ptrace_may_access(task1, PTRACE_MODE_READ) ||
124 !ptrace_may_access(task2, PTRACE_MODE_READ)) {
125 ret = -EPERM;
126 goto err_unlock;
127 }
128
129 switch (type) {
130 case KCMP_FILE: {
131 struct file *filp1, *filp2;
132
133 filp1 = get_file_raw_ptr(task1, idx1);
134 filp2 = get_file_raw_ptr(task2, idx2);
135
136 if (filp1 && filp2)
137 ret = kcmp_ptr(filp1, filp2, KCMP_FILE);
138 else
139 ret = -EBADF;
140 break;
141 }
142 case KCMP_VM:
143 ret = kcmp_ptr(task1->mm, task2->mm, KCMP_VM);
144 break;
145 case KCMP_FILES:
146 ret = kcmp_ptr(task1->files, task2->files, KCMP_FILES);
147 break;
148 case KCMP_FS:
149 ret = kcmp_ptr(task1->fs, task2->fs, KCMP_FS);
150 break;
151 case KCMP_SIGHAND:
152 ret = kcmp_ptr(task1->sighand, task2->sighand, KCMP_SIGHAND);
153 break;
154 case KCMP_IO:
155 ret = kcmp_ptr(task1->io_context, task2->io_context, KCMP_IO);
156 break;
157 case KCMP_SYSVSEM:
158#ifdef CONFIG_SYSVIPC
159 ret = kcmp_ptr(task1->sysvsem.undo_list,
160 task2->sysvsem.undo_list,
161 KCMP_SYSVSEM);
162#else
163 ret = -EOPNOTSUPP;
164#endif
165 break;
166 default:
167 ret = -EINVAL;
168 break;
169 }
170
171err_unlock:
172 kcmp_unlock(&task1->signal->cred_guard_mutex,
173 &task2->signal->cred_guard_mutex);
174err:
175 put_task_struct(task1);
176 put_task_struct(task2);
177
178 return ret;
179
180err_no_task:
181 rcu_read_unlock();
182 return -ESRCH;
183}
184
185static __init int kcmp_cookies_init(void)
186{
187 int i;
188
189 get_random_bytes(cookies, sizeof(cookies));
190
191 for (i = 0; i < KCMP_TYPES; i++)
192 cookies[i][1] |= (~(~0UL >> 1) | 1);
193
194 return 0;
195}
196arch_initcall(kcmp_cookies_init);
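The new kcmp() syscall above deliberately reports only equality and an obfuscated ordering, never raw kernel pointers. A hedged userspace sketch of how a checkpoint/restore style tool might use it, assuming __NR_kcmp and <linux/kcmp.h> are available in the installed headers:

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/kcmp.h>

static long sys_kcmp(pid_t pid1, pid_t pid2, int type,
                     unsigned long idx1, unsigned long idx2)
{
        return syscall(__NR_kcmp, pid1, pid2, type, idx1, idx2);
}

int main(void)
{
        pid_t self = getpid();
        /* 0: same struct file, 1/2: different (ordered), <0: error */
        long ret = sys_kcmp(self, self, KCMP_FILE, 0, 1);

        printf("fd 0 and fd 1 %s the same open file\n",
               ret == 0 ? "refer to" : "do not refer to");
        return 0;
}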
diff --git a/kernel/kmod.c b/kernel/kmod.c
index 05698a7415fe..ff2c7cb86d77 100644
--- a/kernel/kmod.c
+++ b/kernel/kmod.c
@@ -221,13 +221,12 @@ fail:
221 return 0; 221 return 0;
222} 222}
223 223
224void call_usermodehelper_freeinfo(struct subprocess_info *info) 224static void call_usermodehelper_freeinfo(struct subprocess_info *info)
225{ 225{
226 if (info->cleanup) 226 if (info->cleanup)
227 (*info->cleanup)(info); 227 (*info->cleanup)(info);
228 kfree(info); 228 kfree(info);
229} 229}
230EXPORT_SYMBOL(call_usermodehelper_freeinfo);
231 230
232static void umh_complete(struct subprocess_info *sub_info) 231static void umh_complete(struct subprocess_info *sub_info)
233{ 232{
@@ -410,7 +409,7 @@ EXPORT_SYMBOL_GPL(usermodehelper_read_unlock);
410 409
411/** 410/**
412 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled. 411 * __usermodehelper_set_disable_depth - Modify usermodehelper_disabled.
413 * depth: New value to assign to usermodehelper_disabled. 412 * @depth: New value to assign to usermodehelper_disabled.
414 * 413 *
415 * Change the value of usermodehelper_disabled (under umhelper_sem locked for 414 * Change the value of usermodehelper_disabled (under umhelper_sem locked for
416 * writing) and wakeup tasks waiting for it to change. 415 * writing) and wakeup tasks waiting for it to change.
@@ -479,6 +478,7 @@ static void helper_unlock(void)
479 * structure. This should be passed to call_usermodehelper_exec to 478 * structure. This should be passed to call_usermodehelper_exec to
480 * exec the process and free the structure. 479 * exec the process and free the structure.
481 */ 480 */
481static
482struct subprocess_info *call_usermodehelper_setup(char *path, char **argv, 482struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
483 char **envp, gfp_t gfp_mask) 483 char **envp, gfp_t gfp_mask)
484{ 484{
@@ -494,7 +494,6 @@ struct subprocess_info *call_usermodehelper_setup(char *path, char **argv,
494 out: 494 out:
495 return sub_info; 495 return sub_info;
496} 496}
497EXPORT_SYMBOL(call_usermodehelper_setup);
498 497
499/** 498/**
500 * call_usermodehelper_setfns - set a cleanup/init function 499 * call_usermodehelper_setfns - set a cleanup/init function
@@ -512,6 +511,7 @@ EXPORT_SYMBOL(call_usermodehelper_setup);
512 * Function must be runnable in either a process context or the 511 * Function must be runnable in either a process context or the
513 * context in which call_usermodehelper_exec is called. 512 * context in which call_usermodehelper_exec is called.
514 */ 513 */
514static
515void call_usermodehelper_setfns(struct subprocess_info *info, 515void call_usermodehelper_setfns(struct subprocess_info *info,
516 int (*init)(struct subprocess_info *info, struct cred *new), 516 int (*init)(struct subprocess_info *info, struct cred *new),
517 void (*cleanup)(struct subprocess_info *info), 517 void (*cleanup)(struct subprocess_info *info),
@@ -521,7 +521,6 @@ void call_usermodehelper_setfns(struct subprocess_info *info,
521 info->init = init; 521 info->init = init;
522 info->data = data; 522 info->data = data;
523} 523}
524EXPORT_SYMBOL(call_usermodehelper_setfns);
525 524
526/** 525/**
527 * call_usermodehelper_exec - start a usermode application 526 * call_usermodehelper_exec - start a usermode application
@@ -535,6 +534,7 @@ EXPORT_SYMBOL(call_usermodehelper_setfns);
535 * asynchronously if wait is not set, and runs as a child of keventd. 534 * asynchronously if wait is not set, and runs as a child of keventd.
536 * (ie. it runs with full root capabilities). 535 * (ie. it runs with full root capabilities).
537 */ 536 */
537static
538int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait) 538int call_usermodehelper_exec(struct subprocess_info *sub_info, int wait)
539{ 539{
540 DECLARE_COMPLETION_ONSTACK(done); 540 DECLARE_COMPLETION_ONSTACK(done);
@@ -576,7 +576,25 @@ unlock:
576 helper_unlock(); 576 helper_unlock();
577 return retval; 577 return retval;
578} 578}
579EXPORT_SYMBOL(call_usermodehelper_exec); 579
580int call_usermodehelper_fns(
581 char *path, char **argv, char **envp, int wait,
582 int (*init)(struct subprocess_info *info, struct cred *new),
583 void (*cleanup)(struct subprocess_info *), void *data)
584{
585 struct subprocess_info *info;
586 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL;
587
588 info = call_usermodehelper_setup(path, argv, envp, gfp_mask);
589
590 if (info == NULL)
591 return -ENOMEM;
592
593 call_usermodehelper_setfns(info, init, cleanup, data);
594
595 return call_usermodehelper_exec(info, wait);
596}
597EXPORT_SYMBOL(call_usermodehelper_fns);
580 598
581static int proc_cap_handler(struct ctl_table *table, int write, 599static int proc_cap_handler(struct ctl_table *table, int write,
582 void __user *buffer, size_t *lenp, loff_t *ppos) 600 void __user *buffer, size_t *lenp, loff_t *ppos)
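With call_usermodehelper_setup(), _setfns() and _exec() made static above, outside callers are expected to go through the new call_usermodehelper_fns() wrapper (or the existing call_usermodehelper() interface built on it). A rough sketch of a caller, with a hypothetical helper path:

static int run_example_helper(void)
{
        char *argv[] = { "/sbin/example-helper", "--oneshot", NULL };
        char *envp[] = { "HOME=/",
                         "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };

        /* no init/cleanup callbacks; wait until the helper has finished */
        return call_usermodehelper_fns("/sbin/example-helper", argv, envp,
                                       UMH_WAIT_PROC, NULL, NULL, NULL);
}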
diff --git a/kernel/lglock.c b/kernel/lglock.c
new file mode 100644
index 000000000000..6535a667a5a7
--- /dev/null
+++ b/kernel/lglock.c
@@ -0,0 +1,89 @@
1/* See include/linux/lglock.h for description */
2#include <linux/module.h>
3#include <linux/lglock.h>
4#include <linux/cpu.h>
5#include <linux/string.h>
6
7/*
8 * Note there is no uninit, so lglocks cannot be defined in
9 * modules (but it's fine to use them from there)
10 * Could be added though, just undo lg_lock_init
11 */
12
13void lg_lock_init(struct lglock *lg, char *name)
14{
15 LOCKDEP_INIT_MAP(&lg->lock_dep_map, name, &lg->lock_key, 0);
16}
17EXPORT_SYMBOL(lg_lock_init);
18
19void lg_local_lock(struct lglock *lg)
20{
21 arch_spinlock_t *lock;
22
23 preempt_disable();
24 rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
25 lock = this_cpu_ptr(lg->lock);
26 arch_spin_lock(lock);
27}
28EXPORT_SYMBOL(lg_local_lock);
29
30void lg_local_unlock(struct lglock *lg)
31{
32 arch_spinlock_t *lock;
33
34 rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
35 lock = this_cpu_ptr(lg->lock);
36 arch_spin_unlock(lock);
37 preempt_enable();
38}
39EXPORT_SYMBOL(lg_local_unlock);
40
41void lg_local_lock_cpu(struct lglock *lg, int cpu)
42{
43 arch_spinlock_t *lock;
44
45 preempt_disable();
46 rwlock_acquire_read(&lg->lock_dep_map, 0, 0, _RET_IP_);
47 lock = per_cpu_ptr(lg->lock, cpu);
48 arch_spin_lock(lock);
49}
50EXPORT_SYMBOL(lg_local_lock_cpu);
51
52void lg_local_unlock_cpu(struct lglock *lg, int cpu)
53{
54 arch_spinlock_t *lock;
55
56 rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
57 lock = per_cpu_ptr(lg->lock, cpu);
58 arch_spin_unlock(lock);
59 preempt_enable();
60}
61EXPORT_SYMBOL(lg_local_unlock_cpu);
62
63void lg_global_lock(struct lglock *lg)
64{
65 int i;
66
67 preempt_disable();
68 rwlock_acquire(&lg->lock_dep_map, 0, 0, _RET_IP_);
69 for_each_possible_cpu(i) {
70 arch_spinlock_t *lock;
71 lock = per_cpu_ptr(lg->lock, i);
72 arch_spin_lock(lock);
73 }
74}
75EXPORT_SYMBOL(lg_global_lock);
76
77void lg_global_unlock(struct lglock *lg)
78{
79 int i;
80
81 rwlock_release(&lg->lock_dep_map, 1, _RET_IP_);
82 for_each_possible_cpu(i) {
83 arch_spinlock_t *lock;
84 lock = per_cpu_ptr(lg->lock, i);
85 arch_spin_unlock(lock);
86 }
87 preempt_enable();
88}
89EXPORT_SYMBOL(lg_global_unlock);
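kernel/lglock.c above turns the old lglock macro bodies into real functions: lg_local_lock() takes only the calling CPU's spinlock, while lg_global_lock() sweeps the spinlock of every possible CPU. A hedged usage sketch, assuming the DEFINE_LGLOCK() helper from <linux/lglock.h>:

DEFINE_LGLOCK(example_lg);
/* lg_lock_init(&example_lg, "example_lg") sets up the lockdep class once */

static void update_this_cpu(void)
{
        lg_local_lock(&example_lg);     /* cheap: one per-CPU spinlock */
        /* touch only this CPU's portion of the protected data */
        lg_local_unlock(&example_lg);
}

static void walk_all_cpus(void)
{
        lg_global_lock(&example_lg);    /* expensive: every CPU's spinlock */
        /* a consistent view across all CPUs */
        lg_global_unlock(&example_lg);
}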
diff --git a/kernel/panic.c b/kernel/panic.c
index 8ed89a175d79..d2a5f4ecc6dd 100644
--- a/kernel/panic.c
+++ b/kernel/panic.c
@@ -27,7 +27,7 @@
27#define PANIC_TIMER_STEP 100 27#define PANIC_TIMER_STEP 100
28#define PANIC_BLINK_SPD 18 28#define PANIC_BLINK_SPD 18
29 29
30int panic_on_oops; 30int panic_on_oops = CONFIG_PANIC_ON_OOPS_VALUE;
31static unsigned long tainted_mask; 31static unsigned long tainted_mask;
32static int pause_on_oops; 32static int pause_on_oops;
33static int pause_on_oops_flag; 33static int pause_on_oops_flag;
@@ -108,8 +108,6 @@ void panic(const char *fmt, ...)
108 */ 108 */
109 crash_kexec(NULL); 109 crash_kexec(NULL);
110 110
111 kmsg_dump(KMSG_DUMP_PANIC);
112
113 /* 111 /*
114 * Note smp_send_stop is the usual smp shutdown function, which 112 * Note smp_send_stop is the usual smp shutdown function, which
115 * unfortunately means it may not be hardened to work in a panic 113 * unfortunately means it may not be hardened to work in a panic
@@ -117,6 +115,8 @@ void panic(const char *fmt, ...)
117 */ 115 */
118 smp_send_stop(); 116 smp_send_stop();
119 117
118 kmsg_dump(KMSG_DUMP_PANIC);
119
120 atomic_notifier_call_chain(&panic_notifier_list, 0, buf); 120 atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
121 121
122 bust_spinlocks(0); 122 bust_spinlocks(0);
diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
index 57bc1fd35b3c..b3c7fd554250 100644
--- a/kernel/pid_namespace.c
+++ b/kernel/pid_namespace.c
@@ -149,7 +149,12 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
149{ 149{
150 int nr; 150 int nr;
151 int rc; 151 int rc;
152 struct task_struct *task; 152 struct task_struct *task, *me = current;
153
154 /* Ignore SIGCHLD causing any terminated children to autoreap */
155 spin_lock_irq(&me->sighand->siglock);
156 me->sighand->action[SIGCHLD - 1].sa.sa_handler = SIG_IGN;
157 spin_unlock_irq(&me->sighand->siglock);
153 158
154 /* 159 /*
155 * The last thread in the cgroup-init thread group is terminating. 160 * The last thread in the cgroup-init thread group is terminating.
@@ -179,11 +184,31 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
179 } 184 }
180 read_unlock(&tasklist_lock); 185 read_unlock(&tasklist_lock);
181 186
187 /* Firstly reap the EXIT_ZOMBIE children we may have. */
182 do { 188 do {
183 clear_thread_flag(TIF_SIGPENDING); 189 clear_thread_flag(TIF_SIGPENDING);
184 rc = sys_wait4(-1, NULL, __WALL, NULL); 190 rc = sys_wait4(-1, NULL, __WALL, NULL);
185 } while (rc != -ECHILD); 191 } while (rc != -ECHILD);
186 192
193 /*
194 * sys_wait4() above can't reap the TASK_DEAD children.
195 * Make sure they all go away, see __unhash_process().
196 */
197 for (;;) {
198 bool need_wait = false;
199
200 read_lock(&tasklist_lock);
201 if (!list_empty(&current->children)) {
202 __set_current_state(TASK_UNINTERRUPTIBLE);
203 need_wait = true;
204 }
205 read_unlock(&tasklist_lock);
206
207 if (!need_wait)
208 break;
209 schedule();
210 }
211
187 if (pid_ns->reboot) 212 if (pid_ns->reboot)
188 current->signal->group_exit_code = pid_ns->reboot; 213 current->signal->group_exit_code = pid_ns->reboot;
189 214
@@ -191,6 +216,7 @@ void zap_pid_ns_processes(struct pid_namespace *pid_ns)
191 return; 216 return;
192} 217}
193 218
219#ifdef CONFIG_CHECKPOINT_RESTORE
194static int pid_ns_ctl_handler(struct ctl_table *table, int write, 220static int pid_ns_ctl_handler(struct ctl_table *table, int write,
195 void __user *buffer, size_t *lenp, loff_t *ppos) 221 void __user *buffer, size_t *lenp, loff_t *ppos)
196{ 222{
@@ -218,8 +244,8 @@ static struct ctl_table pid_ns_ctl_table[] = {
218 }, 244 },
219 { } 245 { }
220}; 246};
221
222static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } }; 247static struct ctl_path kern_path[] = { { .procname = "kernel", }, { } };
248#endif /* CONFIG_CHECKPOINT_RESTORE */
223 249
224int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd) 250int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
225{ 251{
@@ -253,7 +279,10 @@ int reboot_pid_ns(struct pid_namespace *pid_ns, int cmd)
253static __init int pid_namespaces_init(void) 279static __init int pid_namespaces_init(void)
254{ 280{
255 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC); 281 pid_ns_cachep = KMEM_CACHE(pid_namespace, SLAB_PANIC);
282
283#ifdef CONFIG_CHECKPOINT_RESTORE
256 register_sysctl_paths(kern_path, pid_ns_ctl_table); 284 register_sysctl_paths(kern_path, pid_ns_ctl_table);
285#endif
257 return 0; 286 return 0;
258} 287}
259 288
diff --git a/kernel/printk.c b/kernel/printk.c
index 32462d2b364a..a2276b916769 100644
--- a/kernel/printk.c
+++ b/kernel/printk.c
@@ -227,10 +227,10 @@ static u32 clear_idx;
227#define LOG_LINE_MAX 1024 227#define LOG_LINE_MAX 1024
228 228
229/* record buffer */ 229/* record buffer */
230#if !defined(CONFIG_64BIT) || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) 230#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
231#define LOG_ALIGN 4 231#define LOG_ALIGN 4
232#else 232#else
233#define LOG_ALIGN 8 233#define LOG_ALIGN __alignof__(struct log)
234#endif 234#endif
235#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT) 235#define __LOG_BUF_LEN (1 << CONFIG_LOG_BUF_SHIFT)
236static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN); 236static char __log_buf[__LOG_BUF_LEN] __aligned(LOG_ALIGN);
@@ -414,7 +414,9 @@ static ssize_t devkmsg_read(struct file *file, char __user *buf,
414 if (!user) 414 if (!user)
415 return -EBADF; 415 return -EBADF;
416 416
417 mutex_lock(&user->lock); 417 ret = mutex_lock_interruptible(&user->lock);
418 if (ret)
419 return ret;
418 raw_spin_lock(&logbuf_lock); 420 raw_spin_lock(&logbuf_lock);
419 while (user->seq == log_next_seq) { 421 while (user->seq == log_next_seq) {
420 if (file->f_flags & O_NONBLOCK) { 422 if (file->f_flags & O_NONBLOCK) {
@@ -878,7 +880,9 @@ static int syslog_print(char __user *buf, int size)
878 syslog_seq++; 880 syslog_seq++;
879 raw_spin_unlock_irq(&logbuf_lock); 881 raw_spin_unlock_irq(&logbuf_lock);
880 882
881 if (len > 0 && copy_to_user(buf, text, len)) 883 if (len > size)
884 len = -EINVAL;
885 else if (len > 0 && copy_to_user(buf, text, len))
882 len = -EFAULT; 886 len = -EFAULT;
883 887
884 kfree(text); 888 kfree(text);
@@ -909,7 +913,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
909 /* 913 /*
910 * Find first record that fits, including all following records, 914 * Find first record that fits, including all following records,
911 * into the user-provided buffer for this dump. 915 * into the user-provided buffer for this dump.
912 */ 916 */
913 seq = clear_seq; 917 seq = clear_seq;
914 idx = clear_idx; 918 idx = clear_idx;
915 while (seq < log_next_seq) { 919 while (seq < log_next_seq) {
@@ -919,6 +923,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
919 idx = log_next(idx); 923 idx = log_next(idx);
920 seq++; 924 seq++;
921 } 925 }
926
927 /* move first record forward until length fits into the buffer */
922 seq = clear_seq; 928 seq = clear_seq;
923 idx = clear_idx; 929 idx = clear_idx;
924 while (len > size && seq < log_next_seq) { 930 while (len > size && seq < log_next_seq) {
@@ -929,7 +935,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
929 seq++; 935 seq++;
930 } 936 }
931 937
932 /* last message in this dump */ 938 /* last message fitting into this dump */
933 next_seq = log_next_seq; 939 next_seq = log_next_seq;
934 940
935 len = 0; 941 len = 0;
@@ -974,6 +980,7 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
974{ 980{
975 bool clear = false; 981 bool clear = false;
976 static int saved_console_loglevel = -1; 982 static int saved_console_loglevel = -1;
983 static DEFINE_MUTEX(syslog_mutex);
977 int error; 984 int error;
978 985
979 error = check_syslog_permissions(type, from_file); 986 error = check_syslog_permissions(type, from_file);
@@ -1000,11 +1007,17 @@ int do_syslog(int type, char __user *buf, int len, bool from_file)
1000 error = -EFAULT; 1007 error = -EFAULT;
1001 goto out; 1008 goto out;
1002 } 1009 }
1010 error = mutex_lock_interruptible(&syslog_mutex);
1011 if (error)
1012 goto out;
1003 error = wait_event_interruptible(log_wait, 1013 error = wait_event_interruptible(log_wait,
1004 syslog_seq != log_next_seq); 1014 syslog_seq != log_next_seq);
1005 if (error) 1015 if (error) {
1016 mutex_unlock(&syslog_mutex);
1006 goto out; 1017 goto out;
1018 }
1007 error = syslog_print(buf, len); 1019 error = syslog_print(buf, len);
1020 mutex_unlock(&syslog_mutex);
1008 break; 1021 break;
1009 /* Read/clear last kernel messages */ 1022 /* Read/clear last kernel messages */
1010 case SYSLOG_ACTION_READ_CLEAR: 1023 case SYSLOG_ACTION_READ_CLEAR:
@@ -2300,48 +2313,210 @@ module_param_named(always_kmsg_dump, always_kmsg_dump, bool, S_IRUGO | S_IWUSR);
2300 * kmsg_dump - dump kernel log to kernel message dumpers. 2313 * kmsg_dump - dump kernel log to kernel message dumpers.
2301 * @reason: the reason (oops, panic etc) for dumping 2314 * @reason: the reason (oops, panic etc) for dumping
2302 * 2315 *
 2303 * Iterate through each of the dump devices and call the oops/panic 2316 * Call each registered dumper's dump() callback, which can
2304 * callbacks with the log buffer. 2317 * retrieve the kmsg records with kmsg_dump_get_line() or
2318 * kmsg_dump_get_buffer().
2305 */ 2319 */
2306void kmsg_dump(enum kmsg_dump_reason reason) 2320void kmsg_dump(enum kmsg_dump_reason reason)
2307{ 2321{
2308 u64 idx;
2309 struct kmsg_dumper *dumper; 2322 struct kmsg_dumper *dumper;
2310 const char *s1, *s2;
2311 unsigned long l1, l2;
2312 unsigned long flags; 2323 unsigned long flags;
2313 2324
2314 if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump) 2325 if ((reason > KMSG_DUMP_OOPS) && !always_kmsg_dump)
2315 return; 2326 return;
2316 2327
2317 /* Theoretically, the log could move on after we do this, but 2328 rcu_read_lock();
2318 there's not a lot we can do about that. The new messages 2329 list_for_each_entry_rcu(dumper, &dump_list, list) {
2319 will overwrite the start of what we dump. */ 2330 if (dumper->max_reason && reason > dumper->max_reason)
2331 continue;
2332
2333 /* initialize iterator with data about the stored records */
2334 dumper->active = true;
2335
2336 raw_spin_lock_irqsave(&logbuf_lock, flags);
2337 dumper->cur_seq = clear_seq;
2338 dumper->cur_idx = clear_idx;
2339 dumper->next_seq = log_next_seq;
2340 dumper->next_idx = log_next_idx;
2341 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2342
2343 /* invoke dumper which will iterate over records */
2344 dumper->dump(dumper, reason);
2345
2346 /* reset iterator */
2347 dumper->active = false;
2348 }
2349 rcu_read_unlock();
2350}
2351
2352/**
2353 * kmsg_dump_get_line - retrieve one kmsg log line
2354 * @dumper: registered kmsg dumper
2355 * @syslog: include the "<4>" prefixes
2356 * @line: buffer to copy the line to
2357 * @size: maximum size of the buffer
2358 * @len: length of line placed into buffer
2359 *
2360 * Start at the beginning of the kmsg buffer, with the oldest kmsg
2361 * record, and copy one record into the provided buffer.
2362 *
2363 * Consecutive calls will return the next available record moving
2364 * towards the end of the buffer with the youngest messages.
2365 *
2366 * A return value of FALSE indicates that there are no more records to
2367 * read.
2368 */
2369bool kmsg_dump_get_line(struct kmsg_dumper *dumper, bool syslog,
2370 char *line, size_t size, size_t *len)
2371{
2372 unsigned long flags;
2373 struct log *msg;
2374 size_t l = 0;
2375 bool ret = false;
2376
2377 if (!dumper->active)
2378 goto out;
2320 2379
2321 raw_spin_lock_irqsave(&logbuf_lock, flags); 2380 raw_spin_lock_irqsave(&logbuf_lock, flags);
2322 if (syslog_seq < log_first_seq) 2381 if (dumper->cur_seq < log_first_seq) {
2323 idx = syslog_idx; 2382 /* messages are gone, move to first available one */
2324 else 2383 dumper->cur_seq = log_first_seq;
2325 idx = log_first_idx; 2384 dumper->cur_idx = log_first_idx;
2385 }
2386
2387 /* last entry */
2388 if (dumper->cur_seq >= log_next_seq) {
2389 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2390 goto out;
2391 }
2326 2392
2327 if (idx > log_next_idx) { 2393 msg = log_from_idx(dumper->cur_idx);
2328 s1 = log_buf; 2394 l = msg_print_text(msg, syslog,
2329 l1 = log_next_idx; 2395 line, size);
2330 2396
2331 s2 = log_buf + idx; 2397 dumper->cur_idx = log_next(dumper->cur_idx);
2332 l2 = log_buf_len - idx; 2398 dumper->cur_seq++;
2333 } else { 2399 ret = true;
2334 s1 = ""; 2400 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2335 l1 = 0; 2401out:
2402 if (len)
2403 *len = l;
2404 return ret;
2405}
2406EXPORT_SYMBOL_GPL(kmsg_dump_get_line);
2407
2408/**
2409 * kmsg_dump_get_buffer - copy kmsg log lines
2410 * @dumper: registered kmsg dumper
2411 * @syslog: include the "<4>" prefixes
2412 * @line: buffer to copy the line to
2413 * @size: maximum size of the buffer
2414 * @len: length of line placed into buffer
2415 *
2416 * Start at the end of the kmsg buffer and fill the provided buffer
 2417 * with as many of the *youngest* kmsg records as fit into it.
2418 * If the buffer is large enough, all available kmsg records will be
2419 * copied with a single call.
2420 *
2421 * Consecutive calls will fill the buffer with the next block of
2422 * available older records, not including the earlier retrieved ones.
2423 *
2424 * A return value of FALSE indicates that there are no more records to
2425 * read.
2426 */
2427bool kmsg_dump_get_buffer(struct kmsg_dumper *dumper, bool syslog,
2428 char *buf, size_t size, size_t *len)
2429{
2430 unsigned long flags;
2431 u64 seq;
2432 u32 idx;
2433 u64 next_seq;
2434 u32 next_idx;
2435 size_t l = 0;
2436 bool ret = false;
2437
2438 if (!dumper->active)
2439 goto out;
2440
2441 raw_spin_lock_irqsave(&logbuf_lock, flags);
2442 if (dumper->cur_seq < log_first_seq) {
2443 /* messages are gone, move to first available one */
2444 dumper->cur_seq = log_first_seq;
2445 dumper->cur_idx = log_first_idx;
2446 }
2447
2448 /* last entry */
2449 if (dumper->cur_seq >= dumper->next_seq) {
2450 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2451 goto out;
2452 }
2453
2454 /* calculate length of entire buffer */
2455 seq = dumper->cur_seq;
2456 idx = dumper->cur_idx;
2457 while (seq < dumper->next_seq) {
2458 struct log *msg = log_from_idx(idx);
2459
2460 l += msg_print_text(msg, true, NULL, 0);
2461 idx = log_next(idx);
2462 seq++;
2463 }
2336 2464
2337 s2 = log_buf + idx; 2465 /* move first record forward until length fits into the buffer */
2338 l2 = log_next_idx - idx; 2466 seq = dumper->cur_seq;
2467 idx = dumper->cur_idx;
2468 while (l > size && seq < dumper->next_seq) {
2469 struct log *msg = log_from_idx(idx);
2470
2471 l -= msg_print_text(msg, true, NULL, 0);
2472 idx = log_next(idx);
2473 seq++;
2474 }
2475
 2476 /* last message in next iteration */
2477 next_seq = seq;
2478 next_idx = idx;
2479
2480 l = 0;
2481 while (seq < dumper->next_seq) {
2482 struct log *msg = log_from_idx(idx);
2483
2484 l += msg_print_text(msg, syslog,
2485 buf + l, size - l);
2486
2487 idx = log_next(idx);
2488 seq++;
2339 } 2489 }
2490
2491 dumper->next_seq = next_seq;
2492 dumper->next_idx = next_idx;
2493 ret = true;
2340 raw_spin_unlock_irqrestore(&logbuf_lock, flags); 2494 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2495out:
2496 if (len)
2497 *len = l;
2498 return ret;
2499}
2500EXPORT_SYMBOL_GPL(kmsg_dump_get_buffer);
2341 2501
2342 rcu_read_lock(); 2502/**
 2343 list_for_each_entry_rcu(dumper, &dump_list, list) 2503 * kmsg_dump_rewind - reset the iterator
2344 dumper->dump(dumper, reason, s1, l1, s2, l2); 2504 * @dumper: registered kmsg dumper
2345 rcu_read_unlock(); 2505 *
2506 * Reset the dumper's iterator so that kmsg_dump_get_line() and
2507 * kmsg_dump_get_buffer() can be called again and used multiple
2508 * times within the same dumper.dump() callback.
2509 */
2510void kmsg_dump_rewind(struct kmsg_dumper *dumper)
2511{
2512 unsigned long flags;
2513
2514 raw_spin_lock_irqsave(&logbuf_lock, flags);
2515 dumper->cur_seq = clear_seq;
2516 dumper->cur_idx = clear_idx;
2517 dumper->next_seq = log_next_seq;
2518 dumper->next_idx = log_next_idx;
2519 raw_spin_unlock_irqrestore(&logbuf_lock, flags);
2346} 2520}
2521EXPORT_SYMBOL_GPL(kmsg_dump_rewind);
2347#endif 2522#endif
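After the rework above, kmsg_dump() no longer passes two raw buffer fragments to the dumpers; each dumper instead pulls records through kmsg_dump_get_line() or kmsg_dump_get_buffer() from its dump() callback. A minimal sketch of a dumper written against the new interface (persist_write() stands in for a hypothetical storage back end):

static void example_dump(struct kmsg_dumper *dumper,
                         enum kmsg_dump_reason reason)
{
        static char line[1024];
        size_t len;

        /* walk the stored records from oldest to newest */
        while (kmsg_dump_get_line(dumper, true, line, sizeof(line), &len))
                persist_write(line, len);       /* hypothetical sink */
}

static struct kmsg_dumper example_dumper = {
        .dump           = example_dump,
        .max_reason     = KMSG_DUMP_PANIC,      /* ignore less severe reasons */
};

/* registered once at init time */
kmsg_dump_register(&example_dumper);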
diff --git a/kernel/rcutree.c b/kernel/rcutree.c
index 0da7b88d92d0..3b0f1337f75b 100644
--- a/kernel/rcutree.c
+++ b/kernel/rcutree.c
@@ -1397,6 +1397,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp)
1397 rdp->qlen_lazy += rsp->qlen_lazy; 1397 rdp->qlen_lazy += rsp->qlen_lazy;
1398 rdp->qlen += rsp->qlen; 1398 rdp->qlen += rsp->qlen;
1399 rdp->n_cbs_adopted += rsp->qlen; 1399 rdp->n_cbs_adopted += rsp->qlen;
1400 if (rsp->qlen_lazy != rsp->qlen)
1401 rcu_idle_count_callbacks_posted();
1400 rsp->qlen_lazy = 0; 1402 rsp->qlen_lazy = 0;
1401 rsp->qlen = 0; 1403 rsp->qlen = 0;
1402 1404
diff --git a/kernel/rcutree.h b/kernel/rcutree.h
index 7f5d138dedf5..ea056495783e 100644
--- a/kernel/rcutree.h
+++ b/kernel/rcutree.h
@@ -84,6 +84,20 @@ struct rcu_dynticks {
84 /* Process level is worth LLONG_MAX/2. */ 84 /* Process level is worth LLONG_MAX/2. */
85 int dynticks_nmi_nesting; /* Track NMI nesting level. */ 85 int dynticks_nmi_nesting; /* Track NMI nesting level. */
86 atomic_t dynticks; /* Even value for idle, else odd. */ 86 atomic_t dynticks; /* Even value for idle, else odd. */
87#ifdef CONFIG_RCU_FAST_NO_HZ
88 int dyntick_drain; /* Prepare-for-idle state variable. */
89 unsigned long dyntick_holdoff;
90 /* No retries for the jiffy of failure. */
91 struct timer_list idle_gp_timer;
92 /* Wake up CPU sleeping with callbacks. */
93 unsigned long idle_gp_timer_expires;
94 /* When to wake up CPU (for repost). */
95 bool idle_first_pass; /* First pass of attempt to go idle? */
96 unsigned long nonlazy_posted;
97 /* # times non-lazy CBs posted to CPU. */
98 unsigned long nonlazy_posted_snap;
99 /* idle-period nonlazy_posted snapshot. */
100#endif /* #ifdef CONFIG_RCU_FAST_NO_HZ */
87}; 101};
88 102
89/* RCU's kthread states for tracing. */ 103/* RCU's kthread states for tracing. */
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index 2411000d9869..5271a020887e 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -1886,8 +1886,9 @@ static void __cpuinit rcu_prepare_kthreads(int cpu)
 1886 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs 1886 * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
1887 * any flavor of RCU. 1887 * any flavor of RCU.
1888 */ 1888 */
1889int rcu_needs_cpu(int cpu) 1889int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
1890{ 1890{
1891 *delta_jiffies = ULONG_MAX;
1891 return rcu_cpu_has_callbacks(cpu); 1892 return rcu_cpu_has_callbacks(cpu);
1892} 1893}
1893 1894
@@ -1962,41 +1963,6 @@ static void rcu_idle_count_callbacks_posted(void)
1962#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */ 1963#define RCU_IDLE_GP_DELAY 6 /* Roughly one grace period. */
1963#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */ 1964#define RCU_IDLE_LAZY_GP_DELAY (6 * HZ) /* Roughly six seconds. */
1964 1965
1965/* Loop counter for rcu_prepare_for_idle(). */
1966static DEFINE_PER_CPU(int, rcu_dyntick_drain);
1967/* If rcu_dyntick_holdoff==jiffies, don't try to enter dyntick-idle mode. */
1968static DEFINE_PER_CPU(unsigned long, rcu_dyntick_holdoff);
1969/* Timer to awaken the CPU if it enters dyntick-idle mode with callbacks. */
1970static DEFINE_PER_CPU(struct timer_list, rcu_idle_gp_timer);
1971/* Scheduled expiry time for rcu_idle_gp_timer to allow reposting. */
1972static DEFINE_PER_CPU(unsigned long, rcu_idle_gp_timer_expires);
1973/* Enable special processing on first attempt to enter dyntick-idle mode. */
1974static DEFINE_PER_CPU(bool, rcu_idle_first_pass);
1975/* Running count of non-lazy callbacks posted, never decremented. */
1976static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted);
1977/* Snapshot of rcu_nonlazy_posted to detect meaningful exits from idle. */
1978static DEFINE_PER_CPU(unsigned long, rcu_nonlazy_posted_snap);
1979
1980/*
1981 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
1982 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
1983 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
1984 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
1985 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
1986 * it is better to incur scheduling-clock interrupts than to spin
1987 * continuously for the same time duration!
1988 */
1989int rcu_needs_cpu(int cpu)
1990{
1991 /* Flag a new idle sojourn to the idle-entry state machine. */
1992 per_cpu(rcu_idle_first_pass, cpu) = 1;
1993 /* If no callbacks, RCU doesn't need the CPU. */
1994 if (!rcu_cpu_has_callbacks(cpu))
1995 return 0;
1996 /* Otherwise, RCU needs the CPU only if it recently tried and failed. */
1997 return per_cpu(rcu_dyntick_holdoff, cpu) == jiffies;
1998}
1999
2000/* 1966/*
2001 * Does the specified flavor of RCU have non-lazy callbacks pending on 1967 * Does the specified flavor of RCU have non-lazy callbacks pending on
2002 * the specified CPU? Both RCU flavor and CPU are specified by the 1968 * the specified CPU? Both RCU flavor and CPU are specified by the
@@ -2040,6 +2006,47 @@ static bool rcu_cpu_has_nonlazy_callbacks(int cpu)
2040} 2006}
2041 2007
2042/* 2008/*
2009 * Allow the CPU to enter dyntick-idle mode if either: (1) There are no
2010 * callbacks on this CPU, (2) this CPU has not yet attempted to enter
2011 * dyntick-idle mode, or (3) this CPU is in the process of attempting to
2012 * enter dyntick-idle mode. Otherwise, if we have recently tried and failed
2013 * to enter dyntick-idle mode, we refuse to try to enter it. After all,
2014 * it is better to incur scheduling-clock interrupts than to spin
2015 * continuously for the same time duration!
2016 *
2017 * The delta_jiffies argument is used to store the time when RCU is
2018 * going to need the CPU again if it still has callbacks. The reason
2019 * for this is that rcu_prepare_for_idle() might need to post a timer,
2020 * but if so, it will do so after tick_nohz_stop_sched_tick() has set
2021 * the wakeup time for this CPU. This means that RCU's timer can be
2022 * delayed until the wakeup time, which defeats the purpose of posting
2023 * a timer.
2024 */
2025int rcu_needs_cpu(int cpu, unsigned long *delta_jiffies)
2026{
2027 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2028
2029 /* Flag a new idle sojourn to the idle-entry state machine. */
2030 rdtp->idle_first_pass = 1;
2031 /* If no callbacks, RCU doesn't need the CPU. */
2032 if (!rcu_cpu_has_callbacks(cpu)) {
2033 *delta_jiffies = ULONG_MAX;
2034 return 0;
2035 }
2036 if (rdtp->dyntick_holdoff == jiffies) {
2037 /* RCU recently tried and failed, so don't try again. */
2038 *delta_jiffies = 1;
2039 return 1;
2040 }
2041 /* Set up for the possibility that RCU will post a timer. */
2042 if (rcu_cpu_has_nonlazy_callbacks(cpu))
2043 *delta_jiffies = RCU_IDLE_GP_DELAY;
2044 else
2045 *delta_jiffies = RCU_IDLE_LAZY_GP_DELAY;
2046 return 0;
2047}
2048
2049/*
2043 * Handler for smp_call_function_single(). The only point of this 2050 * Handler for smp_call_function_single(). The only point of this
2044 * handler is to wake the CPU up, so the handler does only tracing. 2051 * handler is to wake the CPU up, so the handler does only tracing.
2045 */ 2052 */
@@ -2075,21 +2082,24 @@ static void rcu_idle_gp_timer_func(unsigned long cpu_in)
2075 */ 2082 */
2076static void rcu_prepare_for_idle_init(int cpu) 2083static void rcu_prepare_for_idle_init(int cpu)
2077{ 2084{
2078 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2085 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2079 setup_timer(&per_cpu(rcu_idle_gp_timer, cpu), 2086
2080 rcu_idle_gp_timer_func, cpu); 2087 rdtp->dyntick_holdoff = jiffies - 1;
2081 per_cpu(rcu_idle_gp_timer_expires, cpu) = jiffies - 1; 2088 setup_timer(&rdtp->idle_gp_timer, rcu_idle_gp_timer_func, cpu);
2082 per_cpu(rcu_idle_first_pass, cpu) = 1; 2089 rdtp->idle_gp_timer_expires = jiffies - 1;
2090 rdtp->idle_first_pass = 1;
2083} 2091}
2084 2092
2085/* 2093/*
2086 * Clean up for exit from idle. Because we are exiting from idle, there 2094 * Clean up for exit from idle. Because we are exiting from idle, there
2087 * is no longer any point to rcu_idle_gp_timer, so cancel it. This will 2095 * is no longer any point to ->idle_gp_timer, so cancel it. This will
2088 * do nothing if this timer is not active, so just cancel it unconditionally. 2096 * do nothing if this timer is not active, so just cancel it unconditionally.
2089 */ 2097 */
2090static void rcu_cleanup_after_idle(int cpu) 2098static void rcu_cleanup_after_idle(int cpu)
2091{ 2099{
2092 del_timer(&per_cpu(rcu_idle_gp_timer, cpu)); 2100 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2101
2102 del_timer(&rdtp->idle_gp_timer);
2093 trace_rcu_prep_idle("Cleanup after idle"); 2103 trace_rcu_prep_idle("Cleanup after idle");
2094} 2104}
2095 2105
@@ -2108,42 +2118,41 @@ static void rcu_cleanup_after_idle(int cpu)
2108 * Because it is not legal to invoke rcu_process_callbacks() with irqs 2118 * Because it is not legal to invoke rcu_process_callbacks() with irqs
2109 * disabled, we do one pass of force_quiescent_state(), then do an 2119 * disabled, we do one pass of force_quiescent_state(), then do an
2110 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked 2120 * invoke_rcu_core() to cause rcu_process_callbacks() to be invoked
2111 * later. The per-cpu rcu_dyntick_drain variable controls the sequencing. 2121 * later. The ->dyntick_drain field controls the sequencing.
2112 * 2122 *
2113 * The caller must have disabled interrupts. 2123 * The caller must have disabled interrupts.
2114 */ 2124 */
2115static void rcu_prepare_for_idle(int cpu) 2125static void rcu_prepare_for_idle(int cpu)
2116{ 2126{
2117 struct timer_list *tp; 2127 struct timer_list *tp;
2128 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2118 2129
2119 /* 2130 /*
2120 * If this is an idle re-entry, for example, due to use of 2131 * If this is an idle re-entry, for example, due to use of
2121 * RCU_NONIDLE() or the new idle-loop tracing API within the idle 2132 * RCU_NONIDLE() or the new idle-loop tracing API within the idle
2122 * loop, then don't take any state-machine actions, unless the 2133 * loop, then don't take any state-machine actions, unless the
2123 * momentary exit from idle queued additional non-lazy callbacks. 2134 * momentary exit from idle queued additional non-lazy callbacks.
2124 * Instead, repost the rcu_idle_gp_timer if this CPU has callbacks 2135 * Instead, repost the ->idle_gp_timer if this CPU has callbacks
2125 * pending. 2136 * pending.
2126 */ 2137 */
2127 if (!per_cpu(rcu_idle_first_pass, cpu) && 2138 if (!rdtp->idle_first_pass &&
2128 (per_cpu(rcu_nonlazy_posted, cpu) == 2139 (rdtp->nonlazy_posted == rdtp->nonlazy_posted_snap)) {
2129 per_cpu(rcu_nonlazy_posted_snap, cpu))) {
2130 if (rcu_cpu_has_callbacks(cpu)) { 2140 if (rcu_cpu_has_callbacks(cpu)) {
2131 tp = &per_cpu(rcu_idle_gp_timer, cpu); 2141 tp = &rdtp->idle_gp_timer;
2132 mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2142 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2133 } 2143 }
2134 return; 2144 return;
2135 } 2145 }
2136 per_cpu(rcu_idle_first_pass, cpu) = 0; 2146 rdtp->idle_first_pass = 0;
2137 per_cpu(rcu_nonlazy_posted_snap, cpu) = 2147 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted - 1;
2138 per_cpu(rcu_nonlazy_posted, cpu) - 1;
2139 2148
2140 /* 2149 /*
2141 * If there are no callbacks on this CPU, enter dyntick-idle mode. 2150 * If there are no callbacks on this CPU, enter dyntick-idle mode.
2142 * Also reset state to avoid prejudicing later attempts. 2151 * Also reset state to avoid prejudicing later attempts.
2143 */ 2152 */
2144 if (!rcu_cpu_has_callbacks(cpu)) { 2153 if (!rcu_cpu_has_callbacks(cpu)) {
2145 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies - 1; 2154 rdtp->dyntick_holdoff = jiffies - 1;
2146 per_cpu(rcu_dyntick_drain, cpu) = 0; 2155 rdtp->dyntick_drain = 0;
2147 trace_rcu_prep_idle("No callbacks"); 2156 trace_rcu_prep_idle("No callbacks");
2148 return; 2157 return;
2149 } 2158 }
@@ -2152,36 +2161,37 @@ static void rcu_prepare_for_idle(int cpu)
2152 * If in holdoff mode, just return. We will presumably have 2161 * If in holdoff mode, just return. We will presumably have
2153 * refrained from disabling the scheduling-clock tick. 2162 * refrained from disabling the scheduling-clock tick.
2154 */ 2163 */
2155 if (per_cpu(rcu_dyntick_holdoff, cpu) == jiffies) { 2164 if (rdtp->dyntick_holdoff == jiffies) {
2156 trace_rcu_prep_idle("In holdoff"); 2165 trace_rcu_prep_idle("In holdoff");
2157 return; 2166 return;
2158 } 2167 }
2159 2168
2160 /* Check and update the rcu_dyntick_drain sequencing. */ 2169 /* Check and update the ->dyntick_drain sequencing. */
2161 if (per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2170 if (rdtp->dyntick_drain <= 0) {
2162 /* First time through, initialize the counter. */ 2171 /* First time through, initialize the counter. */
2163 per_cpu(rcu_dyntick_drain, cpu) = RCU_IDLE_FLUSHES; 2172 rdtp->dyntick_drain = RCU_IDLE_FLUSHES;
2164 } else if (per_cpu(rcu_dyntick_drain, cpu) <= RCU_IDLE_OPT_FLUSHES && 2173 } else if (rdtp->dyntick_drain <= RCU_IDLE_OPT_FLUSHES &&
2165 !rcu_pending(cpu) && 2174 !rcu_pending(cpu) &&
2166 !local_softirq_pending()) { 2175 !local_softirq_pending()) {
2167 /* Can we go dyntick-idle despite still having callbacks? */ 2176 /* Can we go dyntick-idle despite still having callbacks? */
2168 trace_rcu_prep_idle("Dyntick with callbacks"); 2177 rdtp->dyntick_drain = 0;
2169 per_cpu(rcu_dyntick_drain, cpu) = 0; 2178 rdtp->dyntick_holdoff = jiffies;
2170 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2179 if (rcu_cpu_has_nonlazy_callbacks(cpu)) {
2171 if (rcu_cpu_has_nonlazy_callbacks(cpu)) 2180 trace_rcu_prep_idle("Dyntick with callbacks");
2172 per_cpu(rcu_idle_gp_timer_expires, cpu) = 2181 rdtp->idle_gp_timer_expires =
2173 jiffies + RCU_IDLE_GP_DELAY; 2182 jiffies + RCU_IDLE_GP_DELAY;
2174 else 2183 } else {
2175 per_cpu(rcu_idle_gp_timer_expires, cpu) = 2184 rdtp->idle_gp_timer_expires =
2176 jiffies + RCU_IDLE_LAZY_GP_DELAY; 2185 jiffies + RCU_IDLE_LAZY_GP_DELAY;
2177 tp = &per_cpu(rcu_idle_gp_timer, cpu); 2186 trace_rcu_prep_idle("Dyntick with lazy callbacks");
2178 mod_timer_pinned(tp, per_cpu(rcu_idle_gp_timer_expires, cpu)); 2187 }
2179 per_cpu(rcu_nonlazy_posted_snap, cpu) = 2188 tp = &rdtp->idle_gp_timer;
2180 per_cpu(rcu_nonlazy_posted, cpu); 2189 mod_timer_pinned(tp, rdtp->idle_gp_timer_expires);
2190 rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
2181 return; /* Nothing more to do immediately. */ 2191 return; /* Nothing more to do immediately. */
2182 } else if (--per_cpu(rcu_dyntick_drain, cpu) <= 0) { 2192 } else if (--(rdtp->dyntick_drain) <= 0) {
2183 /* We have hit the limit, so time to give up. */ 2193 /* We have hit the limit, so time to give up. */
2184 per_cpu(rcu_dyntick_holdoff, cpu) = jiffies; 2194 rdtp->dyntick_holdoff = jiffies;
2185 trace_rcu_prep_idle("Begin holdoff"); 2195 trace_rcu_prep_idle("Begin holdoff");
2186 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */ 2196 invoke_rcu_core(); /* Force the CPU out of dyntick-idle. */
2187 return; 2197 return;
@@ -2227,7 +2237,7 @@ static void rcu_prepare_for_idle(int cpu)
2227 */ 2237 */
2228static void rcu_idle_count_callbacks_posted(void) 2238static void rcu_idle_count_callbacks_posted(void)
2229{ 2239{
2230 __this_cpu_add(rcu_nonlazy_posted, 1); 2240 __this_cpu_add(rcu_dynticks.nonlazy_posted, 1);
2231} 2241}
2232 2242
2233#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */ 2243#endif /* #else #if !defined(CONFIG_RCU_FAST_NO_HZ) */
@@ -2238,11 +2248,12 @@ static void rcu_idle_count_callbacks_posted(void)
2238 2248
2239static void print_cpu_stall_fast_no_hz(char *cp, int cpu) 2249static void print_cpu_stall_fast_no_hz(char *cp, int cpu)
2240{ 2250{
2241 struct timer_list *tltp = &per_cpu(rcu_idle_gp_timer, cpu); 2251 struct rcu_dynticks *rdtp = &per_cpu(rcu_dynticks, cpu);
2252 struct timer_list *tltp = &rdtp->idle_gp_timer;
2242 2253
2243 sprintf(cp, "drain=%d %c timer=%lu", 2254 sprintf(cp, "drain=%d %c timer=%lu",
2244 per_cpu(rcu_dyntick_drain, cpu), 2255 rdtp->dyntick_drain,
2245 per_cpu(rcu_dyntick_holdoff, cpu) == jiffies ? 'H' : '.', 2256 rdtp->dyntick_holdoff == jiffies ? 'H' : '.',
2246 timer_pending(tltp) ? tltp->expires - jiffies : -1); 2257 timer_pending(tltp) ? tltp->expires - jiffies : -1);
2247} 2258}
2248 2259
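For context on the interface change above: rcu_needs_cpu() now also reports, through *delta_jiffies, how many jiffies may pass before RCU needs the CPU again. A minimal user-space sketch of that calling convention follows; the names and tick values are invented for illustration and are not taken from the kernel sources.

/*
 * Stand-alone illustration (not part of the patch above): the
 * rcu_needs_cpu(cpu, &delta) calling convention, mocked up in user
 * space.  All names and tick values here are invented.
 */
#include <limits.h>
#include <stdio.h>

/* Stand-in for rcu_needs_cpu(): returns nonzero if the CPU must keep
 * ticking, and stores how many ticks may pass before RCU needs it. */
static int mock_rcu_needs_cpu(int has_callbacks, int in_holdoff,
			      unsigned long *delta_jiffies)
{
	if (!has_callbacks) {
		*delta_jiffies = ULONG_MAX;	/* RCU never needs this CPU */
		return 0;
	}
	if (in_holdoff) {
		*delta_jiffies = 1;		/* retry on the next tick */
		return 1;
	}
	*delta_jiffies = 6;			/* roughly one grace period */
	return 0;
}

int main(void)
{
	unsigned long rcu_delta, timer_delta = 100;	/* next timer: 100 ticks */
	int need_tick = mock_rcu_needs_cpu(1, 0, &rcu_delta);
	unsigned long sleep = rcu_delta < timer_delta ? rcu_delta : timer_delta;

	/* The idle path sleeps no longer than either the timer wheel or
	 * RCU allows; without the delta, RCU's own wakeup timer could be
	 * pushed out to the timer wheel's expiry. */
	printf("need_tick=%d, sleep for %lu ticks\n", need_tick, sleep);
	return 0;
}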
diff --git a/kernel/resource.c b/kernel/resource.c
index 7e8ea66a8c01..e1d2b8ee76d5 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -515,8 +515,8 @@ out:
515 * @root: root resource descriptor 515 * @root: root resource descriptor
516 * @new: resource descriptor desired by caller 516 * @new: resource descriptor desired by caller
517 * @size: requested resource region size 517 * @size: requested resource region size
518 * @min: minimum size to allocate 518 * @min: minimum boundary to allocate
519 * @max: maximum size to allocate 519 * @max: maximum boundary to allocate
520 * @align: alignment requested, in bytes 520 * @align: alignment requested, in bytes
521 * @alignf: alignment function, optional, called if not NULL 521 * @alignf: alignment function, optional, called if not NULL
522 * @alignf_data: arbitrary data to pass to the @alignf function 522 * @alignf_data: arbitrary data to pass to the @alignf function
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 39eb6011bc38..d5594a4268d4 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -142,9 +142,8 @@ const_debug unsigned int sysctl_sched_features =
142#define SCHED_FEAT(name, enabled) \ 142#define SCHED_FEAT(name, enabled) \
143 #name , 143 #name ,
144 144
145static __read_mostly char *sched_feat_names[] = { 145static const char * const sched_feat_names[] = {
146#include "features.h" 146#include "features.h"
147 NULL
148}; 147};
149 148
150#undef SCHED_FEAT 149#undef SCHED_FEAT
@@ -2517,25 +2516,32 @@ static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
2517 sched_avg_update(this_rq); 2516 sched_avg_update(this_rq);
2518} 2517}
2519 2518
2519#ifdef CONFIG_NO_HZ
2520/*
2521 * There is no sane way to deal with nohz on smp when using jiffies because the
2522 * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
2523 * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
2524 *
2525 * Therefore we cannot use the delta approach from the regular tick since that
2526 * would seriously skew the load calculation. However we'll make do for those
2527 * updates happening while idle (nohz_idle_balance) or coming out of idle
2528 * (tick_nohz_idle_exit).
2529 *
2530 * This means we might still be one tick off for nohz periods.
2531 */
2532
2520/* 2533/*
2521 * Called from nohz_idle_balance() to update the load ratings before doing the 2534 * Called from nohz_idle_balance() to update the load ratings before doing the
2522 * idle balance. 2535 * idle balance.
2523 */ 2536 */
2524void update_idle_cpu_load(struct rq *this_rq) 2537void update_idle_cpu_load(struct rq *this_rq)
2525{ 2538{
2526 unsigned long curr_jiffies = jiffies; 2539 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2527 unsigned long load = this_rq->load.weight; 2540 unsigned long load = this_rq->load.weight;
2528 unsigned long pending_updates; 2541 unsigned long pending_updates;
2529 2542
2530 /* 2543 /*
2531 * Bloody broken means of dealing with nohz, but better than nothing.. 2544 * bail if there's load or we're actually up-to-date.
2532 * jiffies is updated by one cpu, another cpu can drift wrt the jiffy
2533 * update and see 0 difference the one time and 2 the next, even though
2534 * we ticked at roughtly the same rate.
2535 *
2536 * Hence we only use this from nohz_idle_balance() and skip this
2537 * nonsense when called from the scheduler_tick() since that's
2538 * guaranteed a stable rate.
2539 */ 2545 */
2540 if (load || curr_jiffies == this_rq->last_load_update_tick) 2546 if (load || curr_jiffies == this_rq->last_load_update_tick)
2541 return; 2547 return;
@@ -2547,12 +2553,38 @@ void update_idle_cpu_load(struct rq *this_rq)
2547} 2553}
2548 2554
2549/* 2555/*
2556 * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
2557 */
2558void update_cpu_load_nohz(void)
2559{
2560 struct rq *this_rq = this_rq();
2561 unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
2562 unsigned long pending_updates;
2563
2564 if (curr_jiffies == this_rq->last_load_update_tick)
2565 return;
2566
2567 raw_spin_lock(&this_rq->lock);
2568 pending_updates = curr_jiffies - this_rq->last_load_update_tick;
2569 if (pending_updates) {
2570 this_rq->last_load_update_tick = curr_jiffies;
2571 /*
2572 * We were idle, this means load 0, the current load might be
2573 * !0 due to remote wakeups and the sort.
2574 */
2575 __update_cpu_load(this_rq, 0, pending_updates);
2576 }
2577 raw_spin_unlock(&this_rq->lock);
2578}
2579#endif /* CONFIG_NO_HZ */
2580
2581/*
2550 * Called from scheduler_tick() 2582 * Called from scheduler_tick()
2551 */ 2583 */
2552static void update_cpu_load_active(struct rq *this_rq) 2584static void update_cpu_load_active(struct rq *this_rq)
2553{ 2585{
2554 /* 2586 /*
2555 * See the mess in update_idle_cpu_load(). 2587 * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
2556 */ 2588 */
2557 this_rq->last_load_update_tick = jiffies; 2589 this_rq->last_load_update_tick = jiffies;
2558 __update_cpu_load(this_rq, this_rq->load.weight, 1); 2590 __update_cpu_load(this_rq, this_rq->load.weight, 1);
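The hunks above fold the ticks missed while idle into the load average by calling __update_cpu_load() with a zero load sample and a pending_updates count, using an ACCESS_ONCE() read of jiffies to tolerate the cross-CPU drift described in the new comment. A stand-alone sketch of that folding idea follows; the decay factor is invented and stands in for the kernel's per-index cpu_load[] shifts.

/*
 * Stand-alone illustration (not part of the patch above): folding the
 * jiffies missed while idle into a load average with zero samples.
 */
#include <stdio.h>

/* One tick of an exponential load average:
 * new = (old * (2^shift - 1) + sample) >> shift. */
static unsigned long tick_load(unsigned long old, unsigned long sample,
			       unsigned int shift)
{
	return (old * ((1UL << shift) - 1) + sample) >> shift;
}

int main(void)
{
	unsigned long load = 1024;		/* load when the CPU went idle */
	unsigned long pending_updates = 5;	/* jiffies spent idle */

	/* While idle the CPU contributed nothing, so every missed tick is
	 * folded in with a zero sample -- the effect of calling
	 * __update_cpu_load(rq, 0, pending_updates) on idle exit. */
	for (unsigned long i = 0; i < pending_updates; i++)
		load = tick_load(load, 0, 1);

	printf("load after %lu idle ticks: %lu\n", pending_updates, load);
	return 0;
}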
@@ -4982,7 +5014,7 @@ void do_set_cpus_allowed(struct task_struct *p, const struct cpumask *new_mask)
4982 p->sched_class->set_cpus_allowed(p, new_mask); 5014 p->sched_class->set_cpus_allowed(p, new_mask);
4983 5015
4984 cpumask_copy(&p->cpus_allowed, new_mask); 5016 cpumask_copy(&p->cpus_allowed, new_mask);
4985 p->rt.nr_cpus_allowed = cpumask_weight(new_mask); 5017 p->nr_cpus_allowed = cpumask_weight(new_mask);
4986} 5018}
4987 5019
4988/* 5020/*
@@ -5524,15 +5556,20 @@ static cpumask_var_t sched_domains_tmpmask; /* sched_domains_mutex */
5524 5556
5525#ifdef CONFIG_SCHED_DEBUG 5557#ifdef CONFIG_SCHED_DEBUG
5526 5558
5527static __read_mostly int sched_domain_debug_enabled; 5559static __read_mostly int sched_debug_enabled;
5528 5560
5529static int __init sched_domain_debug_setup(char *str) 5561static int __init sched_debug_setup(char *str)
5530{ 5562{
5531 sched_domain_debug_enabled = 1; 5563 sched_debug_enabled = 1;
5532 5564
5533 return 0; 5565 return 0;
5534} 5566}
5535early_param("sched_debug", sched_domain_debug_setup); 5567early_param("sched_debug", sched_debug_setup);
5568
5569static inline bool sched_debug(void)
5570{
5571 return sched_debug_enabled;
5572}
5536 5573
5537static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level, 5574static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5538 struct cpumask *groupmask) 5575 struct cpumask *groupmask)
@@ -5572,7 +5609,12 @@ static int sched_domain_debug_one(struct sched_domain *sd, int cpu, int level,
5572 break; 5609 break;
5573 } 5610 }
5574 5611
5575 if (!group->sgp->power) { 5612 /*
5613 * Even though we initialize ->power to something semi-sane,
5614 * we leave power_orig unset. This allows us to detect if
5615 * domain iteration is still funny without causing /0 traps.
5616 */
5617 if (!group->sgp->power_orig) {
5576 printk(KERN_CONT "\n"); 5618 printk(KERN_CONT "\n");
5577 printk(KERN_ERR "ERROR: domain->cpu_power not " 5619 printk(KERN_ERR "ERROR: domain->cpu_power not "
5578 "set\n"); 5620 "set\n");
@@ -5620,7 +5662,7 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5620{ 5662{
5621 int level = 0; 5663 int level = 0;
5622 5664
5623 if (!sched_domain_debug_enabled) 5665 if (!sched_debug_enabled)
5624 return; 5666 return;
5625 5667
5626 if (!sd) { 5668 if (!sd) {
@@ -5641,6 +5683,10 @@ static void sched_domain_debug(struct sched_domain *sd, int cpu)
5641} 5683}
5642#else /* !CONFIG_SCHED_DEBUG */ 5684#else /* !CONFIG_SCHED_DEBUG */
5643# define sched_domain_debug(sd, cpu) do { } while (0) 5685# define sched_domain_debug(sd, cpu) do { } while (0)
5686static inline bool sched_debug(void)
5687{
5688 return false;
5689}
5644#endif /* CONFIG_SCHED_DEBUG */ 5690#endif /* CONFIG_SCHED_DEBUG */
5645 5691
5646static int sd_degenerate(struct sched_domain *sd) 5692static int sd_degenerate(struct sched_domain *sd)
@@ -5962,6 +6008,44 @@ struct sched_domain_topology_level {
5962 struct sd_data data; 6008 struct sd_data data;
5963}; 6009};
5964 6010
6011/*
6012 * Build an iteration mask that can exclude certain CPUs from the upwards
6013 * domain traversal.
6014 *
6015 * Asymmetric node setups can result in situations where the domain tree is of
6016 * unequal depth, make sure to skip domains that already cover the entire
6017 * range.
6018 *
6019 * In that case build_sched_domains() will have terminated the iteration early
6020 * and our sibling sd spans will be empty. Domains should always include the
6021 * cpu they're built on, so check that.
6022 *
6023 */
6024static void build_group_mask(struct sched_domain *sd, struct sched_group *sg)
6025{
6026 const struct cpumask *span = sched_domain_span(sd);
6027 struct sd_data *sdd = sd->private;
6028 struct sched_domain *sibling;
6029 int i;
6030
6031 for_each_cpu(i, span) {
6032 sibling = *per_cpu_ptr(sdd->sd, i);
6033 if (!cpumask_test_cpu(i, sched_domain_span(sibling)))
6034 continue;
6035
6036 cpumask_set_cpu(i, sched_group_mask(sg));
6037 }
6038}
6039
6040/*
6041 * Return the canonical balance cpu for this group, this is the first cpu
6042 * of this group that's also in the iteration mask.
6043 */
6044int group_balance_cpu(struct sched_group *sg)
6045{
6046 return cpumask_first_and(sched_group_cpus(sg), sched_group_mask(sg));
6047}
6048
5965static int 6049static int
5966build_overlap_sched_groups(struct sched_domain *sd, int cpu) 6050build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5967{ 6051{
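group_balance_cpu() introduced above simply picks the first CPU that sits in both the group's span and its new iteration mask. A toy version using plain bitmasks instead of struct cpumask; the mask values are invented for the example.

/*
 * Stand-alone illustration (not part of the patch above): the
 * group_balance_cpu() selection with cpumasks modelled as bitmasks.
 */
#include <stdio.h>

static int first_cpu(unsigned long mask)
{
	for (unsigned cpu = 0; cpu < 8 * sizeof(mask); cpu++)
		if (mask & (1UL << cpu))
			return (int)cpu;
	return -1;
}

/* First CPU present in both the group's span and its balance mask. */
static int balance_cpu(unsigned long group_cpus, unsigned long group_mask)
{
	return first_cpu(group_cpus & group_mask);
}

int main(void)
{
	unsigned long span = 0xF0;	/* group spans CPUs 4-7 */
	unsigned long mask = 0xC0;	/* only CPUs 6 and 7 may balance it */

	printf("balance cpu = %d\n", balance_cpu(span, mask));
	return 0;
}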
@@ -5980,6 +6064,12 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5980 if (cpumask_test_cpu(i, covered)) 6064 if (cpumask_test_cpu(i, covered))
5981 continue; 6065 continue;
5982 6066
6067 child = *per_cpu_ptr(sdd->sd, i);
6068
6069 /* See the comment near build_group_mask(). */
6070 if (!cpumask_test_cpu(i, sched_domain_span(child)))
6071 continue;
6072
5983 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(), 6073 sg = kzalloc_node(sizeof(struct sched_group) + cpumask_size(),
5984 GFP_KERNEL, cpu_to_node(cpu)); 6074 GFP_KERNEL, cpu_to_node(cpu));
5985 6075
@@ -5987,8 +6077,6 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5987 goto fail; 6077 goto fail;
5988 6078
5989 sg_span = sched_group_cpus(sg); 6079 sg_span = sched_group_cpus(sg);
5990
5991 child = *per_cpu_ptr(sdd->sd, i);
5992 if (child->child) { 6080 if (child->child) {
5993 child = child->child; 6081 child = child->child;
5994 cpumask_copy(sg_span, sched_domain_span(child)); 6082 cpumask_copy(sg_span, sched_domain_span(child));
@@ -5997,10 +6085,24 @@ build_overlap_sched_groups(struct sched_domain *sd, int cpu)
5997 6085
5998 cpumask_or(covered, covered, sg_span); 6086 cpumask_or(covered, covered, sg_span);
5999 6087
6000 sg->sgp = *per_cpu_ptr(sdd->sgp, cpumask_first(sg_span)); 6088 sg->sgp = *per_cpu_ptr(sdd->sgp, i);
6001 atomic_inc(&sg->sgp->ref); 6089 if (atomic_inc_return(&sg->sgp->ref) == 1)
6090 build_group_mask(sd, sg);
6002 6091
6003 if (cpumask_test_cpu(cpu, sg_span)) 6092 /*
6093 * Initialize sgp->power such that even if we mess up the
6094 * domains and no possible iteration will get us here, we won't
6095 * die on a /0 trap.
6096 */
6097 sg->sgp->power = SCHED_POWER_SCALE * cpumask_weight(sg_span);
6098
6099 /*
6100 * Make sure the first group of this domain contains the
6101 * canonical balance cpu. Otherwise the sched_domain iteration
6102 * breaks. See update_sg_lb_stats().
6103 */
6104 if ((!groups && cpumask_test_cpu(cpu, sg_span)) ||
6105 group_balance_cpu(sg) == cpu)
6004 groups = sg; 6106 groups = sg;
6005 6107
6006 if (!first) 6108 if (!first)
@@ -6074,6 +6176,7 @@ build_sched_groups(struct sched_domain *sd, int cpu)
6074 6176
6075 cpumask_clear(sched_group_cpus(sg)); 6177 cpumask_clear(sched_group_cpus(sg));
6076 sg->sgp->power = 0; 6178 sg->sgp->power = 0;
6179 cpumask_setall(sched_group_mask(sg));
6077 6180
6078 for_each_cpu(j, span) { 6181 for_each_cpu(j, span) {
6079 if (get_group(j, sdd, NULL) != group) 6182 if (get_group(j, sdd, NULL) != group)
@@ -6115,7 +6218,7 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
6115 sg = sg->next; 6218 sg = sg->next;
6116 } while (sg != sd->groups); 6219 } while (sg != sd->groups);
6117 6220
6118 if (cpu != group_first_cpu(sg)) 6221 if (cpu != group_balance_cpu(sg))
6119 return; 6222 return;
6120 6223
6121 update_group_power(sd, cpu); 6224 update_group_power(sd, cpu);
@@ -6165,11 +6268,8 @@ int sched_domain_level_max;
6165 6268
6166static int __init setup_relax_domain_level(char *str) 6269static int __init setup_relax_domain_level(char *str)
6167{ 6270{
6168 unsigned long val; 6271 if (kstrtoint(str, 0, &default_relax_domain_level))
6169 6272 pr_warn("Unable to set relax_domain_level\n");
6170 val = simple_strtoul(str, NULL, 0);
6171 if (val < sched_domain_level_max)
6172 default_relax_domain_level = val;
6173 6273
6174 return 1; 6274 return 1;
6175} 6275}
@@ -6279,14 +6379,13 @@ static struct sched_domain_topology_level *sched_domain_topology = default_topol
6279#ifdef CONFIG_NUMA 6379#ifdef CONFIG_NUMA
6280 6380
6281static int sched_domains_numa_levels; 6381static int sched_domains_numa_levels;
6282static int sched_domains_numa_scale;
6283static int *sched_domains_numa_distance; 6382static int *sched_domains_numa_distance;
6284static struct cpumask ***sched_domains_numa_masks; 6383static struct cpumask ***sched_domains_numa_masks;
6285static int sched_domains_curr_level; 6384static int sched_domains_curr_level;
6286 6385
6287static inline int sd_local_flags(int level) 6386static inline int sd_local_flags(int level)
6288{ 6387{
6289 if (sched_domains_numa_distance[level] > REMOTE_DISTANCE) 6388 if (sched_domains_numa_distance[level] > RECLAIM_DISTANCE)
6290 return 0; 6389 return 0;
6291 6390
6292 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE; 6391 return SD_BALANCE_EXEC | SD_BALANCE_FORK | SD_WAKE_AFFINE;
@@ -6344,6 +6443,42 @@ static const struct cpumask *sd_numa_mask(int cpu)
6344 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)]; 6443 return sched_domains_numa_masks[sched_domains_curr_level][cpu_to_node(cpu)];
6345} 6444}
6346 6445
6446static void sched_numa_warn(const char *str)
6447{
6448 static int done = false;
6449 int i,j;
6450
6451 if (done)
6452 return;
6453
6454 done = true;
6455
6456 printk(KERN_WARNING "ERROR: %s\n\n", str);
6457
6458 for (i = 0; i < nr_node_ids; i++) {
6459 printk(KERN_WARNING " ");
6460 for (j = 0; j < nr_node_ids; j++)
6461 printk(KERN_CONT "%02d ", node_distance(i,j));
6462 printk(KERN_CONT "\n");
6463 }
6464 printk(KERN_WARNING "\n");
6465}
6466
6467static bool find_numa_distance(int distance)
6468{
6469 int i;
6470
6471 if (distance == node_distance(0, 0))
6472 return true;
6473
6474 for (i = 0; i < sched_domains_numa_levels; i++) {
6475 if (sched_domains_numa_distance[i] == distance)
6476 return true;
6477 }
6478
6479 return false;
6480}
6481
6347static void sched_init_numa(void) 6482static void sched_init_numa(void)
6348{ 6483{
6349 int next_distance, curr_distance = node_distance(0, 0); 6484 int next_distance, curr_distance = node_distance(0, 0);
@@ -6351,7 +6486,6 @@ static void sched_init_numa(void)
6351 int level = 0; 6486 int level = 0;
6352 int i, j, k; 6487 int i, j, k;
6353 6488
6354 sched_domains_numa_scale = curr_distance;
6355 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL); 6489 sched_domains_numa_distance = kzalloc(sizeof(int) * nr_node_ids, GFP_KERNEL);
6356 if (!sched_domains_numa_distance) 6490 if (!sched_domains_numa_distance)
6357 return; 6491 return;
@@ -6362,23 +6496,41 @@ static void sched_init_numa(void)
6362 * 6496 *
6363 * Assumes node_distance(0,j) includes all distances in 6497 * Assumes node_distance(0,j) includes all distances in
6364 * node_distance(i,j) in order to avoid cubic time. 6498 * node_distance(i,j) in order to avoid cubic time.
6365 *
6366 * XXX: could be optimized to O(n log n) by using sort()
6367 */ 6499 */
6368 next_distance = curr_distance; 6500 next_distance = curr_distance;
6369 for (i = 0; i < nr_node_ids; i++) { 6501 for (i = 0; i < nr_node_ids; i++) {
6370 for (j = 0; j < nr_node_ids; j++) { 6502 for (j = 0; j < nr_node_ids; j++) {
6371 int distance = node_distance(0, j); 6503 for (k = 0; k < nr_node_ids; k++) {
6372 if (distance > curr_distance && 6504 int distance = node_distance(i, k);
6373 (distance < next_distance || 6505
6374 next_distance == curr_distance)) 6506 if (distance > curr_distance &&
6375 next_distance = distance; 6507 (distance < next_distance ||
6508 next_distance == curr_distance))
6509 next_distance = distance;
6510
6511 /*
6512 * While not a strong assumption it would be nice to know
6513 * about cases where if node A is connected to B, B is not
6514 * equally connected to A.
6515 */
6516 if (sched_debug() && node_distance(k, i) != distance)
6517 sched_numa_warn("Node-distance not symmetric");
6518
6519 if (sched_debug() && i && !find_numa_distance(distance))
6520 sched_numa_warn("Node-0 not representative");
6521 }
6522 if (next_distance != curr_distance) {
6523 sched_domains_numa_distance[level++] = next_distance;
6524 sched_domains_numa_levels = level;
6525 curr_distance = next_distance;
6526 } else break;
6376 } 6527 }
6377 if (next_distance != curr_distance) { 6528
6378 sched_domains_numa_distance[level++] = next_distance; 6529 /*
6379 sched_domains_numa_levels = level; 6530 * In case of sched_debug() we verify the above assumption.
6380 curr_distance = next_distance; 6531 */
6381 } else break; 6532 if (!sched_debug())
6533 break;
6382 } 6534 }
6383 /* 6535 /*
6384 * 'level' contains the number of unique distances, excluding the 6536 * 'level' contains the number of unique distances, excluding the
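The rewritten scan above walks the full node_distance(i, k) matrix, collecting the distinct distances that become the NUMA levels and warning when the table is asymmetric. A stand-alone sketch over a small hard-coded table follows; the kernel additionally leaves the node-local distance out of its level count, which is kept here for brevity.

/*
 * Stand-alone illustration (not part of the patch above): collecting
 * distinct NUMA distances with the symmetry check sched_numa_warn()
 * performs.  The distance table is invented.
 */
#include <stdio.h>

#define NODES 3

static const int dist[NODES][NODES] = {
	{ 10, 20, 30 },
	{ 20, 10, 30 },
	{ 30, 30, 10 },
};

int main(void)
{
	int levels[NODES * NODES];
	int nlevels = 0;

	for (int i = 0; i < NODES; i++) {
		for (int k = 0; k < NODES; k++) {
			int d = dist[i][k];
			int seen = 0;

			if (dist[k][i] != d)
				fprintf(stderr, "distance (%d,%d) not symmetric\n", i, k);

			for (int l = 0; l < nlevels; l++)
				if (levels[l] == d)
					seen = 1;
			if (!seen)
				levels[nlevels++] = d;
		}
	}

	printf("%d distinct distances:", nlevels);
	for (int l = 0; l < nlevels; l++)
		printf(" %d", levels[l]);
	printf("\n");
	return 0;
}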
@@ -6403,7 +6555,7 @@ static void sched_init_numa(void)
6403 return; 6555 return;
6404 6556
6405 for (j = 0; j < nr_node_ids; j++) { 6557 for (j = 0; j < nr_node_ids; j++) {
6406 struct cpumask *mask = kzalloc_node(cpumask_size(), GFP_KERNEL, j); 6558 struct cpumask *mask = kzalloc(cpumask_size(), GFP_KERNEL);
6407 if (!mask) 6559 if (!mask)
6408 return; 6560 return;
6409 6561
@@ -6490,7 +6642,7 @@ static int __sdt_alloc(const struct cpumask *cpu_map)
6490 6642
6491 *per_cpu_ptr(sdd->sg, j) = sg; 6643 *per_cpu_ptr(sdd->sg, j) = sg;
6492 6644
6493 sgp = kzalloc_node(sizeof(struct sched_group_power), 6645 sgp = kzalloc_node(sizeof(struct sched_group_power) + cpumask_size(),
6494 GFP_KERNEL, cpu_to_node(j)); 6646 GFP_KERNEL, cpu_to_node(j));
6495 if (!sgp) 6647 if (!sgp)
6496 return -ENOMEM; 6648 return -ENOMEM;
@@ -6543,7 +6695,6 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6543 if (!sd) 6695 if (!sd)
6544 return child; 6696 return child;
6545 6697
6546 set_domain_attribute(sd, attr);
6547 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu)); 6698 cpumask_and(sched_domain_span(sd), cpu_map, tl->mask(cpu));
6548 if (child) { 6699 if (child) {
6549 sd->level = child->level + 1; 6700 sd->level = child->level + 1;
@@ -6551,6 +6702,7 @@ struct sched_domain *build_sched_domain(struct sched_domain_topology_level *tl,
6551 child->parent = sd; 6702 child->parent = sd;
6552 } 6703 }
6553 sd->child = child; 6704 sd->child = child;
6705 set_domain_attribute(sd, attr);
6554 6706
6555 return sd; 6707 return sd;
6556} 6708}
@@ -6691,7 +6843,6 @@ static int init_sched_domains(const struct cpumask *cpu_map)
6691 if (!doms_cur) 6843 if (!doms_cur)
6692 doms_cur = &fallback_doms; 6844 doms_cur = &fallback_doms;
6693 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map); 6845 cpumask_andnot(doms_cur[0], cpu_map, cpu_isolated_map);
6694 dattr_cur = NULL;
6695 err = build_sched_domains(doms_cur[0], NULL); 6846 err = build_sched_domains(doms_cur[0], NULL);
6696 register_sched_domain_sysctl(); 6847 register_sched_domain_sysctl();
6697 6848
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 940e6d17cf96..c099cc6eebe3 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2703,7 +2703,7 @@ select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
2703 int want_sd = 1; 2703 int want_sd = 1;
2704 int sync = wake_flags & WF_SYNC; 2704 int sync = wake_flags & WF_SYNC;
2705 2705
2706 if (p->rt.nr_cpus_allowed == 1) 2706 if (p->nr_cpus_allowed == 1)
2707 return prev_cpu; 2707 return prev_cpu;
2708 2708
2709 if (sd_flag & SD_BALANCE_WAKE) { 2709 if (sd_flag & SD_BALANCE_WAKE) {
@@ -3503,15 +3503,22 @@ unsigned long __weak arch_scale_smt_power(struct sched_domain *sd, int cpu)
3503unsigned long scale_rt_power(int cpu) 3503unsigned long scale_rt_power(int cpu)
3504{ 3504{
3505 struct rq *rq = cpu_rq(cpu); 3505 struct rq *rq = cpu_rq(cpu);
3506 u64 total, available; 3506 u64 total, available, age_stamp, avg;
3507 3507
3508 total = sched_avg_period() + (rq->clock - rq->age_stamp); 3508 /*
3509 * Since we're reading these variables without serialization make sure
3510 * we read them once before doing sanity checks on them.
3511 */
3512 age_stamp = ACCESS_ONCE(rq->age_stamp);
3513 avg = ACCESS_ONCE(rq->rt_avg);
3514
3515 total = sched_avg_period() + (rq->clock - age_stamp);
3509 3516
3510 if (unlikely(total < rq->rt_avg)) { 3517 if (unlikely(total < avg)) {
3511 /* Ensures that power won't end up being negative */ 3518 /* Ensures that power won't end up being negative */
3512 available = 0; 3519 available = 0;
3513 } else { 3520 } else {
3514 available = total - rq->rt_avg; 3521 available = total - avg;
3515 } 3522 }
3516 3523
3517 if (unlikely((s64)total < SCHED_POWER_SCALE)) 3524 if (unlikely((s64)total < SCHED_POWER_SCALE))
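scale_rt_power() above estimates how much capacity is left for fair tasks once RT time is subtracted, guarding against the difference going negative; the new ACCESS_ONCE() reads only ensure age_stamp and rt_avg are each read once. The arithmetic, reduced to plain integers, with SCHED_POWER_SCALE assumed to be 1024 and plain division standing in for div_u64():

/*
 * Stand-alone illustration (not part of the patch above): the
 * scale_rt_power() arithmetic on plain integers.
 */
#include <stdint.h>
#include <stdio.h>

#define SCHED_POWER_SCALE 1024UL

/* Capacity left for fair tasks after rt_avg time was spent on RT work
 * within a window of total time. */
static unsigned long leftover_power(uint64_t total, uint64_t rt_avg)
{
	uint64_t available;

	if (rt_avg > total)		/* guard against going "negative" */
		available = 0;
	else
		available = total - rt_avg;

	if (total < SCHED_POWER_SCALE)
		total = SCHED_POWER_SCALE;

	return (unsigned long)(available * SCHED_POWER_SCALE / total);
}

int main(void)
{
	/* RT work ate a quarter of the window: 3/4 of 1024 is left. */
	printf("%lu\n", leftover_power(4000000, 1000000));
	return 0;
}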
@@ -3574,13 +3581,28 @@ void update_group_power(struct sched_domain *sd, int cpu)
3574 3581
3575 power = 0; 3582 power = 0;
3576 3583
3577 group = child->groups; 3584 if (child->flags & SD_OVERLAP) {
3578 do { 3585 /*
3579 power += group->sgp->power; 3586 * SD_OVERLAP domains cannot assume that child groups
3580 group = group->next; 3587 * span the current group.
3581 } while (group != child->groups); 3588 */
3582 3589
3583 sdg->sgp->power = power; 3590 for_each_cpu(cpu, sched_group_cpus(sdg))
3591 power += power_of(cpu);
3592 } else {
3593 /*
3594 * !SD_OVERLAP domains can assume that child groups
3595 * span the current group.
3596 */
3597
3598 group = child->groups;
3599 do {
3600 power += group->sgp->power;
3601 group = group->next;
3602 } while (group != child->groups);
3603 }
3604
3605 sdg->sgp->power_orig = sdg->sgp->power = power;
3584} 3606}
3585 3607
3586/* 3608/*
@@ -3610,7 +3632,7 @@ fix_small_capacity(struct sched_domain *sd, struct sched_group *group)
3610 3632
3611/** 3633/**
3612 * update_sg_lb_stats - Update sched_group's statistics for load balancing. 3634 * update_sg_lb_stats - Update sched_group's statistics for load balancing.
3613 * @sd: The sched_domain whose statistics are to be updated. 3635 * @env: The load balancing environment.
3614 * @group: sched_group whose statistics are to be updated. 3636 * @group: sched_group whose statistics are to be updated.
3615 * @load_idx: Load index of sched_domain of this_cpu for load calc. 3637 * @load_idx: Load index of sched_domain of this_cpu for load calc.
3616 * @local_group: Does group contain this_cpu. 3638 * @local_group: Does group contain this_cpu.
@@ -3630,7 +3652,7 @@ static inline void update_sg_lb_stats(struct lb_env *env,
3630 int i; 3652 int i;
3631 3653
3632 if (local_group) 3654 if (local_group)
3633 balance_cpu = group_first_cpu(group); 3655 balance_cpu = group_balance_cpu(group);
3634 3656
3635 /* Tally up the load of all CPUs in the group */ 3657 /* Tally up the load of all CPUs in the group */
3636 max_cpu_load = 0; 3658 max_cpu_load = 0;
@@ -3645,7 +3667,8 @@ static inline void update_sg_lb_stats(struct lb_env *env,
3645 3667
3646 /* Bias balancing toward cpus of our domain */ 3668 /* Bias balancing toward cpus of our domain */
3647 if (local_group) { 3669 if (local_group) {
3648 if (idle_cpu(i) && !first_idle_cpu) { 3670 if (idle_cpu(i) && !first_idle_cpu &&
3671 cpumask_test_cpu(i, sched_group_mask(group))) {
3649 first_idle_cpu = 1; 3672 first_idle_cpu = 1;
3650 balance_cpu = i; 3673 balance_cpu = i;
3651 } 3674 }
@@ -3719,11 +3742,10 @@ static inline void update_sg_lb_stats(struct lb_env *env,
3719 3742
3720/** 3743/**
3721 * update_sd_pick_busiest - return 1 on busiest group 3744 * update_sd_pick_busiest - return 1 on busiest group
3722 * @sd: sched_domain whose statistics are to be checked 3745 * @env: The load balancing environment.
3723 * @sds: sched_domain statistics 3746 * @sds: sched_domain statistics
3724 * @sg: sched_group candidate to be checked for being the busiest 3747 * @sg: sched_group candidate to be checked for being the busiest
3725 * @sgs: sched_group statistics 3748 * @sgs: sched_group statistics
3726 * @this_cpu: the current cpu
3727 * 3749 *
3728 * Determine if @sg is a busier group than the previously selected 3750 * Determine if @sg is a busier group than the previously selected
3729 * busiest group. 3751 * busiest group.
@@ -3761,9 +3783,7 @@ static bool update_sd_pick_busiest(struct lb_env *env,
3761 3783
3762/** 3784/**
3763 * update_sd_lb_stats - Update sched_domain's statistics for load balancing. 3785 * update_sd_lb_stats - Update sched_domain's statistics for load balancing.
3764 * @sd: sched_domain whose statistics are to be updated. 3786 * @env: The load balancing environment.
3765 * @this_cpu: Cpu for which load balance is currently performed.
3766 * @idle: Idle status of this_cpu
3767 * @cpus: Set of cpus considered for load balancing. 3787 * @cpus: Set of cpus considered for load balancing.
3768 * @balance: Should we balance. 3788 * @balance: Should we balance.
3769 * @sds: variable to hold the statistics for this sched_domain. 3789 * @sds: variable to hold the statistics for this sched_domain.
@@ -3852,10 +3872,8 @@ static inline void update_sd_lb_stats(struct lb_env *env,
3852 * Returns 1 when packing is required and a task should be moved to 3872 * Returns 1 when packing is required and a task should be moved to
3853 * this CPU. The amount of the imbalance is returned in *imbalance. 3873 * this CPU. The amount of the imbalance is returned in *imbalance.
3854 * 3874 *
3855 * @sd: The sched_domain whose packing is to be checked. 3875 * @env: The load balancing environment.
3856 * @sds: Statistics of the sched_domain which is to be packed 3876 * @sds: Statistics of the sched_domain which is to be packed
3857 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3858 * @imbalance: returns amount of imbalanced due to packing.
3859 */ 3877 */
3860static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds) 3878static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
3861{ 3879{
@@ -3881,9 +3899,8 @@ static int check_asym_packing(struct lb_env *env, struct sd_lb_stats *sds)
3881 * fix_small_imbalance - Calculate the minor imbalance that exists 3899 * fix_small_imbalance - Calculate the minor imbalance that exists
3882 * amongst the groups of a sched_domain, during 3900 * amongst the groups of a sched_domain, during
3883 * load balancing. 3901 * load balancing.
3902 * @env: The load balancing environment.
3884 * @sds: Statistics of the sched_domain whose imbalance is to be calculated. 3903 * @sds: Statistics of the sched_domain whose imbalance is to be calculated.
3885 * @this_cpu: The cpu at whose sched_domain we're performing load-balance.
3886 * @imbalance: Variable to store the imbalance.
3887 */ 3904 */
3888static inline 3905static inline
3889void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds) 3906void fix_small_imbalance(struct lb_env *env, struct sd_lb_stats *sds)
@@ -4026,11 +4043,7 @@ static inline void calculate_imbalance(struct lb_env *env, struct sd_lb_stats *s
4026 * Also calculates the amount of weighted load which should be moved 4043 * Also calculates the amount of weighted load which should be moved
4027 * to restore balance. 4044 * to restore balance.
4028 * 4045 *
4029 * @sd: The sched_domain whose busiest group is to be returned. 4046 * @env: The load balancing environment.
4030 * @this_cpu: The cpu for which load balancing is currently being performed.
4031 * @imbalance: Variable which stores amount of weighted load which should
4032 * be moved to restore balance/put a group to idle.
4033 * @idle: The idle status of this_cpu.
4034 * @cpus: The set of CPUs under consideration for load-balancing. 4047 * @cpus: The set of CPUs under consideration for load-balancing.
4035 * @balance: Pointer to a variable indicating if this_cpu 4048 * @balance: Pointer to a variable indicating if this_cpu
4036 * is the appropriate cpu to perform load balancing at this_level. 4049 * is the appropriate cpu to perform load balancing at this_level.
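For SD_OVERLAP domains, update_group_power() above sums per-CPU power over the group's own span instead of walking child groups, because with asymmetric topologies the children may not cover the parent. A toy comparison of the two summations; capacities and spans are invented for the example.

/*
 * Stand-alone illustration (not part of the patch above): why SD_OVERLAP
 * groups sum per-CPU power over their own span.
 */
#include <stdio.h>

#define NCPUS 4

static const unsigned long cpu_power[NCPUS] = { 1024, 1024, 512, 512 };

static unsigned long power_of_span(const int *span, int n)
{
	unsigned long sum = 0;

	for (int i = 0; i < n; i++)
		sum += cpu_power[span[i]];
	return sum;
}

int main(void)
{
	int full_span[]  = { 0, 1, 2, 3 };	/* the parent group's span */
	int child_span[] = { 0, 1 };		/* the only child group built */

	/* If the child groups do not cover the parent -- possible with the
	 * asymmetric setups handled above -- summing the children
	 * under-reports the group's power. */
	printf("span sum  = %lu\n", power_of_span(full_span, 4));
	printf("child sum = %lu\n", power_of_span(child_span, 2));
	return 0;
}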
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index c5565c3c515f..573e1ca01102 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -274,13 +274,16 @@ static void update_rt_migration(struct rt_rq *rt_rq)
274 274
275static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 275static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
276{ 276{
277 struct task_struct *p;
278
277 if (!rt_entity_is_task(rt_se)) 279 if (!rt_entity_is_task(rt_se))
278 return; 280 return;
279 281
282 p = rt_task_of(rt_se);
280 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 283 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
281 284
282 rt_rq->rt_nr_total++; 285 rt_rq->rt_nr_total++;
283 if (rt_se->nr_cpus_allowed > 1) 286 if (p->nr_cpus_allowed > 1)
284 rt_rq->rt_nr_migratory++; 287 rt_rq->rt_nr_migratory++;
285 288
286 update_rt_migration(rt_rq); 289 update_rt_migration(rt_rq);
@@ -288,13 +291,16 @@ static void inc_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
288 291
289static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq) 292static void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
290{ 293{
294 struct task_struct *p;
295
291 if (!rt_entity_is_task(rt_se)) 296 if (!rt_entity_is_task(rt_se))
292 return; 297 return;
293 298
299 p = rt_task_of(rt_se);
294 rt_rq = &rq_of_rt_rq(rt_rq)->rt; 300 rt_rq = &rq_of_rt_rq(rt_rq)->rt;
295 301
296 rt_rq->rt_nr_total--; 302 rt_rq->rt_nr_total--;
297 if (rt_se->nr_cpus_allowed > 1) 303 if (p->nr_cpus_allowed > 1)
298 rt_rq->rt_nr_migratory--; 304 rt_rq->rt_nr_migratory--;
299 305
300 update_rt_migration(rt_rq); 306 update_rt_migration(rt_rq);
@@ -1161,7 +1167,7 @@ enqueue_task_rt(struct rq *rq, struct task_struct *p, int flags)
1161 1167
1162 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD); 1168 enqueue_rt_entity(rt_se, flags & ENQUEUE_HEAD);
1163 1169
1164 if (!task_current(rq, p) && p->rt.nr_cpus_allowed > 1) 1170 if (!task_current(rq, p) && p->nr_cpus_allowed > 1)
1165 enqueue_pushable_task(rq, p); 1171 enqueue_pushable_task(rq, p);
1166 1172
1167 inc_nr_running(rq); 1173 inc_nr_running(rq);
@@ -1225,7 +1231,7 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1225 1231
1226 cpu = task_cpu(p); 1232 cpu = task_cpu(p);
1227 1233
1228 if (p->rt.nr_cpus_allowed == 1) 1234 if (p->nr_cpus_allowed == 1)
1229 goto out; 1235 goto out;
1230 1236
1231 /* For anything but wake ups, just return the task_cpu */ 1237 /* For anything but wake ups, just return the task_cpu */
@@ -1260,9 +1266,9 @@ select_task_rq_rt(struct task_struct *p, int sd_flag, int flags)
1260 * will have to sort it out. 1266 * will have to sort it out.
1261 */ 1267 */
1262 if (curr && unlikely(rt_task(curr)) && 1268 if (curr && unlikely(rt_task(curr)) &&
1263 (curr->rt.nr_cpus_allowed < 2 || 1269 (curr->nr_cpus_allowed < 2 ||
1264 curr->prio <= p->prio) && 1270 curr->prio <= p->prio) &&
1265 (p->rt.nr_cpus_allowed > 1)) { 1271 (p->nr_cpus_allowed > 1)) {
1266 int target = find_lowest_rq(p); 1272 int target = find_lowest_rq(p);
1267 1273
1268 if (target != -1) 1274 if (target != -1)
@@ -1276,10 +1282,10 @@ out:
1276 1282
1277static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p) 1283static void check_preempt_equal_prio(struct rq *rq, struct task_struct *p)
1278{ 1284{
1279 if (rq->curr->rt.nr_cpus_allowed == 1) 1285 if (rq->curr->nr_cpus_allowed == 1)
1280 return; 1286 return;
1281 1287
1282 if (p->rt.nr_cpus_allowed != 1 1288 if (p->nr_cpus_allowed != 1
1283 && cpupri_find(&rq->rd->cpupri, p, NULL)) 1289 && cpupri_find(&rq->rd->cpupri, p, NULL))
1284 return; 1290 return;
1285 1291
@@ -1395,7 +1401,7 @@ static void put_prev_task_rt(struct rq *rq, struct task_struct *p)
1395 * The previous task needs to be made eligible for pushing 1401 * The previous task needs to be made eligible for pushing
1396 * if it is still active 1402 * if it is still active
1397 */ 1403 */
1398 if (on_rt_rq(&p->rt) && p->rt.nr_cpus_allowed > 1) 1404 if (on_rt_rq(&p->rt) && p->nr_cpus_allowed > 1)
1399 enqueue_pushable_task(rq, p); 1405 enqueue_pushable_task(rq, p);
1400} 1406}
1401 1407
@@ -1408,7 +1414,7 @@ static int pick_rt_task(struct rq *rq, struct task_struct *p, int cpu)
1408{ 1414{
1409 if (!task_running(rq, p) && 1415 if (!task_running(rq, p) &&
1410 (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) && 1416 (cpu < 0 || cpumask_test_cpu(cpu, tsk_cpus_allowed(p))) &&
1411 (p->rt.nr_cpus_allowed > 1)) 1417 (p->nr_cpus_allowed > 1))
1412 return 1; 1418 return 1;
1413 return 0; 1419 return 0;
1414} 1420}
@@ -1464,7 +1470,7 @@ static int find_lowest_rq(struct task_struct *task)
1464 if (unlikely(!lowest_mask)) 1470 if (unlikely(!lowest_mask))
1465 return -1; 1471 return -1;
1466 1472
1467 if (task->rt.nr_cpus_allowed == 1) 1473 if (task->nr_cpus_allowed == 1)
1468 return -1; /* No other targets possible */ 1474 return -1; /* No other targets possible */
1469 1475
1470 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask)) 1476 if (!cpupri_find(&task_rq(task)->rd->cpupri, task, lowest_mask))
@@ -1556,7 +1562,7 @@ static struct rq *find_lock_lowest_rq(struct task_struct *task, struct rq *rq)
1556 task_running(rq, task) || 1562 task_running(rq, task) ||
1557 !task->on_rq)) { 1563 !task->on_rq)) {
1558 1564
1559 raw_spin_unlock(&lowest_rq->lock); 1565 double_unlock_balance(rq, lowest_rq);
1560 lowest_rq = NULL; 1566 lowest_rq = NULL;
1561 break; 1567 break;
1562 } 1568 }
@@ -1586,7 +1592,7 @@ static struct task_struct *pick_next_pushable_task(struct rq *rq)
1586 1592
1587 BUG_ON(rq->cpu != task_cpu(p)); 1593 BUG_ON(rq->cpu != task_cpu(p));
1588 BUG_ON(task_current(rq, p)); 1594 BUG_ON(task_current(rq, p));
1589 BUG_ON(p->rt.nr_cpus_allowed <= 1); 1595 BUG_ON(p->nr_cpus_allowed <= 1);
1590 1596
1591 BUG_ON(!p->on_rq); 1597 BUG_ON(!p->on_rq);
1592 BUG_ON(!rt_task(p)); 1598 BUG_ON(!rt_task(p));
@@ -1793,9 +1799,9 @@ static void task_woken_rt(struct rq *rq, struct task_struct *p)
1793 if (!task_running(rq, p) && 1799 if (!task_running(rq, p) &&
1794 !test_tsk_need_resched(rq->curr) && 1800 !test_tsk_need_resched(rq->curr) &&
1795 has_pushable_tasks(rq) && 1801 has_pushable_tasks(rq) &&
1796 p->rt.nr_cpus_allowed > 1 && 1802 p->nr_cpus_allowed > 1 &&
1797 rt_task(rq->curr) && 1803 rt_task(rq->curr) &&
1798 (rq->curr->rt.nr_cpus_allowed < 2 || 1804 (rq->curr->nr_cpus_allowed < 2 ||
1799 rq->curr->prio <= p->prio)) 1805 rq->curr->prio <= p->prio))
1800 push_rt_tasks(rq); 1806 push_rt_tasks(rq);
1801} 1807}
@@ -1817,7 +1823,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
1817 * Only update if the process changes its state from whether it 1823 * Only update if the process changes its state from whether it
1818 * can migrate or not. 1824 * can migrate or not.
1819 */ 1825 */
1820 if ((p->rt.nr_cpus_allowed > 1) == (weight > 1)) 1826 if ((p->nr_cpus_allowed > 1) == (weight > 1))
1821 return; 1827 return;
1822 1828
1823 rq = task_rq(p); 1829 rq = task_rq(p);
@@ -1979,6 +1985,8 @@ static void watchdog(struct rq *rq, struct task_struct *p)
1979 1985
1980static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) 1986static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1981{ 1987{
1988 struct sched_rt_entity *rt_se = &p->rt;
1989
1982 update_curr_rt(rq); 1990 update_curr_rt(rq);
1983 1991
1984 watchdog(rq, p); 1992 watchdog(rq, p);
@@ -1996,12 +2004,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued)
1996 p->rt.time_slice = RR_TIMESLICE; 2004 p->rt.time_slice = RR_TIMESLICE;
1997 2005
1998 /* 2006 /*
1999 * Requeue to the end of queue if we are not the only element 2007 * Requeue to the end of queue if we (and all of our ancestors) are the
2000 * on the queue: 2008 * only element on the queue
2001 */ 2009 */
2002 if (p->rt.run_list.prev != p->rt.run_list.next) { 2010 for_each_sched_rt_entity(rt_se) {
2003 requeue_task_rt(rq, p, 0); 2011 if (rt_se->run_list.prev != rt_se->run_list.next) {
2004 set_tsk_need_resched(p); 2012 requeue_task_rt(rq, p, 0);
2013 set_tsk_need_resched(p);
2014 return;
2015 }
2005 } 2016 }
2006} 2017}
2007 2018
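The task_tick_rt() change above requeues a SCHED_RR task on timeslice expiry only if it, or some ancestor in the group hierarchy, actually shares a run list with another entity. The same rule as a stand-alone walk over a made-up two-level hierarchy:

/*
 * Stand-alone illustration (not part of the patch above): the new
 * round-robin requeue rule over an invented entity hierarchy.
 */
#include <stdbool.h>
#include <stdio.h>

struct entity {
	bool has_sibling;	/* another entity queued at this level? */
	struct entity *parent;	/* group this entity is enqueued in */
};

/* Requeue (and resched) only if the task or some ancestor actually
 * shares a run list with another entity; otherwise moving to the tail
 * would be a no-op. */
static bool should_requeue(struct entity *se)
{
	for (; se; se = se->parent)
		if (se->has_sibling)
			return true;
	return false;
}

int main(void)
{
	struct entity group = { .has_sibling = true,  .parent = NULL };
	struct entity task  = { .has_sibling = false, .parent = &group };

	/* The task is alone in its group, but the group competes with a
	 * sibling, so an expired timeslice must still trigger a requeue. */
	printf("requeue = %d\n", should_requeue(&task));
	return 0;
}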
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ba9dccfd24ce..6d52cea7f33d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -526,6 +526,8 @@ static inline struct sched_domain *highest_flag_domain(int cpu, int flag)
526DECLARE_PER_CPU(struct sched_domain *, sd_llc); 526DECLARE_PER_CPU(struct sched_domain *, sd_llc);
527DECLARE_PER_CPU(int, sd_llc_id); 527DECLARE_PER_CPU(int, sd_llc_id);
528 528
529extern int group_balance_cpu(struct sched_group *sg);
530
529#endif /* CONFIG_SMP */ 531#endif /* CONFIG_SMP */
530 532
531#include "stats.h" 533#include "stats.h"
diff --git a/kernel/signal.c b/kernel/signal.c
index f7b418217633..677102789cf2 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -1656,19 +1656,18 @@ bool do_notify_parent(struct task_struct *tsk, int sig)
1656 info.si_signo = sig; 1656 info.si_signo = sig;
1657 info.si_errno = 0; 1657 info.si_errno = 0;
1658 /* 1658 /*
1659 * we are under tasklist_lock here so our parent is tied to 1659 * We are under tasklist_lock here so our parent is tied to
1660 * us and cannot exit and release its namespace. 1660 * us and cannot change.
1661 * 1661 *
1662 * the only it can is to switch its nsproxy with sys_unshare, 1662 * task_active_pid_ns will always return the same pid namespace
1663 * bu uncharing pid namespaces is not allowed, so we'll always 1663 * until a task passes through release_task.
1664 * see relevant namespace
1665 * 1664 *
1666 * write_lock() currently calls preempt_disable() which is the 1665 * write_lock() currently calls preempt_disable() which is the
1667 * same as rcu_read_lock(), but according to Oleg, this is not 1666 * same as rcu_read_lock(), but according to Oleg, this is not
1668 * correct to rely on this 1667 * correct to rely on this
1669 */ 1668 */
1670 rcu_read_lock(); 1669 rcu_read_lock();
1671 info.si_pid = task_pid_nr_ns(tsk, tsk->parent->nsproxy->pid_ns); 1670 info.si_pid = task_pid_nr_ns(tsk, task_active_pid_ns(tsk->parent));
1672 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns), 1671 info.si_uid = from_kuid_munged(task_cred_xxx(tsk->parent, user_ns),
1673 task_uid(tsk)); 1672 task_uid(tsk));
1674 rcu_read_unlock(); 1673 rcu_read_unlock();
@@ -2369,24 +2368,34 @@ relock:
2369} 2368}
2370 2369
2371/** 2370/**
2372 * block_sigmask - add @ka's signal mask to current->blocked 2371 * signal_delivered -
2373 * @ka: action for @signr 2372 * @sig: number of signal being delivered
2374 * @signr: signal that has been successfully delivered 2373 * @info: siginfo_t of signal being delivered
2374 * @ka: sigaction setting that chose the handler
2375 * @regs: user register state
2376 * @stepping: nonzero if debugger single-step or block-step in use
2375 * 2377 *
2376 * This function should be called when a signal has successfully been 2378 * This function should be called when a signal has successfully been
2377 * delivered. It adds the mask of signals for @ka to current->blocked 2379 * delivered. It updates the blocked signals accordingly (@ka->sa.sa_mask
2378 * so that they are blocked during the execution of the signal 2380 * is always blocked, and the signal itself is blocked unless %SA_NODEFER
2379 * handler. In addition, @signr will be blocked unless %SA_NODEFER is 2381 * is set in @ka->sa.sa_flags. Tracing is notified.
2380 * set in @ka->sa.sa_flags.
2381 */ 2382 */
2382void block_sigmask(struct k_sigaction *ka, int signr) 2383void signal_delivered(int sig, siginfo_t *info, struct k_sigaction *ka,
2384 struct pt_regs *regs, int stepping)
2383{ 2385{
2384 sigset_t blocked; 2386 sigset_t blocked;
2385 2387
2388 /* A signal was successfully delivered, and the
2389 saved sigmask was stored on the signal frame,
2390 and will be restored by sigreturn. So we can
2391 simply clear the restore sigmask flag. */
2392 clear_restore_sigmask();
2393
2386 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask); 2394 sigorsets(&blocked, &current->blocked, &ka->sa.sa_mask);
2387 if (!(ka->sa.sa_flags & SA_NODEFER)) 2395 if (!(ka->sa.sa_flags & SA_NODEFER))
2388 sigaddset(&blocked, signr); 2396 sigaddset(&blocked, sig);
2389 set_current_blocked(&blocked); 2397 set_current_blocked(&blocked);
2398 tracehook_signal_handler(sig, info, ka, regs, stepping);
2390} 2399}
2391 2400
2392/* 2401/*
@@ -2519,7 +2528,16 @@ static void __set_task_blocked(struct task_struct *tsk, const sigset_t *newset)
2519 * It is wrong to change ->blocked directly, this helper should be used 2528 * It is wrong to change ->blocked directly, this helper should be used
2520 * to ensure the process can't miss a shared signal we are going to block. 2529 * to ensure the process can't miss a shared signal we are going to block.
2521 */ 2530 */
2522void set_current_blocked(const sigset_t *newset) 2531void set_current_blocked(sigset_t *newset)
2532{
2533 struct task_struct *tsk = current;
2534 sigdelsetmask(newset, sigmask(SIGKILL) | sigmask(SIGSTOP));
2535 spin_lock_irq(&tsk->sighand->siglock);
2536 __set_task_blocked(tsk, newset);
2537 spin_unlock_irq(&tsk->sighand->siglock);
2538}
2539
2540void __set_current_blocked(const sigset_t *newset)
2523{ 2541{
2524 struct task_struct *tsk = current; 2542 struct task_struct *tsk = current;
2525 2543
@@ -2559,7 +2577,7 @@ int sigprocmask(int how, sigset_t *set, sigset_t *oldset)
2559 return -EINVAL; 2577 return -EINVAL;
2560 } 2578 }
2561 2579
2562 set_current_blocked(&newset); 2580 __set_current_blocked(&newset);
2563 return 0; 2581 return 0;
2564} 2582}
2565 2583
@@ -3133,7 +3151,7 @@ SYSCALL_DEFINE3(sigprocmask, int, how, old_sigset_t __user *, nset,
3133 return -EINVAL; 3151 return -EINVAL;
3134 } 3152 }
3135 3153
3136 set_current_blocked(&new_blocked); 3154 __set_current_blocked(&new_blocked);
3137 } 3155 }
3138 3156
3139 if (oset) { 3157 if (oset) {
@@ -3197,7 +3215,6 @@ SYSCALL_DEFINE1(ssetmask, int, newmask)
3197 int old = current->blocked.sig[0]; 3215 int old = current->blocked.sig[0];
3198 sigset_t newset; 3216 sigset_t newset;
3199 3217
3200 siginitset(&newset, newmask & ~(sigmask(SIGKILL) | sigmask(SIGSTOP)));
3201 set_current_blocked(&newset); 3218 set_current_blocked(&newset);
3202 3219
3203 return old; 3220 return old;
@@ -3236,11 +3253,8 @@ SYSCALL_DEFINE0(pause)
3236 3253
3237#endif 3254#endif
3238 3255
3239#ifdef HAVE_SET_RESTORE_SIGMASK
3240int sigsuspend(sigset_t *set) 3256int sigsuspend(sigset_t *set)
3241{ 3257{
3242 sigdelsetmask(set, sigmask(SIGKILL)|sigmask(SIGSTOP));
3243
3244 current->saved_sigmask = current->blocked; 3258 current->saved_sigmask = current->blocked;
3245 set_current_blocked(set); 3259 set_current_blocked(set);
3246 3260
@@ -3249,7 +3263,6 @@ int sigsuspend(sigset_t *set)
3249 set_restore_sigmask(); 3263 set_restore_sigmask();
3250 return -ERESTARTNOHAND; 3264 return -ERESTARTNOHAND;
3251} 3265}
3252#endif
3253 3266
3254#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND 3267#ifdef __ARCH_WANT_SYS_RT_SIGSUSPEND
3255/** 3268/**
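signal_delivered() above folds the handler's sa_mask into the blocked set and additionally blocks the delivered signal itself unless SA_NODEFER is set. The same masking rule expressed with the POSIX user-space sigset API; the helper name is invented, and only the classic signals 1..31 are walked for brevity.

/*
 * Stand-alone illustration (not part of the patch above): the blocked-set
 * update performed by signal_delivered(), in user space.
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>

static void block_after_delivery(sigset_t *blocked,
				 const struct sigaction *ka, int sig)
{
	/* The handler's sa_mask is always added to the blocked set... */
	for (int s = 1; s < 32; s++)
		if (sigismember(&ka->sa_mask, s))
			sigaddset(blocked, s);
	/* ...and the delivered signal itself, unless SA_NODEFER is set. */
	if (!(ka->sa_flags & SA_NODEFER))
		sigaddset(blocked, sig);
}

int main(void)
{
	sigset_t blocked;
	struct sigaction ka;

	memset(&ka, 0, sizeof(ka));
	sigemptyset(&blocked);
	sigemptyset(&ka.sa_mask);
	sigaddset(&ka.sa_mask, SIGUSR2);

	block_after_delivery(&blocked, &ka, SIGUSR1);
	printf("USR1 blocked: %d, USR2 blocked: %d\n",
	       sigismember(&blocked, SIGUSR1),
	       sigismember(&blocked, SIGUSR2));
	return 0;
}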
diff --git a/kernel/smpboot.c b/kernel/smpboot.c
index e1a797e028a3..98f60c5caa1b 100644
--- a/kernel/smpboot.c
+++ b/kernel/smpboot.c
@@ -31,6 +31,12 @@ void __init idle_thread_set_boot_cpu(void)
31 per_cpu(idle_threads, smp_processor_id()) = current; 31 per_cpu(idle_threads, smp_processor_id()) = current;
32} 32}
33 33
34/**
35 * idle_init - Initialize the idle thread for a cpu
36 * @cpu: The cpu for which the idle thread should be initialized
37 *
38 * Creates the thread if it does not exist.
39 */
34static inline void idle_init(unsigned int cpu) 40static inline void idle_init(unsigned int cpu)
35{ 41{
36 struct task_struct *tsk = per_cpu(idle_threads, cpu); 42 struct task_struct *tsk = per_cpu(idle_threads, cpu);
@@ -45,17 +51,16 @@ static inline void idle_init(unsigned int cpu)
45} 51}
46 52
47/** 53/**
48 * idle_thread_init - Initialize the idle thread for a cpu 54 * idle_threads_init - Initialize idle threads for all cpus
49 * @cpu: The cpu for which the idle thread should be initialized
50 *
51 * Creates the thread if it does not exist.
52 */ 55 */
53void __init idle_threads_init(void) 56void __init idle_threads_init(void)
54{ 57{
55 unsigned int cpu; 58 unsigned int cpu, boot_cpu;
59
60 boot_cpu = smp_processor_id();
56 61
57 for_each_possible_cpu(cpu) { 62 for_each_possible_cpu(cpu) {
58 if (cpu != smp_processor_id()) 63 if (cpu != boot_cpu)
59 idle_init(cpu); 64 idle_init(cpu);
60 } 65 }
61} 66}
diff --git a/kernel/sys.c b/kernel/sys.c
index 6df42624e454..e0c8ffc50d7f 100644
--- a/kernel/sys.c
+++ b/kernel/sys.c
@@ -36,6 +36,8 @@
36#include <linux/personality.h> 36#include <linux/personality.h>
37#include <linux/ptrace.h> 37#include <linux/ptrace.h>
38#include <linux/fs_struct.h> 38#include <linux/fs_struct.h>
39#include <linux/file.h>
40#include <linux/mount.h>
39#include <linux/gfp.h> 41#include <linux/gfp.h>
40#include <linux/syscore_ops.h> 42#include <linux/syscore_ops.h>
41#include <linux/version.h> 43#include <linux/version.h>
@@ -1378,8 +1380,8 @@ SYSCALL_DEFINE2(sethostname, char __user *, name, int, len)
1378 memcpy(u->nodename, tmp, len); 1380 memcpy(u->nodename, tmp, len);
1379 memset(u->nodename + len, 0, sizeof(u->nodename) - len); 1381 memset(u->nodename + len, 0, sizeof(u->nodename) - len);
1380 errno = 0; 1382 errno = 0;
1383 uts_proc_notify(UTS_PROC_HOSTNAME);
1381 } 1384 }
1382 uts_proc_notify(UTS_PROC_HOSTNAME);
1383 up_write(&uts_sem); 1385 up_write(&uts_sem);
1384 return errno; 1386 return errno;
1385} 1387}
@@ -1429,8 +1431,8 @@ SYSCALL_DEFINE2(setdomainname, char __user *, name, int, len)
1429 memcpy(u->domainname, tmp, len); 1431 memcpy(u->domainname, tmp, len);
1430 memset(u->domainname + len, 0, sizeof(u->domainname) - len); 1432 memset(u->domainname + len, 0, sizeof(u->domainname) - len);
1431 errno = 0; 1433 errno = 0;
1434 uts_proc_notify(UTS_PROC_DOMAINNAME);
1432 } 1435 }
1433 uts_proc_notify(UTS_PROC_DOMAINNAME);
1434 up_write(&uts_sem); 1436 up_write(&uts_sem);
1435 return errno; 1437 return errno;
1436} 1438}
@@ -1784,77 +1786,101 @@ SYSCALL_DEFINE1(umask, int, mask)
1784} 1786}
1785 1787
1786#ifdef CONFIG_CHECKPOINT_RESTORE 1788#ifdef CONFIG_CHECKPOINT_RESTORE
1789static int prctl_set_mm_exe_file(struct mm_struct *mm, unsigned int fd)
1790{
1791 struct vm_area_struct *vma;
1792 struct file *exe_file;
1793 struct dentry *dentry;
1794 int err;
1795
1796 exe_file = fget(fd);
1797 if (!exe_file)
1798 return -EBADF;
1799
1800 dentry = exe_file->f_path.dentry;
1801
1802 /*
1803	 * Because the original mm->exe_file points to an executable file, make
1804	 * sure that this one is executable as well, to avoid breaking the
1805	 * overall picture.
1806 */
1807 err = -EACCES;
1808 if (!S_ISREG(dentry->d_inode->i_mode) ||
1809 exe_file->f_path.mnt->mnt_flags & MNT_NOEXEC)
1810 goto exit;
1811
1812 err = inode_permission(dentry->d_inode, MAY_EXEC);
1813 if (err)
1814 goto exit;
1815
1816 down_write(&mm->mmap_sem);
1817
1818 /*
1819	 * Forbid mm->exe_file change if other files are mapped.
1820 */
1821 err = -EBUSY;
1822 for (vma = mm->mmap; vma; vma = vma->vm_next) {
1823 if (vma->vm_file && !path_equal(&vma->vm_file->f_path,
1824 &exe_file->f_path))
1825 goto exit_unlock;
1826 }
1827
1828 /*
1829 * The symlink can be changed only once, just to disallow arbitrary
1830	 * transitions that malicious software might bring in. This means one
1831	 * could take a snapshot of all running processes and monitor
1832	 * /proc/pid/exe changes to notice unusual activity if needed.
1833 */
1834 err = -EPERM;
1835 if (test_and_set_bit(MMF_EXE_FILE_CHANGED, &mm->flags))
1836 goto exit_unlock;
1837
1838 set_mm_exe_file(mm, exe_file);
1839exit_unlock:
1840 up_write(&mm->mmap_sem);
1841
1842exit:
1843 fput(exe_file);
1844 return err;
1845}
1846
1787static int prctl_set_mm(int opt, unsigned long addr, 1847static int prctl_set_mm(int opt, unsigned long addr,
1788 unsigned long arg4, unsigned long arg5) 1848 unsigned long arg4, unsigned long arg5)
1789{ 1849{
1790 unsigned long rlim = rlimit(RLIMIT_DATA); 1850 unsigned long rlim = rlimit(RLIMIT_DATA);
1791 unsigned long vm_req_flags;
1792 unsigned long vm_bad_flags;
1793 struct vm_area_struct *vma;
1794 int error = 0;
1795 struct mm_struct *mm = current->mm; 1851 struct mm_struct *mm = current->mm;
1852 struct vm_area_struct *vma;
1853 int error;
1796 1854
1797 if (arg4 | arg5) 1855 if (arg5 || (arg4 && opt != PR_SET_MM_AUXV))
1798 return -EINVAL; 1856 return -EINVAL;
1799 1857
1800 if (!capable(CAP_SYS_RESOURCE)) 1858 if (!capable(CAP_SYS_RESOURCE))
1801 return -EPERM; 1859 return -EPERM;
1802 1860
1803 if (addr >= TASK_SIZE) 1861 if (opt == PR_SET_MM_EXE_FILE)
1862 return prctl_set_mm_exe_file(mm, (unsigned int)addr);
1863
1864 if (addr >= TASK_SIZE || addr < mmap_min_addr)
1804 return -EINVAL; 1865 return -EINVAL;
1805 1866
1867 error = -EINVAL;
1868
1806 down_read(&mm->mmap_sem); 1869 down_read(&mm->mmap_sem);
1807 vma = find_vma(mm, addr); 1870 vma = find_vma(mm, addr);
1808 1871
1809 if (opt != PR_SET_MM_START_BRK && opt != PR_SET_MM_BRK) {
1810 /* It must be existing VMA */
1811 if (!vma || vma->vm_start > addr)
1812 goto out;
1813 }
1814
1815 error = -EINVAL;
1816 switch (opt) { 1872 switch (opt) {
1817 case PR_SET_MM_START_CODE: 1873 case PR_SET_MM_START_CODE:
1874 mm->start_code = addr;
1875 break;
1818 case PR_SET_MM_END_CODE: 1876 case PR_SET_MM_END_CODE:
1819 vm_req_flags = VM_READ | VM_EXEC; 1877 mm->end_code = addr;
1820 vm_bad_flags = VM_WRITE | VM_MAYSHARE;
1821
1822 if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
1823 (vma->vm_flags & vm_bad_flags))
1824 goto out;
1825
1826 if (opt == PR_SET_MM_START_CODE)
1827 mm->start_code = addr;
1828 else
1829 mm->end_code = addr;
1830 break; 1878 break;
1831
1832 case PR_SET_MM_START_DATA: 1879 case PR_SET_MM_START_DATA:
1833 case PR_SET_MM_END_DATA: 1880 mm->start_data = addr;
1834 vm_req_flags = VM_READ | VM_WRITE;
1835 vm_bad_flags = VM_EXEC | VM_MAYSHARE;
1836
1837 if ((vma->vm_flags & vm_req_flags) != vm_req_flags ||
1838 (vma->vm_flags & vm_bad_flags))
1839 goto out;
1840
1841 if (opt == PR_SET_MM_START_DATA)
1842 mm->start_data = addr;
1843 else
1844 mm->end_data = addr;
1845 break; 1881 break;
1846 1882 case PR_SET_MM_END_DATA:
1847 case PR_SET_MM_START_STACK: 1883 mm->end_data = addr;
1848
1849#ifdef CONFIG_STACK_GROWSUP
1850 vm_req_flags = VM_READ | VM_WRITE | VM_GROWSUP;
1851#else
1852 vm_req_flags = VM_READ | VM_WRITE | VM_GROWSDOWN;
1853#endif
1854 if ((vma->vm_flags & vm_req_flags) != vm_req_flags)
1855 goto out;
1856
1857 mm->start_stack = addr;
1858 break; 1884 break;
1859 1885
1860 case PR_SET_MM_START_BRK: 1886 case PR_SET_MM_START_BRK:
@@ -1881,24 +1907,89 @@ static int prctl_set_mm(int opt, unsigned long addr,
1881 mm->brk = addr; 1907 mm->brk = addr;
1882 break; 1908 break;
1883 1909
1910 /*
1911 * If command line arguments and environment
1912	 * are placed somewhere else on the stack, we can
1913	 * set them up here: ARG_START/END to set up
1914	 * command line arguments and ENV_START/END
1915	 * for the environment.
1916 */
1917 case PR_SET_MM_START_STACK:
1918 case PR_SET_MM_ARG_START:
1919 case PR_SET_MM_ARG_END:
1920 case PR_SET_MM_ENV_START:
1921 case PR_SET_MM_ENV_END:
1922 if (!vma) {
1923 error = -EFAULT;
1924 goto out;
1925 }
1926 if (opt == PR_SET_MM_START_STACK)
1927 mm->start_stack = addr;
1928 else if (opt == PR_SET_MM_ARG_START)
1929 mm->arg_start = addr;
1930 else if (opt == PR_SET_MM_ARG_END)
1931 mm->arg_end = addr;
1932 else if (opt == PR_SET_MM_ENV_START)
1933 mm->env_start = addr;
1934 else if (opt == PR_SET_MM_ENV_END)
1935 mm->env_end = addr;
1936 break;
1937
1938 /*
1939	 * This doesn't move the auxiliary vector itself
1940	 * since it's pinned to mm_struct, but it allows
1941	 * the vector to be filled with new values. It's up
1942	 * to the caller to provide sane values here;
1943	 * otherwise user space tools which use this
1944	 * vector might be unhappy.
1945 */
1946 case PR_SET_MM_AUXV: {
1947 unsigned long user_auxv[AT_VECTOR_SIZE];
1948
1949 if (arg4 > sizeof(user_auxv))
1950 goto out;
1951 up_read(&mm->mmap_sem);
1952
1953 if (copy_from_user(user_auxv, (const void __user *)addr, arg4))
1954 return -EFAULT;
1955
1956 /* Make sure the last entry is always AT_NULL */
1957 user_auxv[AT_VECTOR_SIZE - 2] = 0;
1958 user_auxv[AT_VECTOR_SIZE - 1] = 0;
1959
1960 BUILD_BUG_ON(sizeof(user_auxv) != sizeof(mm->saved_auxv));
1961
1962 task_lock(current);
1963 memcpy(mm->saved_auxv, user_auxv, arg4);
1964 task_unlock(current);
1965
1966 return 0;
1967 }
1884 default: 1968 default:
1885 error = -EINVAL;
1886 goto out; 1969 goto out;
1887 } 1970 }
1888 1971
1889 error = 0; 1972 error = 0;
1890
1891out: 1973out:
1892 up_read(&mm->mmap_sem); 1974 up_read(&mm->mmap_sem);
1893
1894 return error; 1975 return error;
1895} 1976}
1977
1978static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1979{
1980 return put_user(me->clear_child_tid, tid_addr);
1981}
1982
1896#else /* CONFIG_CHECKPOINT_RESTORE */ 1983#else /* CONFIG_CHECKPOINT_RESTORE */
1897static int prctl_set_mm(int opt, unsigned long addr, 1984static int prctl_set_mm(int opt, unsigned long addr,
1898 unsigned long arg4, unsigned long arg5) 1985 unsigned long arg4, unsigned long arg5)
1899{ 1986{
1900 return -EINVAL; 1987 return -EINVAL;
1901} 1988}
1989static int prctl_get_tid_address(struct task_struct *me, int __user **tid_addr)
1990{
1991 return -EINVAL;
1992}
1902#endif 1993#endif
1903 1994
1904SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3, 1995SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
@@ -2053,6 +2144,9 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
2053 case PR_SET_MM: 2144 case PR_SET_MM:
2054 error = prctl_set_mm(arg2, arg3, arg4, arg5); 2145 error = prctl_set_mm(arg2, arg3, arg4, arg5);
2055 break; 2146 break;
2147 case PR_GET_TID_ADDRESS:
2148 error = prctl_get_tid_address(me, (int __user **)arg2);
2149 break;
2056 case PR_SET_CHILD_SUBREAPER: 2150 case PR_SET_CHILD_SUBREAPER:
2057 me->signal->is_child_subreaper = !!arg2; 2151 me->signal->is_child_subreaper = !!arg2;
2058 error = 0; 2152 error = 0;
@@ -2114,7 +2208,6 @@ int orderly_poweroff(bool force)
2114 NULL 2208 NULL
2115 }; 2209 };
2116 int ret = -ENOMEM; 2210 int ret = -ENOMEM;
2117 struct subprocess_info *info;
2118 2211
2119 if (argv == NULL) { 2212 if (argv == NULL) {
2120 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n", 2213 printk(KERN_WARNING "%s failed to allocate memory for \"%s\"\n",
@@ -2122,18 +2215,16 @@ int orderly_poweroff(bool force)
2122 goto out; 2215 goto out;
2123 } 2216 }
2124 2217
2125 info = call_usermodehelper_setup(argv[0], argv, envp, GFP_ATOMIC); 2218 ret = call_usermodehelper_fns(argv[0], argv, envp, UMH_NO_WAIT,
2126 if (info == NULL) { 2219 NULL, argv_cleanup, NULL);
2127 argv_free(argv); 2220out:
2128 goto out; 2221 if (likely(!ret))
2129 } 2222 return 0;
2130
2131 call_usermodehelper_setfns(info, NULL, argv_cleanup, NULL);
2132 2223
2133 ret = call_usermodehelper_exec(info, UMH_NO_WAIT); 2224 if (ret == -ENOMEM)
2225 argv_free(argv);
2134 2226
2135 out: 2227 if (force) {
2136 if (ret && force) {
2137 printk(KERN_WARNING "Failed to start orderly shutdown: " 2228 printk(KERN_WARNING "Failed to start orderly shutdown: "
2138 "forcing the issue\n"); 2229 "forcing the issue\n");
2139 2230
diff --git a/kernel/sys_ni.c b/kernel/sys_ni.c
index 47bfa16430d7..dbff751e4086 100644
--- a/kernel/sys_ni.c
+++ b/kernel/sys_ni.c
@@ -203,3 +203,6 @@ cond_syscall(sys_fanotify_mark);
203cond_syscall(sys_name_to_handle_at); 203cond_syscall(sys_name_to_handle_at);
204cond_syscall(sys_open_by_handle_at); 204cond_syscall(sys_open_by_handle_at);
205cond_syscall(compat_sys_open_by_handle_at); 205cond_syscall(compat_sys_open_by_handle_at);
206
207/* compare kernel pointers */
208cond_syscall(sys_kcmp);
diff --git a/kernel/task_work.c b/kernel/task_work.c
new file mode 100644
index 000000000000..82d1c794066d
--- /dev/null
+++ b/kernel/task_work.c
@@ -0,0 +1,84 @@
1#include <linux/spinlock.h>
2#include <linux/task_work.h>
3#include <linux/tracehook.h>
4
5int
6task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
7{
8 unsigned long flags;
9 int err = -ESRCH;
10
11#ifndef TIF_NOTIFY_RESUME
12 if (notify)
13 return -ENOTSUPP;
14#endif
15 /*
16 * We must not insert the new work if the task has already passed
17 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
18 * and check PF_EXITING under pi_lock.
19 */
20 raw_spin_lock_irqsave(&task->pi_lock, flags);
21 if (likely(!(task->flags & PF_EXITING))) {
22 hlist_add_head(&twork->hlist, &task->task_works);
23 err = 0;
24 }
25 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
26
27 /* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
28 if (likely(!err) && notify)
29 set_notify_resume(task);
30 return err;
31}
32
33struct task_work *
34task_work_cancel(struct task_struct *task, task_work_func_t func)
35{
36 unsigned long flags;
37 struct task_work *twork;
38 struct hlist_node *pos;
39
40 raw_spin_lock_irqsave(&task->pi_lock, flags);
41 hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
42 if (twork->func == func) {
43 hlist_del(&twork->hlist);
44 goto found;
45 }
46 }
47 twork = NULL;
48 found:
49 raw_spin_unlock_irqrestore(&task->pi_lock, flags);
50
51 return twork;
52}
53
54void task_work_run(void)
55{
56 struct task_struct *task = current;
57 struct hlist_head task_works;
58 struct hlist_node *pos;
59
60 raw_spin_lock_irq(&task->pi_lock);
61 hlist_move_list(&task->task_works, &task_works);
62 raw_spin_unlock_irq(&task->pi_lock);
63
64 if (unlikely(hlist_empty(&task_works)))
65 return;
66 /*
67	 * We use hlist to save space in task_struct, but we want FIFO order.
68	 * Find the last entry (the list should be short), then process the
69	 * entries in reverse order.
70 */
71 for (pos = task_works.first; pos->next; pos = pos->next)
72 ;
73
74 for (;;) {
75 struct hlist_node **pprev = pos->pprev;
76 struct task_work *twork = container_of(pos, struct task_work,
77 hlist);
78 twork->func(twork);
79
80 if (pprev == &task_works.first)
81 break;
82 pos = container_of(pprev, struct hlist_node, next);
83 }
84}
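
A sketch of how kernel code might use the new API; only the func and hlist members exercised above come from the patch, the rest (the embedding structure, the allocation, the callback body) is illustrative. The callback runs in the target task's own context the next time it reaches task_work_run(), i.e. on return to user space when notify is true, or from exit_task_work() at exit.

#include <linux/task_work.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/printk.h>

struct my_deferred {
	struct task_work work;	/* must stay allocated until the callback runs */
	int payload;
};

static void my_callback(struct task_work *twork)
{
	struct my_deferred *d = container_of(twork, struct my_deferred, work);

	pr_info("deferred work in %s: %d\n", current->comm, d->payload);
	kfree(d);
}

static int queue_on(struct task_struct *task, int payload)
{
	struct my_deferred *d = kzalloc(sizeof(*d), GFP_KERNEL);
	int err;

	if (!d)
		return -ENOMEM;
	d->work.func = my_callback;
	d->payload = payload;

	/* notify=true sets TIF_NOTIFY_RESUME so the work runs soon;
	 * fails with -ESRCH if the task already passed exit_task_work(). */
	err = task_work_add(task, &d->work, true);
	if (err)
		kfree(d);
	return err;
}
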
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 9cd928f7a7c6..7e1ce012a851 100644
--- a/kernel/time/clockevents.c
+++ b/kernel/time/clockevents.c
@@ -297,8 +297,7 @@ void clockevents_register_device(struct clock_event_device *dev)
297} 297}
298EXPORT_SYMBOL_GPL(clockevents_register_device); 298EXPORT_SYMBOL_GPL(clockevents_register_device);
299 299
300static void clockevents_config(struct clock_event_device *dev, 300void clockevents_config(struct clock_event_device *dev, u32 freq)
301 u32 freq)
302{ 301{
303 u64 sec; 302 u64 sec;
304 303
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index 6a3a5b9ff561..869997833928 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -274,6 +274,7 @@ EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
274static void tick_nohz_stop_sched_tick(struct tick_sched *ts) 274static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
275{ 275{
276 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies; 276 unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
277 unsigned long rcu_delta_jiffies;
277 ktime_t last_update, expires, now; 278 ktime_t last_update, expires, now;
278 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev; 279 struct clock_event_device *dev = __get_cpu_var(tick_cpu_device).evtdev;
279 u64 time_delta; 280 u64 time_delta;
@@ -322,7 +323,7 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
322 time_delta = timekeeping_max_deferment(); 323 time_delta = timekeeping_max_deferment();
323 } while (read_seqretry(&xtime_lock, seq)); 324 } while (read_seqretry(&xtime_lock, seq));
324 325
325 if (rcu_needs_cpu(cpu) || printk_needs_cpu(cpu) || 326 if (rcu_needs_cpu(cpu, &rcu_delta_jiffies) || printk_needs_cpu(cpu) ||
326 arch_needs_cpu(cpu)) { 327 arch_needs_cpu(cpu)) {
327 next_jiffies = last_jiffies + 1; 328 next_jiffies = last_jiffies + 1;
328 delta_jiffies = 1; 329 delta_jiffies = 1;
@@ -330,6 +331,10 @@ static void tick_nohz_stop_sched_tick(struct tick_sched *ts)
330 /* Get the next timer wheel timer */ 331 /* Get the next timer wheel timer */
331 next_jiffies = get_next_timer_interrupt(last_jiffies); 332 next_jiffies = get_next_timer_interrupt(last_jiffies);
332 delta_jiffies = next_jiffies - last_jiffies; 333 delta_jiffies = next_jiffies - last_jiffies;
334 if (rcu_delta_jiffies < delta_jiffies) {
335 next_jiffies = last_jiffies + rcu_delta_jiffies;
336 delta_jiffies = rcu_delta_jiffies;
337 }
333 } 338 }
334 /* 339 /*
335 * Do not stop the tick, if we are only one off 340 * Do not stop the tick, if we are only one off
@@ -576,6 +581,7 @@ void tick_nohz_idle_exit(void)
576 /* Update jiffies first */ 581 /* Update jiffies first */
577 select_nohz_load_balancer(0); 582 select_nohz_load_balancer(0);
578 tick_do_update_jiffies64(now); 583 tick_do_update_jiffies64(now);
584 update_cpu_load_nohz();
579 585
580#ifndef CONFIG_VIRT_CPU_ACCOUNTING 586#ifndef CONFIG_VIRT_CPU_ACCOUNTING
581 /* 587 /*
@@ -814,6 +820,16 @@ static enum hrtimer_restart tick_sched_timer(struct hrtimer *timer)
814 return HRTIMER_RESTART; 820 return HRTIMER_RESTART;
815} 821}
816 822
823static int sched_skew_tick;
824
825static int __init skew_tick(char *str)
826{
827 get_option(&str, &sched_skew_tick);
828
829 return 0;
830}
831early_param("skew_tick", skew_tick);
832
817/** 833/**
818 * tick_setup_sched_timer - setup the tick emulation timer 834 * tick_setup_sched_timer - setup the tick emulation timer
819 */ 835 */
@@ -831,6 +847,14 @@ void tick_setup_sched_timer(void)
831 /* Get the next period (per cpu) */ 847 /* Get the next period (per cpu) */
832 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update()); 848 hrtimer_set_expires(&ts->sched_timer, tick_init_jiffy_update());
833 849
850 /* Offset the tick to avert xtime_lock contention. */
851 if (sched_skew_tick) {
852 u64 offset = ktime_to_ns(tick_period) >> 1;
853 do_div(offset, num_possible_cpus());
854 offset *= smp_processor_id();
855 hrtimer_add_expires_ns(&ts->sched_timer, offset);
856 }
857
834 for (;;) { 858 for (;;) {
835 hrtimer_forward(&ts->sched_timer, now, tick_period); 859 hrtimer_forward(&ts->sched_timer, now, tick_period);
836 hrtimer_start_expires(&ts->sched_timer, 860 hrtimer_start_expires(&ts->sched_timer,
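
A worked example of the skew computed above (the numbers are illustrative, not from the patch): with HZ=1000, tick_period is 1,000,000 ns, so on a 4-CPU machine booted with skew_tick=1

	offset = (1000000 >> 1) / num_possible_cpus() * smp_processor_id()
	       = 125000 ns * CPU number

i.e. CPU0 keeps the unmodified tick while CPU1, CPU2 and CPU3 fire 125 us, 250 us and 375 us later, spreading the xtime_lock accesses across the tick period. Any non-zero skew_tick value enables the offset; the default 0 leaves all CPUs ticking at the same instant.
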
diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
index 6e46cacf5969..6f46a00a1e8a 100644
--- a/kernel/time/timekeeping.c
+++ b/kernel/time/timekeeping.c
@@ -962,6 +962,7 @@ static cycle_t logarithmic_accumulation(cycle_t offset, int shift)
962 timekeeper.xtime.tv_sec++; 962 timekeeper.xtime.tv_sec++;
963 leap = second_overflow(timekeeper.xtime.tv_sec); 963 leap = second_overflow(timekeeper.xtime.tv_sec);
964 timekeeper.xtime.tv_sec += leap; 964 timekeeper.xtime.tv_sec += leap;
965 timekeeper.wall_to_monotonic.tv_sec -= leap;
965 } 966 }
966 967
967 /* Accumulate raw time */ 968 /* Accumulate raw time */
@@ -1077,6 +1078,7 @@ static void update_wall_time(void)
1077 timekeeper.xtime.tv_sec++; 1078 timekeeper.xtime.tv_sec++;
1078 leap = second_overflow(timekeeper.xtime.tv_sec); 1079 leap = second_overflow(timekeeper.xtime.tv_sec);
1079 timekeeper.xtime.tv_sec += leap; 1080 timekeeper.xtime.tv_sec += leap;
1081 timekeeper.wall_to_monotonic.tv_sec -= leap;
1080 } 1082 }
1081 1083
1082 timekeeping_update(false); 1084 timekeeping_update(false);
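
The added wall_to_monotonic adjustment keeps CLOCK_MONOTONIC continuous across a leap second: monotonic time is formed as xtime + wall_to_monotonic, so when second_overflow() changes xtime.tv_sec by leap, the offset has to move by the opposite amount,

	(xtime.tv_sec + leap) + (wall_to_monotonic.tv_sec - leap)
		= xtime.tv_sec + wall_to_monotonic.tv_sec

Without the compensation, the UTC leap second would also show up as a one-second jump in CLOCK_MONOTONIC.
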
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 68032c6177db..49249c28690d 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -371,7 +371,7 @@ EXPORT_SYMBOL_GPL(tracing_on);
371void tracing_off(void) 371void tracing_off(void)
372{ 372{
373 if (global_trace.buffer) 373 if (global_trace.buffer)
374 ring_buffer_record_on(global_trace.buffer); 374 ring_buffer_record_off(global_trace.buffer);
375 /* 375 /*
376 * This flag is only looked at when buffers haven't been 376 * This flag is only looked at when buffers haven't been
377 * allocated yet. We don't really care about the race 377 * allocated yet. We don't really care about the race
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index e5e1d85b8c7c..4b1dfba70f7c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -372,6 +372,13 @@ static int watchdog(void *unused)
372 372
373 373
374#ifdef CONFIG_HARDLOCKUP_DETECTOR 374#ifdef CONFIG_HARDLOCKUP_DETECTOR
375/*
376 * People like the simple clean cpu node info on boot.
377 * Reduce the watchdog noise by only printing messages
378 * that are different from what cpu0 displayed.
379 */
380static unsigned long cpu0_err;
381
375static int watchdog_nmi_enable(int cpu) 382static int watchdog_nmi_enable(int cpu)
376{ 383{
377 struct perf_event_attr *wd_attr; 384 struct perf_event_attr *wd_attr;
@@ -390,11 +397,21 @@ static int watchdog_nmi_enable(int cpu)
390 397
391 /* Try to register using hardware perf events */ 398 /* Try to register using hardware perf events */
392 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL); 399 event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
400
401	 /* save cpu0 error for future comparison */
402 if (cpu == 0 && IS_ERR(event))
403 cpu0_err = PTR_ERR(event);
404
393 if (!IS_ERR(event)) { 405 if (!IS_ERR(event)) {
394	 pr_info("enabled, takes one hw-pmu counter.\n"); 406	 /* only print for cpu0 or if the result differs from cpu0's */
407 if (cpu == 0 || cpu0_err)
408 pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
395 goto out_save; 409 goto out_save;
396 } 410 }
397 411
412 /* skip displaying the same error again */
413 if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
414 return PTR_ERR(event);
398 415
399 /* vary the KERN level based on the returned errno */ 416 /* vary the KERN level based on the returned errno */
400 if (PTR_ERR(event) == -EOPNOTSUPP) 417 if (PTR_ERR(event) == -EOPNOTSUPP)
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index a42d3ae39648..ff5bdee4716d 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -241,6 +241,26 @@ config BOOTPARAM_SOFTLOCKUP_PANIC_VALUE
241 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC 241 default 0 if !BOOTPARAM_SOFTLOCKUP_PANIC
242 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC 242 default 1 if BOOTPARAM_SOFTLOCKUP_PANIC
243 243
244config PANIC_ON_OOPS
245 bool "Panic on Oops" if EXPERT
246 default n
247 help
248 Say Y here to enable the kernel to panic when it oopses. This
249 has the same effect as setting oops=panic on the kernel command
250 line.
251
252 This feature is useful to ensure that the kernel does not do
253 anything erroneous after an oops which could result in data
254 corruption or other issues.
255
256 Say N if unsure.
257
258config PANIC_ON_OOPS_VALUE
259 int
260 range 0 1
261 default 0 if !PANIC_ON_OOPS
262 default 1 if PANIC_ON_OOPS
263
244config DETECT_HUNG_TASK 264config DETECT_HUNG_TASK
245 bool "Detect Hung Tasks" 265 bool "Detect Hung Tasks"
246 depends on DEBUG_KERNEL 266 depends on DEBUG_KERNEL
diff --git a/lib/btree.c b/lib/btree.c
index e5ec1e9c1aa5..f9a484676cb6 100644
--- a/lib/btree.c
+++ b/lib/btree.c
@@ -319,8 +319,8 @@ void *btree_get_prev(struct btree_head *head, struct btree_geo *geo,
319 319
320 if (head->height == 0) 320 if (head->height == 0)
321 return NULL; 321 return NULL;
322retry:
323 longcpy(key, __key, geo->keylen); 322 longcpy(key, __key, geo->keylen);
323retry:
324 dec_key(geo, key); 324 dec_key(geo, key);
325 325
326 node = head->node; 326 node = head->node;
@@ -351,7 +351,7 @@ retry:
351 } 351 }
352miss: 352miss:
353 if (retry_key) { 353 if (retry_key) {
354 __key = retry_key; 354 longcpy(key, retry_key, geo->keylen);
355 retry_key = NULL; 355 retry_key = NULL;
356 goto retry; 356 goto retry;
357 } 357 }
@@ -509,6 +509,7 @@ retry:
509int btree_insert(struct btree_head *head, struct btree_geo *geo, 509int btree_insert(struct btree_head *head, struct btree_geo *geo,
510 unsigned long *key, void *val, gfp_t gfp) 510 unsigned long *key, void *val, gfp_t gfp)
511{ 511{
512 BUG_ON(!val);
512 return btree_insert_level(head, geo, key, val, 1, gfp); 513 return btree_insert_level(head, geo, key, val, 1, gfp);
513} 514}
514EXPORT_SYMBOL_GPL(btree_insert); 515EXPORT_SYMBOL_GPL(btree_insert);
diff --git a/lib/dynamic_queue_limits.c b/lib/dynamic_queue_limits.c
index 6ab4587d052b..0777c5a45fa0 100644
--- a/lib/dynamic_queue_limits.c
+++ b/lib/dynamic_queue_limits.c
@@ -10,23 +10,27 @@
10#include <linux/jiffies.h> 10#include <linux/jiffies.h>
11#include <linux/dynamic_queue_limits.h> 11#include <linux/dynamic_queue_limits.h>
12 12
13#define POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0) 13#define POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)
14#define AFTER_EQ(A, B) ((int)((A) - (B)) >= 0)
14 15
15/* Records completed count and recalculates the queue limit */ 16/* Records completed count and recalculates the queue limit */
16void dql_completed(struct dql *dql, unsigned int count) 17void dql_completed(struct dql *dql, unsigned int count)
17{ 18{
18 unsigned int inprogress, prev_inprogress, limit; 19 unsigned int inprogress, prev_inprogress, limit;
19 unsigned int ovlimit, all_prev_completed, completed; 20 unsigned int ovlimit, completed, num_queued;
21 bool all_prev_completed;
22
23 num_queued = ACCESS_ONCE(dql->num_queued);
20 24
21 /* Can't complete more than what's in queue */ 25 /* Can't complete more than what's in queue */
22 BUG_ON(count > dql->num_queued - dql->num_completed); 26 BUG_ON(count > num_queued - dql->num_completed);
23 27
24 completed = dql->num_completed + count; 28 completed = dql->num_completed + count;
25 limit = dql->limit; 29 limit = dql->limit;
26 ovlimit = POSDIFF(dql->num_queued - dql->num_completed, limit); 30 ovlimit = POSDIFF(num_queued - dql->num_completed, limit);
27 inprogress = dql->num_queued - completed; 31 inprogress = num_queued - completed;
28 prev_inprogress = dql->prev_num_queued - dql->num_completed; 32 prev_inprogress = dql->prev_num_queued - dql->num_completed;
29 all_prev_completed = POSDIFF(completed, dql->prev_num_queued); 33 all_prev_completed = AFTER_EQ(completed, dql->prev_num_queued);
30 34
31 if ((ovlimit && !inprogress) || 35 if ((ovlimit && !inprogress) ||
32 (dql->prev_ovlimit && all_prev_completed)) { 36 (dql->prev_ovlimit && all_prev_completed)) {
@@ -104,7 +108,7 @@ void dql_completed(struct dql *dql, unsigned int count)
104 dql->prev_ovlimit = ovlimit; 108 dql->prev_ovlimit = ovlimit;
105 dql->prev_last_obj_cnt = dql->last_obj_cnt; 109 dql->prev_last_obj_cnt = dql->last_obj_cnt;
106 dql->num_completed = completed; 110 dql->num_completed = completed;
107 dql->prev_num_queued = dql->num_queued; 111 dql->prev_num_queued = num_queued;
108} 112}
109EXPORT_SYMBOL(dql_completed); 113EXPORT_SYMBOL(dql_completed);
110 114
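
The signed-difference macros matter because the dql counters are free-running unsigned values that are allowed to wrap; a plain '>' comparison gives the wrong answer across the wrap point. A standalone sketch (not from the patch) of the difference:

#include <stdio.h>

#define OLD_POSDIFF(A, B) ((A) > (B) ? (A) - (B) : 0)
#define NEW_POSDIFF(A, B) ((int)((A) - (B)) > 0 ? (A) - (B) : 0)

int main(void)
{
	/* Counters that wrapped: logically a is 8 ahead of b. */
	unsigned int a = 3, b = 0xfffffffb;

	printf("old: %u\n", OLD_POSDIFF(a, b));	/* 0 - wrong, a < b numerically */
	printf("new: %u\n", NEW_POSDIFF(a, b));	/* 8 - correct modular distance */
	return 0;
}
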
diff --git a/lib/fault-inject.c b/lib/fault-inject.c
index 6805453c18e7..f7210ad6cffd 100644
--- a/lib/fault-inject.c
+++ b/lib/fault-inject.c
@@ -101,6 +101,10 @@ static inline bool fail_stacktrace(struct fault_attr *attr)
101 101
102bool should_fail(struct fault_attr *attr, ssize_t size) 102bool should_fail(struct fault_attr *attr, ssize_t size)
103{ 103{
104 /* No need to check any other properties if the probability is 0 */
105 if (attr->probability == 0)
106 return false;
107
104 if (attr->task_filter && !fail_task(attr, current)) 108 if (attr->task_filter && !fail_task(attr, current))
105 return false; 109 return false;
106 110
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index d7c878cc006c..e7964296fd50 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -686,6 +686,9 @@ void **radix_tree_next_chunk(struct radix_tree_root *root,
686 * during iterating; it can be zero only at the beginning. 686 * during iterating; it can be zero only at the beginning.
687 * And we cannot overflow iter->next_index in a single step, 687 * And we cannot overflow iter->next_index in a single step,
688 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG. 688 * because RADIX_TREE_MAP_SHIFT < BITS_PER_LONG.
689 *
690	 * This condition is also used by radix_tree_next_slot() to stop
691	 * contiguous iterating, and to forbid switching to the next chunk.
689 */ 692 */
690 index = iter->next_index; 693 index = iter->next_index;
691 if (!index && iter->index) 694 if (!index && iter->index)
diff --git a/lib/raid6/recov.c b/lib/raid6/recov.c
index 1805a5cc5daa..a95bccb8497d 100644
--- a/lib/raid6/recov.c
+++ b/lib/raid6/recov.c
@@ -22,8 +22,8 @@
22#include <linux/raid/pq.h> 22#include <linux/raid/pq.h>
23 23
24/* Recover two failed data blocks. */ 24/* Recover two failed data blocks. */
25void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb, 25static void raid6_2data_recov_intx1(int disks, size_t bytes, int faila,
26 void **ptrs) 26 int failb, void **ptrs)
27{ 27{
28 u8 *p, *q, *dp, *dq; 28 u8 *p, *q, *dp, *dq;
29 u8 px, qx, db; 29 u8 px, qx, db;
@@ -66,7 +66,8 @@ void raid6_2data_recov_intx1(int disks, size_t bytes, int faila, int failb,
66} 66}
67 67
68/* Recover failure of one data block plus the P block */ 68/* Recover failure of one data block plus the P block */
69void raid6_datap_recov_intx1(int disks, size_t bytes, int faila, void **ptrs) 69static void raid6_datap_recov_intx1(int disks, size_t bytes, int faila,
70 void **ptrs)
70{ 71{
71 u8 *p, *q, *dq; 72 u8 *p, *q, *dq;
72 const u8 *qmul; /* Q multiplier table */ 73 const u8 *qmul; /* Q multiplier table */
diff --git a/lib/raid6/recov_ssse3.c b/lib/raid6/recov_ssse3.c
index 37ae61930559..ecb710c0b4d9 100644
--- a/lib/raid6/recov_ssse3.c
+++ b/lib/raid6/recov_ssse3.c
@@ -19,8 +19,8 @@ static int raid6_has_ssse3(void)
19 boot_cpu_has(X86_FEATURE_SSSE3); 19 boot_cpu_has(X86_FEATURE_SSSE3);
20} 20}
21 21
22void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb, 22static void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila,
23 void **ptrs) 23 int failb, void **ptrs)
24{ 24{
25 u8 *p, *q, *dp, *dq; 25 u8 *p, *q, *dp, *dq;
26 const u8 *pbmul; /* P multiplier table for B data */ 26 const u8 *pbmul; /* P multiplier table for B data */
@@ -194,7 +194,8 @@ void raid6_2data_recov_ssse3(int disks, size_t bytes, int faila, int failb,
194} 194}
195 195
196 196
197void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila, void **ptrs) 197static void raid6_datap_recov_ssse3(int disks, size_t bytes, int faila,
198 void **ptrs)
198{ 199{
199 u8 *p, *q, *dq; 200 u8 *p, *q, *dq;
200 const u8 *qmul; /* Q multiplier table */ 201 const u8 *qmul; /* Q multiplier table */
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index d0ec4f3d1593..e91fbc23fff1 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -118,7 +118,7 @@ static void __spin_lock_debug(raw_spinlock_t *lock)
118 /* lockup suspected: */ 118 /* lockup suspected: */
119 if (print_once) { 119 if (print_once) {
120 print_once = 0; 120 print_once = 0;
121 spin_dump(lock, "lockup"); 121 spin_dump(lock, "lockup suspected");
122#ifdef CONFIG_SMP 122#ifdef CONFIG_SMP
123 trigger_all_cpu_backtrace(); 123 trigger_all_cpu_backtrace();
124#endif 124#endif
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 5391299c1e78..c3f36d415bdf 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -112,106 +112,199 @@ int skip_atoi(const char **s)
112/* Decimal conversion is by far the most typical, and is used 112/* Decimal conversion is by far the most typical, and is used
113 * for /proc and /sys data. This directly impacts e.g. top performance 113 * for /proc and /sys data. This directly impacts e.g. top performance
114 * with many processes running. We optimize it for speed 114 * with many processes running. We optimize it for speed
115 * using code from 115 * using ideas described at <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
116 * http://www.cs.uiowa.edu/~jones/bcd/decimal.html 116 * (with permission from the author, Douglas W. Jones).
117 * (with permission from the author, Douglas W. Jones). */ 117 */
118 118
119/* Formats correctly any integer in [0,99999]. 119#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
120 * Outputs from one to five digits depending on input. 120/* Formats correctly any integer in [0, 999999999] */
121 * On i386 gcc 4.1.2 -O2: ~250 bytes of code. */
122static noinline_for_stack 121static noinline_for_stack
123char *put_dec_trunc(char *buf, unsigned q) 122char *put_dec_full9(char *buf, unsigned q)
124{ 123{
125 unsigned d3, d2, d1, d0; 124 unsigned r;
126 d1 = (q>>4) & 0xf;
127 d2 = (q>>8) & 0xf;
128 d3 = (q>>12);
129
130 d0 = 6*(d3 + d2 + d1) + (q & 0xf);
131 q = (d0 * 0xcd) >> 11;
132 d0 = d0 - 10*q;
133 *buf++ = d0 + '0'; /* least significant digit */
134 d1 = q + 9*d3 + 5*d2 + d1;
135 if (d1 != 0) {
136 q = (d1 * 0xcd) >> 11;
137 d1 = d1 - 10*q;
138 *buf++ = d1 + '0'; /* next digit */
139
140 d2 = q + 2*d2;
141 if ((d2 != 0) || (d3 != 0)) {
142 q = (d2 * 0xd) >> 7;
143 d2 = d2 - 10*q;
144 *buf++ = d2 + '0'; /* next digit */
145
146 d3 = q + 4*d3;
147 if (d3 != 0) {
148 q = (d3 * 0xcd) >> 11;
149 d3 = d3 - 10*q;
150 *buf++ = d3 + '0'; /* next digit */
151 if (q != 0)
152 *buf++ = q + '0'; /* most sign. digit */
153 }
154 }
155 }
156 125
126 /*
127 * Possible ways to approx. divide by 10
128 * (x * 0x1999999a) >> 32 x < 1073741829 (multiply must be 64-bit)
129 * (x * 0xcccd) >> 19 x < 81920 (x < 262149 when 64-bit mul)
130 * (x * 0x6667) >> 18 x < 43699
131 * (x * 0x3334) >> 17 x < 16389
132 * (x * 0x199a) >> 16 x < 16389
133 * (x * 0x0ccd) >> 15 x < 16389
134 * (x * 0x0667) >> 14 x < 2739
135 * (x * 0x0334) >> 13 x < 1029
136 * (x * 0x019a) >> 12 x < 1029
137 * (x * 0x00cd) >> 11 x < 1029 shorter code than * 0x67 (on i386)
138 * (x * 0x0067) >> 10 x < 179
139 * (x * 0x0034) >> 9 x < 69 same
140 * (x * 0x001a) >> 8 x < 69 same
141 * (x * 0x000d) >> 7 x < 69 same, shortest code (on i386)
142 * (x * 0x0007) >> 6 x < 19
143 * See <http://www.cs.uiowa.edu/~jones/bcd/divide.html>
144 */
145 r = (q * (uint64_t)0x1999999a) >> 32;
146 *buf++ = (q - 10 * r) + '0'; /* 1 */
147 q = (r * (uint64_t)0x1999999a) >> 32;
148 *buf++ = (r - 10 * q) + '0'; /* 2 */
149 r = (q * (uint64_t)0x1999999a) >> 32;
150 *buf++ = (q - 10 * r) + '0'; /* 3 */
151 q = (r * (uint64_t)0x1999999a) >> 32;
152 *buf++ = (r - 10 * q) + '0'; /* 4 */
153 r = (q * (uint64_t)0x1999999a) >> 32;
154 *buf++ = (q - 10 * r) + '0'; /* 5 */
155 /* Now value is under 10000, can avoid 64-bit multiply */
156 q = (r * 0x199a) >> 16;
157 *buf++ = (r - 10 * q) + '0'; /* 6 */
158 r = (q * 0xcd) >> 11;
159 *buf++ = (q - 10 * r) + '0'; /* 7 */
160 q = (r * 0xcd) >> 11;
161 *buf++ = (r - 10 * q) + '0'; /* 8 */
162 *buf++ = q + '0'; /* 9 */
157 return buf; 163 return buf;
158} 164}
159/* Same with if's removed. Always emits five digits */ 165#endif
166
167/* Similar to above but do not pad with zeros.
168 * Code can be easily arranged to print 9 digits too, but our callers
169 * always call put_dec_full9() instead when the number has 9 decimal digits.
170 */
160static noinline_for_stack 171static noinline_for_stack
161char *put_dec_full(char *buf, unsigned q) 172char *put_dec_trunc8(char *buf, unsigned r)
162{ 173{
163 /* BTW, if q is in [0,9999], 8-bit ints will be enough, */ 174 unsigned q;
164 /* but anyway, gcc produces better code with full-sized ints */ 175
165 unsigned d3, d2, d1, d0; 176 /* Copy of previous function's body with added early returns */
166 d1 = (q>>4) & 0xf; 177 q = (r * (uint64_t)0x1999999a) >> 32;
167 d2 = (q>>8) & 0xf; 178 *buf++ = (r - 10 * q) + '0'; /* 2 */
168 d3 = (q>>12); 179 if (q == 0)
180 return buf;
181 r = (q * (uint64_t)0x1999999a) >> 32;
182 *buf++ = (q - 10 * r) + '0'; /* 3 */
183 if (r == 0)
184 return buf;
185 q = (r * (uint64_t)0x1999999a) >> 32;
186 *buf++ = (r - 10 * q) + '0'; /* 4 */
187 if (q == 0)
188 return buf;
189 r = (q * (uint64_t)0x1999999a) >> 32;
190 *buf++ = (q - 10 * r) + '0'; /* 5 */
191 if (r == 0)
192 return buf;
193 q = (r * 0x199a) >> 16;
194 *buf++ = (r - 10 * q) + '0'; /* 6 */
195 if (q == 0)
196 return buf;
197 r = (q * 0xcd) >> 11;
198 *buf++ = (q - 10 * r) + '0'; /* 7 */
199 if (r == 0)
200 return buf;
201 q = (r * 0xcd) >> 11;
202 *buf++ = (r - 10 * q) + '0'; /* 8 */
203 if (q == 0)
204 return buf;
205 *buf++ = q + '0'; /* 9 */
206 return buf;
207}
169 208
170 /* 209/* There are two algorithms to print larger numbers.
171 * Possible ways to approx. divide by 10 210 * One is generic: divide by 1000000000 and repeatedly print
172 * gcc -O2 replaces multiply with shifts and adds 211 * groups of (up to) 9 digits. It's conceptually simple,
173 * (x * 0xcd) >> 11: 11001101 - shorter code than * 0x67 (on i386) 212 * but requires a (unsigned long long) / 1000000000 division.
174 * (x * 0x67) >> 10: 1100111 213 *
175	 * (x * 0x34) >> 9: 110100 - same 214	 * The second algorithm splits the 64-bit unsigned long long into 16-bit chunks,
176 * (x * 0x1a) >> 8: 11010 - same 215 * manipulates them cleverly and generates groups of 4 decimal digits.
177 * (x * 0x0d) >> 7: 1101 - same, shortest code (on i386) 216 * It so happens that it does NOT require long long division.
178 */ 217 *
179 d0 = 6*(d3 + d2 + d1) + (q & 0xf); 218 * If long is > 32 bits, division of 64-bit values is relatively easy,
180 q = (d0 * 0xcd) >> 11; 219 * and we will use the first algorithm.
181 d0 = d0 - 10*q; 220 * If long long is > 64 bits (strange architecture with VERY large long long),
182	 *buf++ = d0 + '0'; 221	 * the second algorithm can't be used, and we again use the first one.
183 d1 = q + 9*d3 + 5*d2 + d1; 222 *
184	 q = (d1 * 0xcd) >> 11; 223	 * Else (if long is 32 bits and long long is 64 bits) we use the second one.
185 d1 = d1 - 10*q; 224 */
186 *buf++ = d1 + '0';
187
188 d2 = q + 2*d2;
189 q = (d2 * 0xd) >> 7;
190 d2 = d2 - 10*q;
191 *buf++ = d2 + '0';
192
193 d3 = q + 4*d3;
194 q = (d3 * 0xcd) >> 11; /* - shorter code */
195 /* q = (d3 * 0x67) >> 10; - would also work */
196 d3 = d3 - 10*q;
197 *buf++ = d3 + '0';
198 *buf++ = q + '0';
199 225
200 return buf; 226#if BITS_PER_LONG != 32 || BITS_PER_LONG_LONG != 64
227
228/* First algorithm: generic */
229
230static
231char *put_dec(char *buf, unsigned long long n)
232{
233 if (n >= 100*1000*1000) {
234 while (n >= 1000*1000*1000)
235 buf = put_dec_full9(buf, do_div(n, 1000*1000*1000));
236 if (n >= 100*1000*1000)
237 return put_dec_full9(buf, n);
238 }
239 return put_dec_trunc8(buf, n);
201} 240}
202/* No inlining helps gcc to use registers better */ 241
242#else
243
244/* Second algorithm: valid only for 64-bit long longs */
245
203static noinline_for_stack 246static noinline_for_stack
204char *put_dec(char *buf, unsigned long long num) 247char *put_dec_full4(char *buf, unsigned q)
205{ 248{
206 while (1) { 249 unsigned r;
207 unsigned rem; 250 r = (q * 0xcccd) >> 19;
208 if (num < 100000) 251 *buf++ = (q - 10 * r) + '0';
209 return put_dec_trunc(buf, num); 252 q = (r * 0x199a) >> 16;
210 rem = do_div(num, 100000); 253 *buf++ = (r - 10 * q) + '0';
211 buf = put_dec_full(buf, rem); 254 r = (q * 0xcd) >> 11;
212 } 255 *buf++ = (q - 10 * r) + '0';
256 *buf++ = r + '0';
257 return buf;
213} 258}
214 259
260/* Based on code by Douglas W. Jones found at
261 * <http://www.cs.uiowa.edu/~jones/bcd/decimal.html#sixtyfour>
262 * (with permission from the author).
263 * Performs no 64-bit division and hence should be fast on 32-bit machines.
264 */
265static
266char *put_dec(char *buf, unsigned long long n)
267{
268 uint32_t d3, d2, d1, q, h;
269
270 if (n < 100*1000*1000)
271 return put_dec_trunc8(buf, n);
272
273 d1 = ((uint32_t)n >> 16); /* implicit "& 0xffff" */
274 h = (n >> 32);
275 d2 = (h ) & 0xffff;
276 d3 = (h >> 16); /* implicit "& 0xffff" */
277
278 q = 656 * d3 + 7296 * d2 + 5536 * d1 + ((uint32_t)n & 0xffff);
279
280 buf = put_dec_full4(buf, q % 10000);
281 q = q / 10000;
282
283 d1 = q + 7671 * d3 + 9496 * d2 + 6 * d1;
284 buf = put_dec_full4(buf, d1 % 10000);
285 q = d1 / 10000;
286
287 d2 = q + 4749 * d3 + 42 * d2;
288 buf = put_dec_full4(buf, d2 % 10000);
289 q = d2 / 10000;
290
291 d3 = q + 281 * d3;
292 if (!d3)
293 goto done;
294 buf = put_dec_full4(buf, d3 % 10000);
295 q = d3 / 10000;
296 if (!q)
297 goto done;
298 buf = put_dec_full4(buf, q);
299 done:
300 while (buf[-1] == '0')
301 --buf;
302
303 return buf;
304}
305
306#endif
307
215/* 308/*
216 * Convert passed number to decimal string. 309 * Convert passed number to decimal string.
217 * Returns the length of string. On buffer overflow, returns 0. 310 * Returns the length of string. On buffer overflow, returns 0.
@@ -220,16 +313,22 @@ char *put_dec(char *buf, unsigned long long num)
220 */ 313 */
221int num_to_str(char *buf, int size, unsigned long long num) 314int num_to_str(char *buf, int size, unsigned long long num)
222{ 315{
223 char tmp[21]; /* Enough for 2^64 in decimal */ 316 char tmp[sizeof(num) * 3];
224 int idx, len; 317 int idx, len;
225 318
226 len = put_dec(tmp, num) - tmp; 319 /* put_dec() may work incorrectly for num = 0 (generate "", not "0") */
320 if (num <= 9) {
321 tmp[0] = '0' + num;
322 len = 1;
323 } else {
324 len = put_dec(tmp, num) - tmp;
325 }
227 326
228 if (len > size) 327 if (len > size)
229 return 0; 328 return 0;
230 for (idx = 0; idx < len; ++idx) 329 for (idx = 0; idx < len; ++idx)
231 buf[idx] = tmp[len - idx - 1]; 330 buf[idx] = tmp[len - idx - 1];
232 return len; 331 return len;
233} 332}
234 333
235#define ZEROPAD 1 /* pad with zero */ 334#define ZEROPAD 1 /* pad with zero */
@@ -314,8 +413,8 @@ char *number(char *buf, char *end, unsigned long long num,
314 413
315 /* generate full string in tmp[], in reverse order */ 414 /* generate full string in tmp[], in reverse order */
316 i = 0; 415 i = 0;
317 if (num == 0) 416 if (num < spec.base)
318 tmp[i++] = '0'; 417 tmp[i++] = digits[num] | locase;
319 /* Generic code, for any base: 418 /* Generic code, for any base:
320 else do { 419 else do {
321 tmp[i++] = (digits[do_div(num,base)] | locase); 420 tmp[i++] = (digits[do_div(num,base)] | locase);
@@ -611,7 +710,7 @@ char *ip4_string(char *p, const u8 *addr, const char *fmt)
611 } 710 }
612 for (i = 0; i < 4; i++) { 711 for (i = 0; i < 4; i++) {
613 char temp[3]; /* hold each IP quad in reverse order */ 712 char temp[3]; /* hold each IP quad in reverse order */
614 int digits = put_dec_trunc(temp, addr[index]) - temp; 713 int digits = put_dec_trunc8(temp, addr[index]) - temp;
615 if (leading_zeros) { 714 if (leading_zeros) {
616 if (digits < 3) 715 if (digits < 3)
617 *p++ = '0'; 716 *p++ = '0';
@@ -870,13 +969,15 @@ static noinline_for_stack
870char *pointer(const char *fmt, char *buf, char *end, void *ptr, 969char *pointer(const char *fmt, char *buf, char *end, void *ptr,
871 struct printf_spec spec) 970 struct printf_spec spec)
872{ 971{
972 int default_width = 2 * sizeof(void *) + (spec.flags & SPECIAL ? 2 : 0);
973
873 if (!ptr && *fmt != 'K') { 974 if (!ptr && *fmt != 'K') {
874 /* 975 /*
875 * Print (null) with the same width as a pointer so it makes 976 * Print (null) with the same width as a pointer so it makes
876 * tabular output look nice. 977 * tabular output look nice.
877 */ 978 */
878 if (spec.field_width == -1) 979 if (spec.field_width == -1)
879 spec.field_width = 2 * sizeof(void *); 980 spec.field_width = default_width;
880 return string(buf, end, "(null)", spec); 981 return string(buf, end, "(null)", spec);
881 } 982 }
882 983
@@ -931,7 +1032,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
931 */ 1032 */
932 if (in_irq() || in_serving_softirq() || in_nmi()) { 1033 if (in_irq() || in_serving_softirq() || in_nmi()) {
933 if (spec.field_width == -1) 1034 if (spec.field_width == -1)
934 spec.field_width = 2 * sizeof(void *); 1035 spec.field_width = default_width;
935 return string(buf, end, "pK-error", spec); 1036 return string(buf, end, "pK-error", spec);
936 } 1037 }
937 if (!((kptr_restrict == 0) || 1038 if (!((kptr_restrict == 0) ||
@@ -948,7 +1049,7 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
948 } 1049 }
949 spec.flags |= SMALL; 1050 spec.flags |= SMALL;
950 if (spec.field_width == -1) { 1051 if (spec.field_width == -1) {
951 spec.field_width = 2 * sizeof(void *); 1052 spec.field_width = default_width;
952 spec.flags |= ZEROPAD; 1053 spec.flags |= ZEROPAD;
953 } 1054 }
954 spec.base = 16; 1055 spec.base = 16;
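
The new put_dec helpers lean on the reciprocal-multiplication trick documented in the table above: for small enough x, (x * 0x1999999a) >> 32 equals x / 10, so no division instruction is needed. A standalone check (not from the patch) of that identity:

#include <stdint.h>
#include <stdio.h>

/* Valid for x < 1073741829 according to the table in the patch. */
static unsigned int div10(unsigned int x)
{
	return ((uint64_t)x * 0x1999999a) >> 32;
}

int main(void)
{
	unsigned int x;

	for (x = 999999990; x <= 1000000010; x++)	/* spot-check near 1e9 */
		if (div10(x) != x / 10)
			printf("mismatch at %u\n", x);

	/* Extracting the last digit the way put_dec_full9() does: */
	printf("last digit of 123456789 is %u\n", 123456789 - 10 * div10(123456789));
	return 0;
}
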
diff --git a/mm/Kconfig b/mm/Kconfig
index b2176374b98e..82fed4eb2b6f 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -389,3 +389,20 @@ config CLEANCACHE
389 in a negligible performance hit. 389 in a negligible performance hit.
390 390
391 If unsure, say Y to enable cleancache 391 If unsure, say Y to enable cleancache
392
393config FRONTSWAP
394 bool "Enable frontswap to cache swap pages if tmem is present"
395 depends on SWAP
396 default n
397 help
398 Frontswap is so named because it can be thought of as the opposite
399 of a "backing" store for a swap device. The data is stored into
400 "transcendent memory", memory that is not directly accessible or
401 addressable by the kernel and is of unknown and possibly
402 time-varying size. When space in transcendent memory is available,
403 a significant swap I/O reduction may be achieved. When none is
404 available, all frontswap calls are reduced to a single pointer-
405	  compare-against-NULL, resulting in a negligible performance hit,
406	  and swap data is stored as normal on the matching swap device.
407
408 If unsure, say Y to enable frontswap.
diff --git a/mm/Makefile b/mm/Makefile
index a156285ce88d..2e2fbbefb99f 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -29,6 +29,7 @@ obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
29 29
30obj-$(CONFIG_BOUNCE) += bounce.o 30obj-$(CONFIG_BOUNCE) += bounce.o
31obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o 31obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
32obj-$(CONFIG_FRONTSWAP) += frontswap.o
32obj-$(CONFIG_HAS_DMA) += dmapool.o 33obj-$(CONFIG_HAS_DMA) += dmapool.o
33obj-$(CONFIG_HUGETLBFS) += hugetlb.o 34obj-$(CONFIG_HUGETLBFS) += hugetlb.o
34obj-$(CONFIG_NUMA) += mempolicy.o 35obj-$(CONFIG_NUMA) += mempolicy.o
diff --git a/mm/cleancache.c b/mm/cleancache.c
index 5646c740f613..32e6f4136fa2 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -80,7 +80,7 @@ EXPORT_SYMBOL(__cleancache_init_shared_fs);
80static int cleancache_get_key(struct inode *inode, 80static int cleancache_get_key(struct inode *inode,
81 struct cleancache_filekey *key) 81 struct cleancache_filekey *key)
82{ 82{
83 int (*fhfn)(struct dentry *, __u32 *fh, int *, int); 83 int (*fhfn)(struct inode *, __u32 *fh, int *, struct inode *);
84 int len = 0, maxlen = CLEANCACHE_KEY_MAX; 84 int len = 0, maxlen = CLEANCACHE_KEY_MAX;
85 struct super_block *sb = inode->i_sb; 85 struct super_block *sb = inode->i_sb;
86 86
@@ -88,9 +88,7 @@ static int cleancache_get_key(struct inode *inode,
88 if (sb->s_export_op != NULL) { 88 if (sb->s_export_op != NULL) {
89 fhfn = sb->s_export_op->encode_fh; 89 fhfn = sb->s_export_op->encode_fh;
90 if (fhfn) { 90 if (fhfn) {
91 struct dentry d; 91 len = (*fhfn)(inode, &key->u.fh[0], &maxlen, NULL);
92 d.d_inode = inode;
93 len = (*fhfn)(&d, &key->u.fh[0], &maxlen, 0);
94 if (len <= 0 || len == 255) 92 if (len <= 0 || len == 255)
95 return -1; 93 return -1;
96 if (maxlen > CLEANCACHE_KEY_MAX) 94 if (maxlen > CLEANCACHE_KEY_MAX)
diff --git a/mm/compaction.c b/mm/compaction.c
index 4ac338af5120..7ea259d82a99 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -236,7 +236,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
236 */ 236 */
237 while (unlikely(too_many_isolated(zone))) { 237 while (unlikely(too_many_isolated(zone))) {
238 /* async migration should just abort */ 238 /* async migration should just abort */
239 if (cc->mode != COMPACT_SYNC) 239 if (!cc->sync)
240 return 0; 240 return 0;
241 241
242 congestion_wait(BLK_RW_ASYNC, HZ/10); 242 congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -304,8 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
304 * satisfies the allocation 304 * satisfies the allocation
305 */ 305 */
306 pageblock_nr = low_pfn >> pageblock_order; 306 pageblock_nr = low_pfn >> pageblock_order;
307 if (cc->mode != COMPACT_SYNC && 307 if (!cc->sync && last_pageblock_nr != pageblock_nr &&
308 last_pageblock_nr != pageblock_nr &&
309 !migrate_async_suitable(get_pageblock_migratetype(page))) { 308 !migrate_async_suitable(get_pageblock_migratetype(page))) {
310 low_pfn += pageblock_nr_pages; 309 low_pfn += pageblock_nr_pages;
311 low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; 310 low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
@@ -326,7 +325,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
326 continue; 325 continue;
327 } 326 }
328 327
329 if (cc->mode != COMPACT_SYNC) 328 if (!cc->sync)
330 mode |= ISOLATE_ASYNC_MIGRATE; 329 mode |= ISOLATE_ASYNC_MIGRATE;
331 330
332 lruvec = mem_cgroup_page_lruvec(page, zone); 331 lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -361,90 +360,27 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
361 360
362#endif /* CONFIG_COMPACTION || CONFIG_CMA */ 361#endif /* CONFIG_COMPACTION || CONFIG_CMA */
363#ifdef CONFIG_COMPACTION 362#ifdef CONFIG_COMPACTION
364/*
365 * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
366 * converted to MIGRATE_MOVABLE type, false otherwise.
367 */
368static bool rescue_unmovable_pageblock(struct page *page)
369{
370 unsigned long pfn, start_pfn, end_pfn;
371 struct page *start_page, *end_page;
372
373 pfn = page_to_pfn(page);
374 start_pfn = pfn & ~(pageblock_nr_pages - 1);
375 end_pfn = start_pfn + pageblock_nr_pages;
376
377 start_page = pfn_to_page(start_pfn);
378 end_page = pfn_to_page(end_pfn);
379
380 /* Do not deal with pageblocks that overlap zones */
381 if (page_zone(start_page) != page_zone(end_page))
382 return false;
383
384 for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
385 page++) {
386 if (!pfn_valid_within(pfn))
387 continue;
388
389 if (PageBuddy(page)) {
390 int order = page_order(page);
391
392 pfn += (1 << order) - 1;
393 page += (1 << order) - 1;
394
395 continue;
396 } else if (page_count(page) == 0 || PageLRU(page))
397 continue;
398
399 return false;
400 }
401
402 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
403 move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
404 return true;
405}
406 363
407enum smt_result { 364/* Returns true if the page is within a block suitable for migration to */
408 GOOD_AS_MIGRATION_TARGET, 365static bool suitable_migration_target(struct page *page)
409 FAIL_UNMOVABLE_TARGET,
410 FAIL_BAD_TARGET,
411};
412
413/*
414 * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
415 * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
416 * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
417 */
418static enum smt_result suitable_migration_target(struct page *page,
419 struct compact_control *cc)
420{ 366{
421 367
422 int migratetype = get_pageblock_migratetype(page); 368 int migratetype = get_pageblock_migratetype(page);
423 369
424 /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 370 /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
425 if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) 371 if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
426 return FAIL_BAD_TARGET; 372 return false;
427 373
428 /* If the page is a large free page, then allow migration */ 374 /* If the page is a large free page, then allow migration */
429 if (PageBuddy(page) && page_order(page) >= pageblock_order) 375 if (PageBuddy(page) && page_order(page) >= pageblock_order)
430 return GOOD_AS_MIGRATION_TARGET; 376 return true;
431 377
432 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */ 378 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
433 if (cc->mode != COMPACT_ASYNC_UNMOVABLE && 379 if (migrate_async_suitable(migratetype))
434 migrate_async_suitable(migratetype)) 380 return true;
435 return GOOD_AS_MIGRATION_TARGET;
436
437 if (cc->mode == COMPACT_ASYNC_MOVABLE &&
438 migratetype == MIGRATE_UNMOVABLE)
439 return FAIL_UNMOVABLE_TARGET;
440
441 if (cc->mode != COMPACT_ASYNC_MOVABLE &&
442 migratetype == MIGRATE_UNMOVABLE &&
443 rescue_unmovable_pageblock(page))
444 return GOOD_AS_MIGRATION_TARGET;
445 381
446 /* Otherwise skip the block */ 382 /* Otherwise skip the block */
447 return FAIL_BAD_TARGET; 383 return false;
448} 384}
449 385
450/* 386/*
@@ -478,13 +414,6 @@ static void isolate_freepages(struct zone *zone,
478 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages; 414 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
479 415
480 /* 416 /*
481 * isolate_freepages() may be called more than once during
482 * compact_zone_order() run and we want only the most recent
483 * count.
484 */
485 cc->nr_pageblocks_skipped = 0;
486
487 /*
488 * Isolate free pages until enough are available to migrate the 417 * Isolate free pages until enough are available to migrate the
489 * pages on cc->migratepages. We stop searching if the migrate 418 * pages on cc->migratepages. We stop searching if the migrate
490 * and free page scanners meet or enough free pages are isolated. 419 * and free page scanners meet or enough free pages are isolated.
@@ -492,7 +421,6 @@ static void isolate_freepages(struct zone *zone,
492 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages; 421 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
493 pfn -= pageblock_nr_pages) { 422 pfn -= pageblock_nr_pages) {
494 unsigned long isolated; 423 unsigned long isolated;
495 enum smt_result ret;
496 424
497 if (!pfn_valid(pfn)) 425 if (!pfn_valid(pfn))
498 continue; 426 continue;
@@ -509,12 +437,9 @@ static void isolate_freepages(struct zone *zone,
509 continue; 437 continue;
510 438
511 /* Check the block is suitable for migration */ 439 /* Check the block is suitable for migration */
512 ret = suitable_migration_target(page, cc); 440 if (!suitable_migration_target(page))
513 if (ret != GOOD_AS_MIGRATION_TARGET) {
514 if (ret == FAIL_UNMOVABLE_TARGET)
515 cc->nr_pageblocks_skipped++;
516 continue; 441 continue;
517 } 442
518 /* 443 /*
519 * Found a block suitable for isolating free pages from. Now 444 * Found a block suitable for isolating free pages from. Now
520 * we disabled interrupts, double check things are ok and 445 * we disabled interrupts, double check things are ok and
@@ -523,14 +448,12 @@ static void isolate_freepages(struct zone *zone,
523 */ 448 */
524 isolated = 0; 449 isolated = 0;
525 spin_lock_irqsave(&zone->lock, flags); 450 spin_lock_irqsave(&zone->lock, flags);
526 ret = suitable_migration_target(page, cc); 451 if (suitable_migration_target(page)) {
527 if (ret == GOOD_AS_MIGRATION_TARGET) {
528 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn); 452 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
529 isolated = isolate_freepages_block(pfn, end_pfn, 453 isolated = isolate_freepages_block(pfn, end_pfn,
530 freelist, false); 454 freelist, false);
531 nr_freepages += isolated; 455 nr_freepages += isolated;
532 } else if (ret == FAIL_UNMOVABLE_TARGET) 456 }
533 cc->nr_pageblocks_skipped++;
534 spin_unlock_irqrestore(&zone->lock, flags); 457 spin_unlock_irqrestore(&zone->lock, flags);
535 458
536 /* 459 /*
@@ -762,9 +685,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
762 685
763 nr_migrate = cc->nr_migratepages; 686 nr_migrate = cc->nr_migratepages;
764 err = migrate_pages(&cc->migratepages, compaction_alloc, 687 err = migrate_pages(&cc->migratepages, compaction_alloc,
765 (unsigned long)&cc->freepages, false, 688 (unsigned long)cc, false,
766 (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT 689 cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
767 : MIGRATE_ASYNC);
768 update_nr_listpages(cc); 690 update_nr_listpages(cc);
769 nr_remaining = cc->nr_migratepages; 691 nr_remaining = cc->nr_migratepages;
770 692
@@ -793,8 +715,7 @@ out:
793 715
794static unsigned long compact_zone_order(struct zone *zone, 716static unsigned long compact_zone_order(struct zone *zone,
795 int order, gfp_t gfp_mask, 717 int order, gfp_t gfp_mask,
796 enum compact_mode mode, 718 bool sync)
797 unsigned long *nr_pageblocks_skipped)
798{ 719{
799 struct compact_control cc = { 720 struct compact_control cc = {
800 .nr_freepages = 0, 721 .nr_freepages = 0,
@@ -802,17 +723,12 @@ static unsigned long compact_zone_order(struct zone *zone,
802 .order = order, 723 .order = order,
803 .migratetype = allocflags_to_migratetype(gfp_mask), 724 .migratetype = allocflags_to_migratetype(gfp_mask),
804 .zone = zone, 725 .zone = zone,
805 .mode = mode, 726 .sync = sync,
806 }; 727 };
807 unsigned long rc;
808
809 INIT_LIST_HEAD(&cc.freepages); 728 INIT_LIST_HEAD(&cc.freepages);
810 INIT_LIST_HEAD(&cc.migratepages); 729 INIT_LIST_HEAD(&cc.migratepages);
811 730
812 rc = compact_zone(zone, &cc); 731 return compact_zone(zone, &cc);
813 *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
814
815 return rc;
816} 732}
817 733
818int sysctl_extfrag_threshold = 500; 734int sysctl_extfrag_threshold = 500;
@@ -837,8 +753,6 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
837 struct zoneref *z; 753 struct zoneref *z;
838 struct zone *zone; 754 struct zone *zone;
839 int rc = COMPACT_SKIPPED; 755 int rc = COMPACT_SKIPPED;
840 unsigned long nr_pageblocks_skipped;
841 enum compact_mode mode;
842 756
843 /* 757 /*
844 * Check whether it is worth even starting compaction. The order check is 758 * Check whether it is worth even starting compaction. The order check is
@@ -855,22 +769,12 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
855 nodemask) { 769 nodemask) {
856 int status; 770 int status;
857 771
858 mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE; 772 status = compact_zone_order(zone, order, gfp_mask, sync);
859retry:
860 status = compact_zone_order(zone, order, gfp_mask, mode,
861 &nr_pageblocks_skipped);
862 rc = max(status, rc); 773 rc = max(status, rc);
863 774
864 /* If a normal allocation would succeed, stop compacting */ 775 /* If a normal allocation would succeed, stop compacting */
865 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) 776 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
866 break; 777 break;
867
868 if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
869 if (nr_pageblocks_skipped) {
870 mode = COMPACT_ASYNC_UNMOVABLE;
871 goto retry;
872 }
873 }
874 } 778 }
875 779
876 return rc; 780 return rc;
@@ -904,7 +808,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
904 if (ok && cc->order > zone->compact_order_failed) 808 if (ok && cc->order > zone->compact_order_failed)
905 zone->compact_order_failed = cc->order + 1; 809 zone->compact_order_failed = cc->order + 1;
906 /* Currently async compaction is never deferred. */ 810 /* Currently async compaction is never deferred. */
907 else if (!ok && cc->mode == COMPACT_SYNC) 811 else if (!ok && cc->sync)
908 defer_compaction(zone, cc->order); 812 defer_compaction(zone, cc->order);
909 } 813 }
910 814
@@ -919,7 +823,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
919{ 823{
920 struct compact_control cc = { 824 struct compact_control cc = {
921 .order = order, 825 .order = order,
922 .mode = COMPACT_ASYNC_MOVABLE, 826 .sync = false,
923 }; 827 };
924 828
925 return __compact_pgdat(pgdat, &cc); 829 return __compact_pgdat(pgdat, &cc);
@@ -929,7 +833,7 @@ static int compact_node(int nid)
929{ 833{
930 struct compact_control cc = { 834 struct compact_control cc = {
931 .order = -1, 835 .order = -1,
932 .mode = COMPACT_SYNC, 836 .sync = true,
933 }; 837 };
934 838
935 return __compact_pgdat(NODE_DATA(nid), &cc); 839 return __compact_pgdat(NODE_DATA(nid), &cc);
diff --git a/mm/filemap.c b/mm/filemap.c
index 64b48f934b89..a4a5260b0279 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1899,71 +1899,6 @@ struct page *read_cache_page(struct address_space *mapping,
1899} 1899}
1900EXPORT_SYMBOL(read_cache_page); 1900EXPORT_SYMBOL(read_cache_page);
1901 1901
1902/*
1903 * The logic we want is
1904 *
1905 * if suid or (sgid and xgrp)
1906 * remove privs
1907 */
1908int should_remove_suid(struct dentry *dentry)
1909{
1910 umode_t mode = dentry->d_inode->i_mode;
1911 int kill = 0;
1912
1913 /* suid always must be killed */
1914 if (unlikely(mode & S_ISUID))
1915 kill = ATTR_KILL_SUID;
1916
1917 /*
1918 * sgid without any exec bits is just a mandatory locking mark; leave
1919 * it alone. If some exec bits are set, it's a real sgid; kill it.
1920 */
1921 if (unlikely((mode & S_ISGID) && (mode & S_IXGRP)))
1922 kill |= ATTR_KILL_SGID;
1923
1924 if (unlikely(kill && !capable(CAP_FSETID) && S_ISREG(mode)))
1925 return kill;
1926
1927 return 0;
1928}
1929EXPORT_SYMBOL(should_remove_suid);
1930
1931static int __remove_suid(struct dentry *dentry, int kill)
1932{
1933 struct iattr newattrs;
1934
1935 newattrs.ia_valid = ATTR_FORCE | kill;
1936 return notify_change(dentry, &newattrs);
1937}
1938
1939int file_remove_suid(struct file *file)
1940{
1941 struct dentry *dentry = file->f_path.dentry;
1942 struct inode *inode = dentry->d_inode;
1943 int killsuid;
1944 int killpriv;
1945 int error = 0;
1946
1947 /* Fast path for nothing security related */
1948 if (IS_NOSEC(inode))
1949 return 0;
1950
1951 killsuid = should_remove_suid(dentry);
1952 killpriv = security_inode_need_killpriv(dentry);
1953
1954 if (killpriv < 0)
1955 return killpriv;
1956 if (killpriv)
1957 error = security_inode_killpriv(dentry);
1958 if (!error && killsuid)
1959 error = __remove_suid(dentry, killsuid);
1960 if (!error && (inode->i_sb->s_flags & MS_NOSEC))
1961 inode->i_flags |= S_NOSEC;
1962
1963 return error;
1964}
1965EXPORT_SYMBOL(file_remove_suid);
1966
1967static size_t __iovec_copy_from_user_inatomic(char *vaddr, 1902static size_t __iovec_copy_from_user_inatomic(char *vaddr,
1968 const struct iovec *iov, size_t base, size_t bytes) 1903 const struct iovec *iov, size_t base, size_t bytes)
1969{ 1904{
@@ -2489,7 +2424,9 @@ ssize_t __generic_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
2489 if (err) 2424 if (err)
2490 goto out; 2425 goto out;
2491 2426
2492 file_update_time(file); 2427 err = file_update_time(file);
2428 if (err)
2429 goto out;
2493 2430
2494 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */ 2431 /* coalesce the iovecs and go direct-to-BIO for O_DIRECT */
2495 if (unlikely(file->f_flags & O_DIRECT)) { 2432 if (unlikely(file->f_flags & O_DIRECT)) {
diff --git a/mm/filemap_xip.c b/mm/filemap_xip.c
index a4eb31132229..213ca1f53409 100644
--- a/mm/filemap_xip.c
+++ b/mm/filemap_xip.c
@@ -426,7 +426,9 @@ xip_file_write(struct file *filp, const char __user *buf, size_t len,
426 if (ret) 426 if (ret)
427 goto out_backing; 427 goto out_backing;
428 428
429 file_update_time(filp); 429 ret = file_update_time(filp);
430 if (ret)
431 goto out_backing;
430 432
431 ret = __xip_file_write (filp, buf, count, pos, ppos); 433 ret = __xip_file_write (filp, buf, count, pos, ppos);
432 434
diff --git a/mm/frontswap.c b/mm/frontswap.c
new file mode 100644
index 000000000000..e25025574a02
--- /dev/null
+++ b/mm/frontswap.c
@@ -0,0 +1,314 @@
1/*
2 * Frontswap frontend
3 *
4 * This code provides the generic "frontend" layer to call a matching
5 * "backend" driver implementation of frontswap. See
6 * Documentation/vm/frontswap.txt for more information.
7 *
8 * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
9 * Author: Dan Magenheimer
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2.
12 */
13
14#include <linux/mm.h>
15#include <linux/mman.h>
16#include <linux/swap.h>
17#include <linux/swapops.h>
18#include <linux/proc_fs.h>
19#include <linux/security.h>
20#include <linux/capability.h>
21#include <linux/module.h>
22#include <linux/uaccess.h>
23#include <linux/debugfs.h>
24#include <linux/frontswap.h>
25#include <linux/swapfile.h>
26
27/*
28 * frontswap_ops is set by frontswap_register_ops to contain the pointers
29 * to the frontswap "backend" implementation functions.
30 */
31static struct frontswap_ops frontswap_ops __read_mostly;
32
33/*
34 * This global enablement flag reduces overhead on systems where frontswap_ops
35 * has not been registered, so is preferred to the slower alternative: a
36 * function call that checks a non-global.
37 */
38bool frontswap_enabled __read_mostly;
39EXPORT_SYMBOL(frontswap_enabled);
40
41/*
42 * If enabled, frontswap_store will return failure even on success. As
43 * a result, the swap subsystem will always write the page to swap, in
44 * effect converting frontswap into a writethrough cache. In this mode,
45 * there is no direct reduction in swap writes, but a frontswap backend
46 * can unilaterally "reclaim" any pages in use with no data loss, thus
 47 * providing increased control over maximum memory usage due to frontswap.
48 */
49static bool frontswap_writethrough_enabled __read_mostly;
50
51#ifdef CONFIG_DEBUG_FS
52/*
53 * Counters available via /sys/kernel/debug/frontswap (if debugfs is
54 * properly configured). These are for information only so are not protected
55 * against increment races.
56 */
57static u64 frontswap_loads;
58static u64 frontswap_succ_stores;
59static u64 frontswap_failed_stores;
60static u64 frontswap_invalidates;
61
62static inline void inc_frontswap_loads(void) {
63 frontswap_loads++;
64}
65static inline void inc_frontswap_succ_stores(void) {
66 frontswap_succ_stores++;
67}
68static inline void inc_frontswap_failed_stores(void) {
69 frontswap_failed_stores++;
70}
71static inline void inc_frontswap_invalidates(void) {
72 frontswap_invalidates++;
73}
74#else
75static inline void inc_frontswap_loads(void) { }
76static inline void inc_frontswap_succ_stores(void) { }
77static inline void inc_frontswap_failed_stores(void) { }
78static inline void inc_frontswap_invalidates(void) { }
79#endif
80/*
81 * Register operations for frontswap, returning previous thus allowing
82 * detection of multiple backends and possible nesting.
83 */
84struct frontswap_ops frontswap_register_ops(struct frontswap_ops *ops)
85{
86 struct frontswap_ops old = frontswap_ops;
87
88 frontswap_ops = *ops;
89 frontswap_enabled = true;
90 return old;
91}
92EXPORT_SYMBOL(frontswap_register_ops);
93
94/*
95 * Enable/disable frontswap writethrough (see above).
96 */
97void frontswap_writethrough(bool enable)
98{
99 frontswap_writethrough_enabled = enable;
100}
101EXPORT_SYMBOL(frontswap_writethrough);
102
103/*
104 * Called when a swap device is swapon'd.
105 */
106void __frontswap_init(unsigned type)
107{
108 struct swap_info_struct *sis = swap_info[type];
109
110 BUG_ON(sis == NULL);
111 if (sis->frontswap_map == NULL)
112 return;
113 if (frontswap_enabled)
114 (*frontswap_ops.init)(type);
115}
116EXPORT_SYMBOL(__frontswap_init);
117
118/*
119 * "Store" data from a page to frontswap and associate it with the page's
120 * swaptype and offset. Page must be locked and in the swap cache.
121 * If frontswap already contains a page with matching swaptype and
 122 * offset, the frontswap implementation may either overwrite the data and
123 * return success or invalidate the page from frontswap and return failure.
124 */
125int __frontswap_store(struct page *page)
126{
127 int ret = -1, dup = 0;
128 swp_entry_t entry = { .val = page_private(page), };
129 int type = swp_type(entry);
130 struct swap_info_struct *sis = swap_info[type];
131 pgoff_t offset = swp_offset(entry);
132
133 BUG_ON(!PageLocked(page));
134 BUG_ON(sis == NULL);
135 if (frontswap_test(sis, offset))
136 dup = 1;
137 ret = (*frontswap_ops.store)(type, offset, page);
138 if (ret == 0) {
139 frontswap_set(sis, offset);
140 inc_frontswap_succ_stores();
141 if (!dup)
142 atomic_inc(&sis->frontswap_pages);
143 } else if (dup) {
144 /*
145 failed dup always results in automatic invalidate of
146 the (older) page from frontswap
147 */
148 frontswap_clear(sis, offset);
149 atomic_dec(&sis->frontswap_pages);
150 inc_frontswap_failed_stores();
151 } else
152 inc_frontswap_failed_stores();
153 if (frontswap_writethrough_enabled)
154 /* report failure so swap also writes to swap device */
155 ret = -1;
156 return ret;
157}
158EXPORT_SYMBOL(__frontswap_store);
159
160/*
161 * "Get" data from frontswap associated with swaptype and offset that were
162 * specified when the data was put to frontswap and use it to fill the
163 * specified page with data. Page must be locked and in the swap cache.
164 */
165int __frontswap_load(struct page *page)
166{
167 int ret = -1;
168 swp_entry_t entry = { .val = page_private(page), };
169 int type = swp_type(entry);
170 struct swap_info_struct *sis = swap_info[type];
171 pgoff_t offset = swp_offset(entry);
172
173 BUG_ON(!PageLocked(page));
174 BUG_ON(sis == NULL);
175 if (frontswap_test(sis, offset))
176 ret = (*frontswap_ops.load)(type, offset, page);
177 if (ret == 0)
178 inc_frontswap_loads();
179 return ret;
180}
181EXPORT_SYMBOL(__frontswap_load);
182
183/*
184 * Invalidate any data from frontswap associated with the specified swaptype
185 * and offset so that a subsequent "get" will fail.
186 */
187void __frontswap_invalidate_page(unsigned type, pgoff_t offset)
188{
189 struct swap_info_struct *sis = swap_info[type];
190
191 BUG_ON(sis == NULL);
192 if (frontswap_test(sis, offset)) {
193 (*frontswap_ops.invalidate_page)(type, offset);
194 atomic_dec(&sis->frontswap_pages);
195 frontswap_clear(sis, offset);
196 inc_frontswap_invalidates();
197 }
198}
199EXPORT_SYMBOL(__frontswap_invalidate_page);
200
201/*
202 * Invalidate all data from frontswap associated with all offsets for the
203 * specified swaptype.
204 */
205void __frontswap_invalidate_area(unsigned type)
206{
207 struct swap_info_struct *sis = swap_info[type];
208
209 BUG_ON(sis == NULL);
210 if (sis->frontswap_map == NULL)
211 return;
212 (*frontswap_ops.invalidate_area)(type);
213 atomic_set(&sis->frontswap_pages, 0);
214 memset(sis->frontswap_map, 0, sis->max / sizeof(long));
215}
216EXPORT_SYMBOL(__frontswap_invalidate_area);
217
218/*
219 * Frontswap, like a true swap device, may unnecessarily retain pages
220 * under certain circumstances; "shrink" frontswap is essentially a
221 * "partial swapoff" and works by calling try_to_unuse to attempt to
222 * unuse enough frontswap pages to attempt to -- subject to memory
223 * constraints -- reduce the number of pages in frontswap to the
224 * number given in the parameter target_pages.
225 */
226void frontswap_shrink(unsigned long target_pages)
227{
228 struct swap_info_struct *si = NULL;
229 int si_frontswap_pages;
230 unsigned long total_pages = 0, total_pages_to_unuse;
231 unsigned long pages = 0, pages_to_unuse = 0;
232 int type;
233 bool locked = false;
234
235 /*
236 * we don't want to hold swap_lock while doing a very
237 * lengthy try_to_unuse, but swap_list may change
238 * so restart scan from swap_list.head each time
239 */
240 spin_lock(&swap_lock);
241 locked = true;
242 total_pages = 0;
243 for (type = swap_list.head; type >= 0; type = si->next) {
244 si = swap_info[type];
245 total_pages += atomic_read(&si->frontswap_pages);
246 }
247 if (total_pages <= target_pages)
248 goto out;
249 total_pages_to_unuse = total_pages - target_pages;
250 for (type = swap_list.head; type >= 0; type = si->next) {
251 si = swap_info[type];
252 si_frontswap_pages = atomic_read(&si->frontswap_pages);
253 if (total_pages_to_unuse < si_frontswap_pages)
254 pages = pages_to_unuse = total_pages_to_unuse;
255 else {
256 pages = si_frontswap_pages;
257 pages_to_unuse = 0; /* unuse all */
258 }
259 /* ensure there is enough RAM to fetch pages from frontswap */
260 if (security_vm_enough_memory_mm(current->mm, pages))
261 continue;
262 vm_unacct_memory(pages);
263 break;
264 }
265 if (type < 0)
266 goto out;
267 locked = false;
268 spin_unlock(&swap_lock);
269 try_to_unuse(type, true, pages_to_unuse);
270out:
271 if (locked)
272 spin_unlock(&swap_lock);
273 return;
274}
275EXPORT_SYMBOL(frontswap_shrink);
276
277/*
278 * Count and return the number of frontswap pages across all
279 * swap devices. This is exported so that backend drivers can
280 * determine current usage without reading debugfs.
281 */
282unsigned long frontswap_curr_pages(void)
283{
284 int type;
285 unsigned long totalpages = 0;
286 struct swap_info_struct *si = NULL;
287
288 spin_lock(&swap_lock);
289 for (type = swap_list.head; type >= 0; type = si->next) {
290 si = swap_info[type];
291 totalpages += atomic_read(&si->frontswap_pages);
292 }
293 spin_unlock(&swap_lock);
294 return totalpages;
295}
296EXPORT_SYMBOL(frontswap_curr_pages);
297
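
The two helpers exported just above let a backend police its own footprint: read the current frontswap page count across all swap devices, then ask the frontend to do a "partial swapoff" back down to a target. A minimal sketch of that pattern follows; toyswap_enforce_limit() and its toyswap_max_pages limit are illustrative names, not part of this patch.

/* e.g. called from a backend's shrinker callback or a periodic worker */
static void toyswap_enforce_limit(unsigned long toyswap_max_pages)
{
	unsigned long cur = frontswap_curr_pages();

	if (cur > toyswap_max_pages)
		frontswap_shrink(toyswap_max_pages);
}
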
298static int __init init_frontswap(void)
299{
300#ifdef CONFIG_DEBUG_FS
301 struct dentry *root = debugfs_create_dir("frontswap", NULL);
302 if (root == NULL)
303 return -ENXIO;
304 debugfs_create_u64("loads", S_IRUGO, root, &frontswap_loads);
305 debugfs_create_u64("succ_stores", S_IRUGO, root, &frontswap_succ_stores);
306 debugfs_create_u64("failed_stores", S_IRUGO, root,
307 &frontswap_failed_stores);
308 debugfs_create_u64("invalidates", S_IRUGO,
309 root, &frontswap_invalidates);
310#endif
311 return 0;
312}
313
314module_init(init_frontswap);
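
The new file above only provides the frontend plumbing; a backend supplies the actual page store by registering a struct frontswap_ops. The sketch below is a minimal, hypothetical backend skeleton written against the calls visible in this patch (frontswap_register_ops(), frontswap_writethrough(), and the init/store/load/invalidate_page/invalidate_area hooks invoked through frontswap_ops). The exact struct definition lives in linux/frontswap.h and is assumed here from those call sites, and all "toyswap" names are illustrative only.

#include <linux/module.h>
#include <linux/frontswap.h>

/* Hypothetical backend: accept nothing, so every store falls back to swap. */
static void toyswap_init(unsigned type)
{
	pr_info("toyswap: swapon for swap type %u\n", type);
}

static int toyswap_store(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* failure: the swap subsystem writes the page to disk */
}

static int toyswap_load(unsigned type, pgoff_t offset, struct page *page)
{
	return -1;	/* nothing was stored, so nothing can be loaded */
}

static void toyswap_invalidate_page(unsigned type, pgoff_t offset)
{
}

static void toyswap_invalidate_area(unsigned type)
{
}

static struct frontswap_ops toyswap_ops = {
	.init = toyswap_init,
	.store = toyswap_store,
	.load = toyswap_load,
	.invalidate_page = toyswap_invalidate_page,
	.invalidate_area = toyswap_invalidate_area,
};

static int __init toyswap_module_init(void)
{
	struct frontswap_ops old = frontswap_register_ops(&toyswap_ops);

	/* A non-empty 'old' means another backend was already registered. */
	if (old.init)
		pr_warn("toyswap: replacing an existing frontswap backend\n");
	frontswap_writethrough(false);
	return 0;
}
module_init(toyswap_module_init);
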
diff --git a/mm/internal.h b/mm/internal.h
index 4194ab9dc19b..2ba87fbfb75b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -94,9 +94,6 @@ extern void putback_lru_page(struct page *page);
94/* 94/*
95 * in mm/page_alloc.c 95 * in mm/page_alloc.c
96 */ 96 */
97extern void set_pageblock_migratetype(struct page *page, int migratetype);
98extern int move_freepages_block(struct zone *zone, struct page *page,
99 int migratetype);
100extern void __free_pages_bootmem(struct page *page, unsigned int order); 97extern void __free_pages_bootmem(struct page *page, unsigned int order);
101extern void prep_compound_page(struct page *page, unsigned long order); 98extern void prep_compound_page(struct page *page, unsigned long order);
102#ifdef CONFIG_MEMORY_FAILURE 99#ifdef CONFIG_MEMORY_FAILURE
@@ -104,7 +101,6 @@ extern bool is_free_buddy_page(struct page *page);
104#endif 101#endif
105 102
106#if defined CONFIG_COMPACTION || defined CONFIG_CMA 103#if defined CONFIG_COMPACTION || defined CONFIG_CMA
107#include <linux/compaction.h>
108 104
109/* 105/*
110 * in mm/compaction.c 106 * in mm/compaction.c
@@ -123,14 +119,11 @@ struct compact_control {
123 unsigned long nr_migratepages; /* Number of pages to migrate */ 119 unsigned long nr_migratepages; /* Number of pages to migrate */
124 unsigned long free_pfn; /* isolate_freepages search base */ 120 unsigned long free_pfn; /* isolate_freepages search base */
125 unsigned long migrate_pfn; /* isolate_migratepages search base */ 121 unsigned long migrate_pfn; /* isolate_migratepages search base */
126 enum compact_mode mode; /* Compaction mode */ 122 bool sync; /* Synchronous migration */
127 123
128 int order; /* order a direct compactor needs */ 124 int order; /* order a direct compactor needs */
129 int migratetype; /* MOVABLE, RECLAIMABLE etc */ 125 int migratetype; /* MOVABLE, RECLAIMABLE etc */
130 struct zone *zone; 126 struct zone *zone;
131
132 /* Number of UNMOVABLE destination pageblocks skipped during scan */
133 unsigned long nr_pageblocks_skipped;
134}; 127};
135 128
136unsigned long 129unsigned long
@@ -350,3 +343,7 @@ extern u64 hwpoison_filter_flags_mask;
350extern u64 hwpoison_filter_flags_value; 343extern u64 hwpoison_filter_flags_value;
351extern u64 hwpoison_filter_memcg; 344extern u64 hwpoison_filter_memcg;
352extern u32 hwpoison_filter_enable; 345extern u32 hwpoison_filter_enable;
346
347extern unsigned long vm_mmap_pgoff(struct file *, unsigned long,
348 unsigned long, unsigned long,
349 unsigned long, unsigned long);
diff --git a/mm/memblock.c b/mm/memblock.c
index 952123eba433..d4382095f8bd 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -184,7 +184,24 @@ static void __init_memblock memblock_remove_region(struct memblock_type *type, u
184 } 184 }
185} 185}
186 186
187static int __init_memblock memblock_double_array(struct memblock_type *type) 187/**
188 * memblock_double_array - double the size of the memblock regions array
189 * @type: memblock type of the regions array being doubled
190 * @new_area_start: starting address of memory range to avoid overlap with
191 * @new_area_size: size of memory range to avoid overlap with
192 *
193 * Double the size of the @type regions array. If memblock is being used to
194 * allocate memory for a new reserved regions array and there is a previously
195 * allocated memory range [@new_area_start,@new_area_start+@new_area_size]
196 * waiting to be reserved, ensure the memory used by the new array does
197 * not overlap.
198 *
199 * RETURNS:
200 * 0 on success, -1 on failure.
201 */
202static int __init_memblock memblock_double_array(struct memblock_type *type,
203 phys_addr_t new_area_start,
204 phys_addr_t new_area_size)
188{ 205{
189 struct memblock_region *new_array, *old_array; 206 struct memblock_region *new_array, *old_array;
190 phys_addr_t old_size, new_size, addr; 207 phys_addr_t old_size, new_size, addr;
@@ -222,7 +239,18 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
222 new_array = kmalloc(new_size, GFP_KERNEL); 239 new_array = kmalloc(new_size, GFP_KERNEL);
223 addr = new_array ? __pa(new_array) : 0; 240 addr = new_array ? __pa(new_array) : 0;
224 } else { 241 } else {
225 addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t)); 242 /* only exclude range when trying to double reserved.regions */
243 if (type != &memblock.reserved)
244 new_area_start = new_area_size = 0;
245
246 addr = memblock_find_in_range(new_area_start + new_area_size,
247 memblock.current_limit,
248 new_size, sizeof(phys_addr_t));
249 if (!addr && new_area_size)
250 addr = memblock_find_in_range(0,
251 min(new_area_start, memblock.current_limit),
252 new_size, sizeof(phys_addr_t));
253
226 new_array = addr ? __va(addr) : 0; 254 new_array = addr ? __va(addr) : 0;
227 } 255 }
228 if (!addr) { 256 if (!addr) {
@@ -399,7 +427,7 @@ repeat:
399 */ 427 */
400 if (!insert) { 428 if (!insert) {
401 while (type->cnt + nr_new > type->max) 429 while (type->cnt + nr_new > type->max)
402 if (memblock_double_array(type) < 0) 430 if (memblock_double_array(type, obase, size) < 0)
403 return -ENOMEM; 431 return -ENOMEM;
404 insert = true; 432 insert = true;
405 goto repeat; 433 goto repeat;
@@ -450,7 +478,7 @@ static int __init_memblock memblock_isolate_range(struct memblock_type *type,
450 478
451 /* we'll create at most two more regions */ 479 /* we'll create at most two more regions */
452 while (type->cnt + 2 > type->max) 480 while (type->cnt + 2 > type->max)
453 if (memblock_double_array(type) < 0) 481 if (memblock_double_array(type, base, size) < 0)
454 return -ENOMEM; 482 return -ENOMEM;
455 483
456 for (i = 0; i < type->cnt; i++) { 484 for (i = 0; i < type->cnt; i++) {
@@ -540,9 +568,9 @@ int __init_memblock memblock_reserve(phys_addr_t base, phys_addr_t size)
540 * __next_free_mem_range - next function for for_each_free_mem_range() 568 * __next_free_mem_range - next function for for_each_free_mem_range()
541 * @idx: pointer to u64 loop variable 569 * @idx: pointer to u64 loop variable
542 * @nid: nid: node selector, %MAX_NUMNODES for all nodes 570 * @nid: nid: node selector, %MAX_NUMNODES for all nodes
543 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 571 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
544 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 572 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
545 * @p_nid: ptr to int for nid of the range, can be %NULL 573 * @out_nid: ptr to int for nid of the range, can be %NULL
546 * 574 *
547 * Find the first free area from *@idx which matches @nid, fill the out 575 * Find the first free area from *@idx which matches @nid, fill the out
548 * parameters, and update *@idx for the next iteration. The lower 32bit of 576 * parameters, and update *@idx for the next iteration. The lower 32bit of
@@ -616,9 +644,9 @@ void __init_memblock __next_free_mem_range(u64 *idx, int nid,
616 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse() 644 * __next_free_mem_range_rev - next function for for_each_free_mem_range_reverse()
617 * @idx: pointer to u64 loop variable 645 * @idx: pointer to u64 loop variable
618 * @nid: nid: node selector, %MAX_NUMNODES for all nodes 646 * @nid: nid: node selector, %MAX_NUMNODES for all nodes
619 * @p_start: ptr to phys_addr_t for start address of the range, can be %NULL 647 * @out_start: ptr to phys_addr_t for start address of the range, can be %NULL
620 * @p_end: ptr to phys_addr_t for end address of the range, can be %NULL 648 * @out_end: ptr to phys_addr_t for end address of the range, can be %NULL
621 * @p_nid: ptr to int for nid of the range, can be %NULL 649 * @out_nid: ptr to int for nid of the range, can be %NULL
622 * 650 *
623 * Reverse of __next_free_mem_range(). 651 * Reverse of __next_free_mem_range().
624 */ 652 */
@@ -867,6 +895,16 @@ int __init_memblock memblock_is_memory(phys_addr_t addr)
867 return memblock_search(&memblock.memory, addr) != -1; 895 return memblock_search(&memblock.memory, addr) != -1;
868} 896}
869 897
898/**
899 * memblock_is_region_memory - check if a region is a subset of memory
900 * @base: base of region to check
901 * @size: size of region to check
902 *
903 * Check if the region [@base, @base+@size) is a subset of a memory block.
904 *
905 * RETURNS:
906 * 0 if false, non-zero if true
907 */
870int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size) 908int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size)
871{ 909{
872 int idx = memblock_search(&memblock.memory, base); 910 int idx = memblock_search(&memblock.memory, base);
@@ -879,6 +917,16 @@ int __init_memblock memblock_is_region_memory(phys_addr_t base, phys_addr_t size
879 memblock.memory.regions[idx].size) >= end; 917 memblock.memory.regions[idx].size) >= end;
880} 918}
881 919
920/**
921 * memblock_is_region_reserved - check if a region intersects reserved memory
922 * @base: base of region to check
923 * @size: size of region to check
924 *
925 * Check if the region [@base, @base+@size) intersects a reserved memory block.
926 *
927 * RETURNS:
928 * 0 if false, non-zero if true
929 */
882int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size) 930int __init_memblock memblock_is_region_reserved(phys_addr_t base, phys_addr_t size)
883{ 931{
884 memblock_cap_size(base, &size); 932 memblock_cap_size(base, &size);
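
For context on the kernel-doc added above: both helpers take a physical range and return 0 for false, non-zero for true, so a typical early-boot caller checks that a candidate range is real memory and not already claimed before reserving it. The sketch below is illustrative only; reserve_boot_buffer() is a hypothetical wrapper, not part of this patch.

#include <linux/init.h>
#include <linux/errno.h>
#include <linux/memblock.h>

/* Hypothetical early-boot helper: claim a fixed physical window if possible. */
static int __init reserve_boot_buffer(phys_addr_t base, phys_addr_t size)
{
	if (!memblock_is_region_memory(base, size))
		return -EINVAL;		/* range is not (entirely) RAM */
	if (memblock_is_region_reserved(base, size))
		return -EBUSY;		/* overlaps something already reserved */

	/*
	 * May trigger memblock_double_array(); with this patch the doubled
	 * reserved.regions array is kept clear of [base, base + size).
	 */
	return memblock_reserve(base, size);
}
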
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ac35bccadb7b..f72b5e52451a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1148,7 +1148,7 @@ bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1148{ 1148{
1149 if (root_memcg == memcg) 1149 if (root_memcg == memcg)
1150 return true; 1150 return true;
1151 if (!root_memcg->use_hierarchy) 1151 if (!root_memcg->use_hierarchy || !memcg)
1152 return false; 1152 return false;
1153 return css_is_ancestor(&memcg->css, &root_memcg->css); 1153 return css_is_ancestor(&memcg->css, &root_memcg->css);
1154} 1154}
@@ -1234,7 +1234,7 @@ int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
1234 1234
1235/** 1235/**
1236 * mem_cgroup_margin - calculate chargeable space of a memory cgroup 1236 * mem_cgroup_margin - calculate chargeable space of a memory cgroup
1237 * @mem: the memory cgroup 1237 * @memcg: the memory cgroup
1238 * 1238 *
1239 * Returns the maximum amount of memory @mem can be charged with, in 1239 * Returns the maximum amount of memory @mem can be charged with, in
1240 * pages. 1240 * pages.
@@ -1508,7 +1508,7 @@ static unsigned long mem_cgroup_reclaim(struct mem_cgroup *memcg,
1508 1508
1509/** 1509/**
1510 * test_mem_cgroup_node_reclaimable 1510 * test_mem_cgroup_node_reclaimable
1511 * @mem: the target memcg 1511 * @memcg: the target memcg
1512 * @nid: the node ID to be checked. 1512 * @nid: the node ID to be checked.
 1513 * @noswap : specify true here if the user wants file only information. 1513
1514 * 1514 *
diff --git a/mm/memory.c b/mm/memory.c
index 1b7dc662bf9f..2466d1250231 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1225,7 +1225,15 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
1225 next = pmd_addr_end(addr, end); 1225 next = pmd_addr_end(addr, end);
1226 if (pmd_trans_huge(*pmd)) { 1226 if (pmd_trans_huge(*pmd)) {
1227 if (next - addr != HPAGE_PMD_SIZE) { 1227 if (next - addr != HPAGE_PMD_SIZE) {
1228 VM_BUG_ON(!rwsem_is_locked(&tlb->mm->mmap_sem)); 1228#ifdef CONFIG_DEBUG_VM
1229 if (!rwsem_is_locked(&tlb->mm->mmap_sem)) {
1230 pr_err("%s: mmap_sem is unlocked! addr=0x%lx end=0x%lx vma->vm_start=0x%lx vma->vm_end=0x%lx\n",
1231 __func__, addr, end,
1232 vma->vm_start,
1233 vma->vm_end);
1234 BUG();
1235 }
1236#endif
1229 split_huge_page_pmd(vma->vm_mm, pmd); 1237 split_huge_page_pmd(vma->vm_mm, pmd);
1230 } else if (zap_huge_pmd(tlb, vma, pmd, addr)) 1238 } else if (zap_huge_pmd(tlb, vma, pmd, addr))
1231 goto next; 1239 goto next;
@@ -1366,7 +1374,7 @@ void unmap_vmas(struct mmu_gather *tlb,
1366/** 1374/**
1367 * zap_page_range - remove user pages in a given range 1375 * zap_page_range - remove user pages in a given range
1368 * @vma: vm_area_struct holding the applicable pages 1376 * @vma: vm_area_struct holding the applicable pages
1369 * @address: starting address of pages to zap 1377 * @start: starting address of pages to zap
1370 * @size: number of bytes to zap 1378 * @size: number of bytes to zap
1371 * @details: details of nonlinear truncation or shared cache invalidation 1379 * @details: details of nonlinear truncation or shared cache invalidation
1372 * 1380 *
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index f15c1b24ca18..1d771e4200d2 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1177,7 +1177,7 @@ static long do_mbind(unsigned long start, unsigned long len,
1177 if (!list_empty(&pagelist)) { 1177 if (!list_empty(&pagelist)) {
1178 nr_failed = migrate_pages(&pagelist, new_vma_page, 1178 nr_failed = migrate_pages(&pagelist, new_vma_page,
1179 (unsigned long)vma, 1179 (unsigned long)vma,
1180 false, true); 1180 false, MIGRATE_SYNC);
1181 if (nr_failed) 1181 if (nr_failed)
1182 putback_lru_pages(&pagelist); 1182 putback_lru_pages(&pagelist);
1183 } 1183 }
diff --git a/mm/migrate.c b/mm/migrate.c
index ab81d482ae6f..be26d5cbe56b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -436,7 +436,10 @@ void migrate_page_copy(struct page *newpage, struct page *page)
436 * is actually a signal that all of the page has become dirty. 436 * is actually a signal that all of the page has become dirty.
437 * Whereas only part of our page may be dirty. 437 * Whereas only part of our page may be dirty.
438 */ 438 */
439 __set_page_dirty_nobuffers(newpage); 439 if (PageSwapBacked(page))
440 SetPageDirty(newpage);
441 else
442 __set_page_dirty_nobuffers(newpage);
440 } 443 }
441 444
442 mlock_migrate_page(newpage, page); 445 mlock_migrate_page(newpage, page);
diff --git a/mm/mmap.c b/mm/mmap.c
index 4a9c2a391e28..3edfcdfa42d9 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -971,15 +971,13 @@ static inline unsigned long round_hint_to_min(unsigned long hint)
971 * The caller must hold down_write(&current->mm->mmap_sem). 971 * The caller must hold down_write(&current->mm->mmap_sem).
972 */ 972 */
973 973
974static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr, 974unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
975 unsigned long len, unsigned long prot, 975 unsigned long len, unsigned long prot,
976 unsigned long flags, unsigned long pgoff) 976 unsigned long flags, unsigned long pgoff)
977{ 977{
978 struct mm_struct * mm = current->mm; 978 struct mm_struct * mm = current->mm;
979 struct inode *inode; 979 struct inode *inode;
980 vm_flags_t vm_flags; 980 vm_flags_t vm_flags;
981 int error;
982 unsigned long reqprot = prot;
983 981
984 /* 982 /*
985 * Does the application expect PROT_READ to imply PROT_EXEC? 983 * Does the application expect PROT_READ to imply PROT_EXEC?
@@ -1101,39 +1099,9 @@ static unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
1101 } 1099 }
1102 } 1100 }
1103 1101
1104 error = security_file_mmap(file, reqprot, prot, flags, addr, 0);
1105 if (error)
1106 return error;
1107
1108 return mmap_region(file, addr, len, flags, vm_flags, pgoff); 1102 return mmap_region(file, addr, len, flags, vm_flags, pgoff);
1109} 1103}
1110 1104
1111unsigned long do_mmap(struct file *file, unsigned long addr,
1112 unsigned long len, unsigned long prot,
1113 unsigned long flag, unsigned long offset)
1114{
1115 if (unlikely(offset + PAGE_ALIGN(len) < offset))
1116 return -EINVAL;
1117 if (unlikely(offset & ~PAGE_MASK))
1118 return -EINVAL;
1119 return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1120}
1121EXPORT_SYMBOL(do_mmap);
1122
1123unsigned long vm_mmap(struct file *file, unsigned long addr,
1124 unsigned long len, unsigned long prot,
1125 unsigned long flag, unsigned long offset)
1126{
1127 unsigned long ret;
1128 struct mm_struct *mm = current->mm;
1129
1130 down_write(&mm->mmap_sem);
1131 ret = do_mmap(file, addr, len, prot, flag, offset);
1132 up_write(&mm->mmap_sem);
1133 return ret;
1134}
1135EXPORT_SYMBOL(vm_mmap);
1136
1137SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1105SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1138 unsigned long, prot, unsigned long, flags, 1106 unsigned long, prot, unsigned long, flags,
1139 unsigned long, fd, unsigned long, pgoff) 1107 unsigned long, fd, unsigned long, pgoff)
@@ -1165,10 +1133,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1165 1133
1166 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 1134 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1167 1135
1168 down_write(&current->mm->mmap_sem); 1136 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1169 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1170 up_write(&current->mm->mmap_sem);
1171
1172 if (file) 1137 if (file)
1173 fput(file); 1138 fput(file);
1174out: 1139out:
@@ -1629,7 +1594,9 @@ get_unmapped_area(struct file *file, unsigned long addr, unsigned long len,
1629 if (addr & ~PAGE_MASK) 1594 if (addr & ~PAGE_MASK)
1630 return -EINVAL; 1595 return -EINVAL;
1631 1596
1632 return arch_rebalance_pgtables(addr, len); 1597 addr = arch_rebalance_pgtables(addr, len);
1598 error = security_mmap_addr(addr);
1599 return error ? error : addr;
1633} 1600}
1634 1601
1635EXPORT_SYMBOL(get_unmapped_area); 1602EXPORT_SYMBOL(get_unmapped_area);
@@ -1819,7 +1786,7 @@ int expand_downwards(struct vm_area_struct *vma,
1819 return -ENOMEM; 1786 return -ENOMEM;
1820 1787
1821 address &= PAGE_MASK; 1788 address &= PAGE_MASK;
1822 error = security_file_mmap(NULL, 0, 0, 0, address, 1); 1789 error = security_mmap_addr(address);
1823 if (error) 1790 if (error)
1824 return error; 1791 return error;
1825 1792
@@ -2159,7 +2126,6 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
2159 2126
2160 return 0; 2127 return 0;
2161} 2128}
2162EXPORT_SYMBOL(do_munmap);
2163 2129
2164int vm_munmap(unsigned long start, size_t len) 2130int vm_munmap(unsigned long start, size_t len)
2165{ 2131{
@@ -2207,10 +2173,6 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
2207 if (!len) 2173 if (!len)
2208 return addr; 2174 return addr;
2209 2175
2210 error = security_file_mmap(NULL, 0, 0, 0, addr, 1);
2211 if (error)
2212 return error;
2213
2214 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; 2176 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
2215 2177
2216 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); 2178 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
@@ -2563,10 +2525,6 @@ int install_special_mapping(struct mm_struct *mm,
2563 vma->vm_ops = &special_mapping_vmops; 2525 vma->vm_ops = &special_mapping_vmops;
2564 vma->vm_private_data = pages; 2526 vma->vm_private_data = pages;
2565 2527
2566 ret = security_file_mmap(NULL, 0, 0, 0, vma->vm_start, 1);
2567 if (ret)
2568 goto out;
2569
2570 ret = insert_vm_struct(mm, vma); 2528 ret = insert_vm_struct(mm, vma);
2571 if (ret) 2529 if (ret)
2572 goto out; 2530 goto out;
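
The mmap_pgoff() syscall above no longer takes mmap_sem itself; that work moves into vm_mmap_pgoff(), which this patch declares in mm/internal.h but whose body falls outside the hunks shown here. A plausible sketch of what such a helper has to do, based purely on the locking removed from the call sites, is below; treat it as an assumption about the real implementation, not a copy of it.

unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
			    unsigned long len, unsigned long prot,
			    unsigned long flag, unsigned long pgoff)
{
	unsigned long ret;
	struct mm_struct *mm = current->mm;

	/* Callers used to take this lock themselves around do_mmap_pgoff(). */
	down_write(&mm->mmap_sem);
	ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
	up_write(&mm->mmap_sem);
	return ret;
}
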
diff --git a/mm/mremap.c b/mm/mremap.c
index db8d983b5a7d..21fed202ddad 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -371,10 +371,6 @@ static unsigned long mremap_to(unsigned long addr,
371 if ((addr <= new_addr) && (addr+old_len) > new_addr) 371 if ((addr <= new_addr) && (addr+old_len) > new_addr)
372 goto out; 372 goto out;
373 373
374 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
375 if (ret)
376 goto out;
377
378 ret = do_munmap(mm, new_addr, new_len); 374 ret = do_munmap(mm, new_addr, new_len);
379 if (ret) 375 if (ret)
380 goto out; 376 goto out;
@@ -432,15 +428,17 @@ static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
432 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise 428 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
433 * This option implies MREMAP_MAYMOVE. 429 * This option implies MREMAP_MAYMOVE.
434 */ 430 */
435unsigned long do_mremap(unsigned long addr, 431SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
436 unsigned long old_len, unsigned long new_len, 432 unsigned long, new_len, unsigned long, flags,
437 unsigned long flags, unsigned long new_addr) 433 unsigned long, new_addr)
438{ 434{
439 struct mm_struct *mm = current->mm; 435 struct mm_struct *mm = current->mm;
440 struct vm_area_struct *vma; 436 struct vm_area_struct *vma;
441 unsigned long ret = -EINVAL; 437 unsigned long ret = -EINVAL;
442 unsigned long charged = 0; 438 unsigned long charged = 0;
443 439
440 down_write(&current->mm->mmap_sem);
441
444 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) 442 if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
445 goto out; 443 goto out;
446 444
@@ -530,25 +528,11 @@ unsigned long do_mremap(unsigned long addr,
530 goto out; 528 goto out;
531 } 529 }
532 530
533 ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
534 if (ret)
535 goto out;
536 ret = move_vma(vma, addr, old_len, new_len, new_addr); 531 ret = move_vma(vma, addr, old_len, new_len, new_addr);
537 } 532 }
538out: 533out:
539 if (ret & ~PAGE_MASK) 534 if (ret & ~PAGE_MASK)
540 vm_unacct_memory(charged); 535 vm_unacct_memory(charged);
541 return ret;
542}
543
544SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
545 unsigned long, new_len, unsigned long, flags,
546 unsigned long, new_addr)
547{
548 unsigned long ret;
549
550 down_write(&current->mm->mmap_sem);
551 ret = do_mremap(addr, old_len, new_len, flags, new_addr);
552 up_write(&current->mm->mmap_sem); 536 up_write(&current->mm->mmap_sem);
553 return ret; 537 return ret;
554} 538}
diff --git a/mm/nommu.c b/mm/nommu.c
index bb8f4f004a82..d4b0c10872de 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -889,7 +889,6 @@ static int validate_mmap_request(struct file *file,
889 unsigned long *_capabilities) 889 unsigned long *_capabilities)
890{ 890{
891 unsigned long capabilities, rlen; 891 unsigned long capabilities, rlen;
892 unsigned long reqprot = prot;
893 int ret; 892 int ret;
894 893
895 /* do the simple checks first */ 894 /* do the simple checks first */
@@ -1047,7 +1046,7 @@ static int validate_mmap_request(struct file *file,
1047 } 1046 }
1048 1047
1049 /* allow the security API to have its say */ 1048 /* allow the security API to have its say */
1050 ret = security_file_mmap(file, reqprot, prot, flags, addr, 0); 1049 ret = security_mmap_addr(addr);
1051 if (ret < 0) 1050 if (ret < 0)
1052 return ret; 1051 return ret;
1053 1052
@@ -1233,7 +1232,7 @@ enomem:
1233/* 1232/*
1234 * handle mapping creation for uClinux 1233 * handle mapping creation for uClinux
1235 */ 1234 */
1236static unsigned long do_mmap_pgoff(struct file *file, 1235unsigned long do_mmap_pgoff(struct file *file,
1237 unsigned long addr, 1236 unsigned long addr,
1238 unsigned long len, 1237 unsigned long len,
1239 unsigned long prot, 1238 unsigned long prot,
@@ -1471,32 +1470,6 @@ error_getting_region:
1471 return -ENOMEM; 1470 return -ENOMEM;
1472} 1471}
1473 1472
1474unsigned long do_mmap(struct file *file, unsigned long addr,
1475 unsigned long len, unsigned long prot,
1476 unsigned long flag, unsigned long offset)
1477{
1478 if (unlikely(offset + PAGE_ALIGN(len) < offset))
1479 return -EINVAL;
1480 if (unlikely(offset & ~PAGE_MASK))
1481 return -EINVAL;
1482 return do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
1483}
1484EXPORT_SYMBOL(do_mmap);
1485
1486unsigned long vm_mmap(struct file *file, unsigned long addr,
1487 unsigned long len, unsigned long prot,
1488 unsigned long flag, unsigned long offset)
1489{
1490 unsigned long ret;
1491 struct mm_struct *mm = current->mm;
1492
1493 down_write(&mm->mmap_sem);
1494 ret = do_mmap(file, addr, len, prot, flag, offset);
1495 up_write(&mm->mmap_sem);
1496 return ret;
1497}
1498EXPORT_SYMBOL(vm_mmap);
1499
1500SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len, 1473SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1501 unsigned long, prot, unsigned long, flags, 1474 unsigned long, prot, unsigned long, flags,
1502 unsigned long, fd, unsigned long, pgoff) 1475 unsigned long, fd, unsigned long, pgoff)
@@ -1513,9 +1486,7 @@ SYSCALL_DEFINE6(mmap_pgoff, unsigned long, addr, unsigned long, len,
1513 1486
1514 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE); 1487 flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
1515 1488
1516 down_write(&current->mm->mmap_sem); 1489 retval = vm_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1517 retval = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
1518 up_write(&current->mm->mmap_sem);
1519 1490
1520 if (file) 1491 if (file)
1521 fput(file); 1492 fput(file);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index ed0e19677360..ac300c99baf6 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -183,7 +183,8 @@ static bool oom_unkillable_task(struct task_struct *p,
183unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg, 183unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
184 const nodemask_t *nodemask, unsigned long totalpages) 184 const nodemask_t *nodemask, unsigned long totalpages)
185{ 185{
186 unsigned long points; 186 long points;
187 long adj;
187 188
188 if (oom_unkillable_task(p, memcg, nodemask)) 189 if (oom_unkillable_task(p, memcg, nodemask))
189 return 0; 190 return 0;
@@ -192,7 +193,8 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
192 if (!p) 193 if (!p)
193 return 0; 194 return 0;
194 195
195 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) { 196 adj = p->signal->oom_score_adj;
197 if (adj == OOM_SCORE_ADJ_MIN) {
196 task_unlock(p); 198 task_unlock(p);
197 return 0; 199 return 0;
198 } 200 }
@@ -210,20 +212,17 @@ unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
210 * implementation used by LSMs. 212 * implementation used by LSMs.
211 */ 213 */
212 if (has_capability_noaudit(p, CAP_SYS_ADMIN)) 214 if (has_capability_noaudit(p, CAP_SYS_ADMIN))
213 points -= 30 * totalpages / 1000; 215 adj -= 30;
214 216
215 /* 217 /* Normalize to oom_score_adj units */
216 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may 218 adj *= totalpages / 1000;
217 * either completely disable oom killing or always prefer a certain 219 points += adj;
218 * task.
219 */
220 points += p->signal->oom_score_adj * totalpages / 1000;
221 220
222 /* 221 /*
223 * Never return 0 for an eligible task regardless of the root bonus and 222 * Never return 0 for an eligible task regardless of the root bonus and
224 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here). 223 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
225 */ 224 */
226 return points ? points : 1; 225 return points > 0 ? points : 1;
227} 226}
228 227
229/* 228/*
@@ -366,7 +365,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
366 365
367/** 366/**
368 * dump_tasks - dump current memory state of all system tasks 367 * dump_tasks - dump current memory state of all system tasks
369 * @mem: current's memory controller, if constrained 368 * @memcg: current's memory controller, if constrained
370 * @nodemask: nodemask passed to page allocator for mempolicy ooms 369 * @nodemask: nodemask passed to page allocator for mempolicy ooms
371 * 370 *
372 * Dumps the current memory state of all eligible tasks. Tasks not in the same 371 * Dumps the current memory state of all eligible tasks. Tasks not in the same
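
To make the oom_badness() normalization above concrete: oom_score_adj ranges from -1000 to +1000, and after this change it is scaled by totalpages / 1000 and added to the points the task has already accumulated from its memory footprint. A short worked example, with purely illustrative numbers:

/*
 * Suppose totalpages = 2,000,000 (about 8 GB with 4 KB pages) and the task's
 * usage contributes points = 500,000.
 *
 *   oom_score_adj = +500  ->  adj = 500 * (2,000,000 / 1000) = +1,000,000
 *                             points = 500,000 + 1,000,000 = 1,500,000
 *
 *   oom_score_adj = -500  ->  adj = -500 * 2,000 = -1,000,000
 *                             points = 500,000 - 1,000,000 = -500,000
 *                             clamped to 1: still eligible, but least preferred
 *
 * The root bonus (adj -= 30 for CAP_SYS_ADMIN) is applied in the same units
 * before scaling, so it is still worth 3% of totalpages, as in the old code.
 */
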
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 6092f331b32e..44030096da63 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -219,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
219 219
220int page_group_by_mobility_disabled __read_mostly; 220int page_group_by_mobility_disabled __read_mostly;
221 221
222void set_pageblock_migratetype(struct page *page, int migratetype) 222static void set_pageblock_migratetype(struct page *page, int migratetype)
223{ 223{
224 224
225 if (unlikely(page_group_by_mobility_disabled)) 225 if (unlikely(page_group_by_mobility_disabled))
@@ -954,8 +954,8 @@ static int move_freepages(struct zone *zone,
954 return pages_moved; 954 return pages_moved;
955} 955}
956 956
957int move_freepages_block(struct zone *zone, struct page *page, 957static int move_freepages_block(struct zone *zone, struct page *page,
958 int migratetype) 958 int migratetype)
959{ 959{
960 unsigned long start_pfn, end_pfn; 960 unsigned long start_pfn, end_pfn;
961 struct page *start_page, *end_page; 961 struct page *start_page, *end_page;
@@ -5651,7 +5651,7 @@ static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
5651 .nr_migratepages = 0, 5651 .nr_migratepages = 0,
5652 .order = -1, 5652 .order = -1,
5653 .zone = page_zone(pfn_to_page(start)), 5653 .zone = page_zone(pfn_to_page(start)),
5654 .mode = COMPACT_SYNC, 5654 .sync = true,
5655 }; 5655 };
5656 INIT_LIST_HEAD(&cc.migratepages); 5656 INIT_LIST_HEAD(&cc.migratepages);
5657 5657
diff --git a/mm/page_cgroup.c b/mm/page_cgroup.c
index 1ccbd714059c..eb750f851395 100644
--- a/mm/page_cgroup.c
+++ b/mm/page_cgroup.c
@@ -392,7 +392,7 @@ static struct swap_cgroup *lookup_swap_cgroup(swp_entry_t ent,
392 392
393/** 393/**
394 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry. 394 * swap_cgroup_cmpxchg - cmpxchg mem_cgroup's id for this swp_entry.
395 * @end: swap entry to be cmpxchged 395 * @ent: swap entry to be cmpxchged
396 * @old: old id 396 * @old: old id
397 * @new: new id 397 * @new: new id
398 * 398 *
@@ -422,7 +422,7 @@ unsigned short swap_cgroup_cmpxchg(swp_entry_t ent,
422/** 422/**
423 * swap_cgroup_record - record mem_cgroup for this swp_entry. 423 * swap_cgroup_record - record mem_cgroup for this swp_entry.
424 * @ent: swap entry to be recorded into 424 * @ent: swap entry to be recorded into
425 * @mem: mem_cgroup to be recorded 425 * @id: mem_cgroup to be recorded
426 * 426 *
427 * Returns old value at success, 0 at failure. 427 * Returns old value at success, 0 at failure.
428 * (Of course, old value can be 0.) 428 * (Of course, old value can be 0.)
diff --git a/mm/page_io.c b/mm/page_io.c
index dc76b4d0611e..34f02923744c 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -18,6 +18,7 @@
18#include <linux/bio.h> 18#include <linux/bio.h>
19#include <linux/swapops.h> 19#include <linux/swapops.h>
20#include <linux/writeback.h> 20#include <linux/writeback.h>
21#include <linux/frontswap.h>
21#include <asm/pgtable.h> 22#include <asm/pgtable.h>
22 23
23static struct bio *get_swap_bio(gfp_t gfp_flags, 24static struct bio *get_swap_bio(gfp_t gfp_flags,
@@ -98,6 +99,12 @@ int swap_writepage(struct page *page, struct writeback_control *wbc)
98 unlock_page(page); 99 unlock_page(page);
99 goto out; 100 goto out;
100 } 101 }
102 if (frontswap_store(page) == 0) {
103 set_page_writeback(page);
104 unlock_page(page);
105 end_page_writeback(page);
106 goto out;
107 }
101 bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write); 108 bio = get_swap_bio(GFP_NOIO, page, end_swap_bio_write);
102 if (bio == NULL) { 109 if (bio == NULL) {
103 set_page_dirty(page); 110 set_page_dirty(page);
@@ -122,6 +129,11 @@ int swap_readpage(struct page *page)
122 129
123 VM_BUG_ON(!PageLocked(page)); 130 VM_BUG_ON(!PageLocked(page));
124 VM_BUG_ON(PageUptodate(page)); 131 VM_BUG_ON(PageUptodate(page));
132 if (frontswap_load(page) == 0) {
133 SetPageUptodate(page);
134 unlock_page(page);
135 goto out;
136 }
125 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read); 137 bio = get_swap_bio(GFP_KERNEL, page, end_swap_bio_read);
126 if (bio == NULL) { 138 if (bio == NULL) {
127 unlock_page(page); 139 unlock_page(page);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index aa9701e12714..6c118d012bb5 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -162,7 +162,6 @@ static int walk_hugetlb_range(struct vm_area_struct *vma,
162 162
163/** 163/**
164 * walk_page_range - walk a memory map's page tables with a callback 164 * walk_page_range - walk a memory map's page tables with a callback
165 * @mm: memory map to walk
166 * @addr: starting address 165 * @addr: starting address
167 * @end: ending address 166 * @end: ending address
168 * @walk: set of callbacks to invoke for each level of the tree 167 * @walk: set of callbacks to invoke for each level of the tree
diff --git a/mm/percpu-vm.c b/mm/percpu-vm.c
index 405d331804c3..3707c71ae4cd 100644
--- a/mm/percpu-vm.c
+++ b/mm/percpu-vm.c
@@ -360,7 +360,6 @@ err_free:
360 * @chunk: chunk to depopulate 360 * @chunk: chunk to depopulate
361 * @off: offset to the area to depopulate 361 * @off: offset to the area to depopulate
362 * @size: size of the area to depopulate in bytes 362 * @size: size of the area to depopulate in bytes
363 * @flush: whether to flush cache and tlb or not
364 * 363 *
365 * For each cpu, depopulate and unmap pages [@page_start,@page_end) 364 * For each cpu, depopulate and unmap pages [@page_start,@page_end)
366 * from @chunk. If @flush is true, vcache is flushed before unmapping 365 * from @chunk. If @flush is true, vcache is flushed before unmapping
diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
index c20ff48994c2..926b46649749 100644
--- a/mm/process_vm_access.c
+++ b/mm/process_vm_access.c
@@ -371,15 +371,15 @@ static ssize_t process_vm_rw(pid_t pid,
371 /* Check iovecs */ 371 /* Check iovecs */
372 if (vm_write) 372 if (vm_write)
373 rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV, 373 rc = rw_copy_check_uvector(WRITE, lvec, liovcnt, UIO_FASTIOV,
374 iovstack_l, &iov_l, 1); 374 iovstack_l, &iov_l);
375 else 375 else
376 rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV, 376 rc = rw_copy_check_uvector(READ, lvec, liovcnt, UIO_FASTIOV,
377 iovstack_l, &iov_l, 1); 377 iovstack_l, &iov_l);
378 if (rc <= 0) 378 if (rc <= 0)
379 goto free_iovecs; 379 goto free_iovecs;
380 380
381 rc = rw_copy_check_uvector(READ, rvec, riovcnt, UIO_FASTIOV, 381 rc = rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt, UIO_FASTIOV,
382 iovstack_r, &iov_r, 0); 382 iovstack_r, &iov_r);
383 if (rc <= 0) 383 if (rc <= 0)
384 goto free_iovecs; 384 goto free_iovecs;
385 385
@@ -438,16 +438,16 @@ compat_process_vm_rw(compat_pid_t pid,
438 if (vm_write) 438 if (vm_write)
439 rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt, 439 rc = compat_rw_copy_check_uvector(WRITE, lvec, liovcnt,
440 UIO_FASTIOV, iovstack_l, 440 UIO_FASTIOV, iovstack_l,
441 &iov_l, 1); 441 &iov_l);
442 else 442 else
443 rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt, 443 rc = compat_rw_copy_check_uvector(READ, lvec, liovcnt,
444 UIO_FASTIOV, iovstack_l, 444 UIO_FASTIOV, iovstack_l,
445 &iov_l, 1); 445 &iov_l);
446 if (rc <= 0) 446 if (rc <= 0)
447 goto free_iovecs; 447 goto free_iovecs;
448 rc = compat_rw_copy_check_uvector(READ, rvec, riovcnt, 448 rc = compat_rw_copy_check_uvector(CHECK_IOVEC_ONLY, rvec, riovcnt,
449 UIO_FASTIOV, iovstack_r, 449 UIO_FASTIOV, iovstack_r,
450 &iov_r, 0); 450 &iov_r);
451 if (rc <= 0) 451 if (rc <= 0)
452 goto free_iovecs; 452 goto free_iovecs;
453 453
diff --git a/mm/shmem.c b/mm/shmem.c
index d576b84d913c..a15a466d0d1d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -683,10 +683,21 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
683 mutex_lock(&shmem_swaplist_mutex); 683 mutex_lock(&shmem_swaplist_mutex);
684 /* 684 /*
685 * We needed to drop mutex to make that restrictive page 685 * We needed to drop mutex to make that restrictive page
686 * allocation; but the inode might already be freed by now, 686 * allocation, but the inode might have been freed while we
687 * and we cannot refer to inode or mapping or info to check. 687 * dropped it: although a racing shmem_evict_inode() cannot
688 * However, we do hold page lock on the PageSwapCache page, 688 * complete without emptying the radix_tree, our page lock
689 * so can check if that still has our reference remaining. 689 * on this swapcache page is not enough to prevent that -
690 * free_swap_and_cache() of our swap entry will only
691 * trylock_page(), removing swap from radix_tree whatever.
692 *
693 * We must not proceed to shmem_add_to_page_cache() if the
694 * inode has been freed, but of course we cannot rely on
695 * inode or mapping or info to check that. However, we can
696 * safely check if our swap entry is still in use (and here
697 * it can't have got reused for another page): if it's still
698 * in use, then the inode cannot have been freed yet, and we
699 * can safely proceed (if it's no longer in use, that tells
700 * nothing about the inode, but we don't need to unuse swap).
690 */ 701 */
691 if (!page_swapcount(*pagep)) 702 if (!page_swapcount(*pagep))
692 error = -ENOENT; 703 error = -ENOENT;
@@ -730,9 +741,9 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
730 741
731 /* 742 /*
732 * There's a faint possibility that swap page was replaced before 743 * There's a faint possibility that swap page was replaced before
733 * caller locked it: it will come back later with the right page. 744 * caller locked it: caller will come back later with the right page.
734 */ 745 */
735 if (unlikely(!PageSwapCache(page))) 746 if (unlikely(!PageSwapCache(page) || page_private(page) != swap.val))
736 goto out; 747 goto out;
737 748
738 /* 749 /*
@@ -995,21 +1006,15 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
995 newpage = shmem_alloc_page(gfp, info, index); 1006 newpage = shmem_alloc_page(gfp, info, index);
996 if (!newpage) 1007 if (!newpage)
997 return -ENOMEM; 1008 return -ENOMEM;
998 VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
999 1009
1000 *pagep = newpage;
1001 page_cache_get(newpage); 1010 page_cache_get(newpage);
1002 copy_highpage(newpage, oldpage); 1011 copy_highpage(newpage, oldpage);
1012 flush_dcache_page(newpage);
1003 1013
1004 VM_BUG_ON(!PageLocked(oldpage));
1005 __set_page_locked(newpage); 1014 __set_page_locked(newpage);
1006 VM_BUG_ON(!PageUptodate(oldpage));
1007 SetPageUptodate(newpage); 1015 SetPageUptodate(newpage);
1008 VM_BUG_ON(!PageSwapBacked(oldpage));
1009 SetPageSwapBacked(newpage); 1016 SetPageSwapBacked(newpage);
1010 VM_BUG_ON(!swap_index);
1011 set_page_private(newpage, swap_index); 1017 set_page_private(newpage, swap_index);
1012 VM_BUG_ON(!PageSwapCache(oldpage));
1013 SetPageSwapCache(newpage); 1018 SetPageSwapCache(newpage);
1014 1019
1015 /* 1020 /*
@@ -1019,13 +1024,24 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1019 spin_lock_irq(&swap_mapping->tree_lock); 1024 spin_lock_irq(&swap_mapping->tree_lock);
1020 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage, 1025 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1021 newpage); 1026 newpage);
1022 __inc_zone_page_state(newpage, NR_FILE_PAGES); 1027 if (!error) {
1023 __dec_zone_page_state(oldpage, NR_FILE_PAGES); 1028 __inc_zone_page_state(newpage, NR_FILE_PAGES);
1029 __dec_zone_page_state(oldpage, NR_FILE_PAGES);
1030 }
1024 spin_unlock_irq(&swap_mapping->tree_lock); 1031 spin_unlock_irq(&swap_mapping->tree_lock);
1025 BUG_ON(error);
1026 1032
1027 mem_cgroup_replace_page_cache(oldpage, newpage); 1033 if (unlikely(error)) {
1028 lru_cache_add_anon(newpage); 1034 /*
1035 * Is this possible? I think not, now that our callers check
1036 * both PageSwapCache and page_private after getting page lock;
1037 * but be defensive. Reverse old to newpage for clear and free.
1038 */
1039 oldpage = newpage;
1040 } else {
1041 mem_cgroup_replace_page_cache(oldpage, newpage);
1042 lru_cache_add_anon(newpage);
1043 *pagep = newpage;
1044 }
1029 1045
1030 ClearPageSwapCache(oldpage); 1046 ClearPageSwapCache(oldpage);
1031 set_page_private(oldpage, 0); 1047 set_page_private(oldpage, 0);
@@ -1033,7 +1049,7 @@ static int shmem_replace_page(struct page **pagep, gfp_t gfp,
1033 unlock_page(oldpage); 1049 unlock_page(oldpage);
1034 page_cache_release(oldpage); 1050 page_cache_release(oldpage);
1035 page_cache_release(oldpage); 1051 page_cache_release(oldpage);
1036 return 0; 1052 return error;
1037} 1053}
1038 1054
1039/* 1055/*
@@ -1107,7 +1123,8 @@ repeat:
1107 1123
1108 /* We have to do this with page locked to prevent races */ 1124 /* We have to do this with page locked to prevent races */
1109 lock_page(page); 1125 lock_page(page);
1110 if (!PageSwapCache(page) || page->mapping) { 1126 if (!PageSwapCache(page) || page_private(page) != swap.val ||
1127 page->mapping) {
1111 error = -EEXIST; /* try again */ 1128 error = -EEXIST; /* try again */
1112 goto failed; 1129 goto failed;
1113 } 1130 }
@@ -2439,11 +2456,9 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb,
2439 return dentry; 2456 return dentry;
2440} 2457}
2441 2458
2442static int shmem_encode_fh(struct dentry *dentry, __u32 *fh, int *len, 2459static int shmem_encode_fh(struct inode *inode, __u32 *fh, int *len,
2443 int connectable) 2460 struct inode *parent)
2444{ 2461{
2445 struct inode *inode = dentry->d_inode;
2446
2447 if (*len < 3) { 2462 if (*len < 3) {
2448 *len = 3; 2463 *len = 3;
2449 return 255; 2464 return 255;
diff --git a/mm/slub.c b/mm/slub.c
index 80848cd3901c..8c691fa1cf3c 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1369,7 +1369,7 @@ static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1369 1369
1370 inc_slabs_node(s, page_to_nid(page), page->objects); 1370 inc_slabs_node(s, page_to_nid(page), page->objects);
1371 page->slab = s; 1371 page->slab = s;
1372 page->flags |= 1 << PG_slab; 1372 __SetPageSlab(page);
1373 1373
1374 start = page_address(page); 1374 start = page_address(page);
1375 1375
@@ -1514,15 +1514,19 @@ static inline void *acquire_slab(struct kmem_cache *s,
1514 freelist = page->freelist; 1514 freelist = page->freelist;
1515 counters = page->counters; 1515 counters = page->counters;
1516 new.counters = counters; 1516 new.counters = counters;
1517 if (mode) 1517 if (mode) {
1518 new.inuse = page->objects; 1518 new.inuse = page->objects;
1519 new.freelist = NULL;
1520 } else {
1521 new.freelist = freelist;
1522 }
1519 1523
1520 VM_BUG_ON(new.frozen); 1524 VM_BUG_ON(new.frozen);
1521 new.frozen = 1; 1525 new.frozen = 1;
1522 1526
1523 } while (!__cmpxchg_double_slab(s, page, 1527 } while (!__cmpxchg_double_slab(s, page,
1524 freelist, counters, 1528 freelist, counters,
1525 NULL, new.counters, 1529 new.freelist, new.counters,
1526 "lock and freeze")); 1530 "lock and freeze"));
1527 1531
1528 remove_partial(n, page); 1532 remove_partial(n, page);
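The acquire_slab() change above decides up front whether to claim the whole freelist (mode set, new.freelist = NULL) or leave it on the page (new.freelist = freelist), and passes new.freelist to the cmpxchg instead of a hard-coded NULL, which is why the later page->freelist = t fixup in get_partial_node() can be dropped. Underneath it is the usual lockless retry loop: snapshot, derive the replacement, compare-and-swap, retry on conflict. A small C11 sketch of that shape with a hypothetical saturating counter (not the kernel's double-word cmpxchg):

/* Illustrative sketch, not the kernel code. */
#include <stdatomic.h>

void add_saturating(_Atomic unsigned int *counter, unsigned int limit)
{
        unsigned int old, new;

        do {
                old = atomic_load(counter);          /* snapshot */
                new = old < limit ? old + 1 : limit; /* derive the update from it */
        } while (!atomic_compare_exchange_weak(counter, &old, new));
}
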
@@ -1564,7 +1568,6 @@ static void *get_partial_node(struct kmem_cache *s,
1564 object = t; 1568 object = t;
1565 available = page->objects - page->inuse; 1569 available = page->objects - page->inuse;
1566 } else { 1570 } else {
1567 page->freelist = t;
1568 available = put_cpu_partial(s, page, 0); 1571 available = put_cpu_partial(s, page, 0);
1569 stat(s, CPU_PARTIAL_NODE); 1572 stat(s, CPU_PARTIAL_NODE);
1570 } 1573 }
@@ -1579,7 +1582,7 @@ static void *get_partial_node(struct kmem_cache *s,
1579/* 1582/*
1580 * Get a page from somewhere. Search in increasing NUMA distances. 1583 * Get a page from somewhere. Search in increasing NUMA distances.
1581 */ 1584 */
1582static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags, 1585static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1583 struct kmem_cache_cpu *c) 1586 struct kmem_cache_cpu *c)
1584{ 1587{
1585#ifdef CONFIG_NUMA 1588#ifdef CONFIG_NUMA
@@ -2766,7 +2769,7 @@ static unsigned long calculate_alignment(unsigned long flags,
2766} 2769}
2767 2770
2768static void 2771static void
2769init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s) 2772init_kmem_cache_node(struct kmem_cache_node *n)
2770{ 2773{
2771 n->nr_partial = 0; 2774 n->nr_partial = 0;
2772 spin_lock_init(&n->list_lock); 2775 spin_lock_init(&n->list_lock);
@@ -2836,7 +2839,7 @@ static void early_kmem_cache_node_alloc(int node)
2836 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE); 2839 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
2837 init_tracking(kmem_cache_node, n); 2840 init_tracking(kmem_cache_node, n);
2838#endif 2841#endif
2839 init_kmem_cache_node(n, kmem_cache_node); 2842 init_kmem_cache_node(n);
2840 inc_slabs_node(kmem_cache_node, node, page->objects); 2843 inc_slabs_node(kmem_cache_node, node, page->objects);
2841 2844
2842 add_partial(n, page, DEACTIVATE_TO_HEAD); 2845 add_partial(n, page, DEACTIVATE_TO_HEAD);
@@ -2876,7 +2879,7 @@ static int init_kmem_cache_nodes(struct kmem_cache *s)
2876 } 2879 }
2877 2880
2878 s->node[node] = n; 2881 s->node[node] = n;
2879 init_kmem_cache_node(n, s); 2882 init_kmem_cache_node(n);
2880 } 2883 }
2881 return 1; 2884 return 1;
2882} 2885}
@@ -3625,7 +3628,7 @@ static int slab_mem_going_online_callback(void *arg)
3625 ret = -ENOMEM; 3628 ret = -ENOMEM;
3626 goto out; 3629 goto out;
3627 } 3630 }
3628 init_kmem_cache_node(n, s); 3631 init_kmem_cache_node(n);
3629 s->node[nid] = n; 3632 s->node[nid] = n;
3630 } 3633 }
3631out: 3634out:
@@ -3968,9 +3971,9 @@ struct kmem_cache *kmem_cache_create(const char *name, size_t size,
3968 } 3971 }
3969 return s; 3972 return s;
3970 } 3973 }
3971 kfree(n);
3972 kfree(s); 3974 kfree(s);
3973 } 3975 }
3976 kfree(n);
3974err: 3977err:
3975 up_write(&slub_lock); 3978 up_write(&slub_lock);
3976 3979
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 457b10baef59..71373d03fcee 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -31,6 +31,8 @@
31#include <linux/memcontrol.h> 31#include <linux/memcontrol.h>
32#include <linux/poll.h> 32#include <linux/poll.h>
33#include <linux/oom.h> 33#include <linux/oom.h>
34#include <linux/frontswap.h>
35#include <linux/swapfile.h>
34 36
35#include <asm/pgtable.h> 37#include <asm/pgtable.h>
36#include <asm/tlbflush.h> 38#include <asm/tlbflush.h>
@@ -42,7 +44,7 @@ static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
42static void free_swap_count_continuations(struct swap_info_struct *); 44static void free_swap_count_continuations(struct swap_info_struct *);
43static sector_t map_swap_entry(swp_entry_t, struct block_device**); 45static sector_t map_swap_entry(swp_entry_t, struct block_device**);
44 46
45static DEFINE_SPINLOCK(swap_lock); 47DEFINE_SPINLOCK(swap_lock);
46static unsigned int nr_swapfiles; 48static unsigned int nr_swapfiles;
47long nr_swap_pages; 49long nr_swap_pages;
48long total_swap_pages; 50long total_swap_pages;
@@ -53,9 +55,9 @@ static const char Unused_file[] = "Unused swap file entry ";
53static const char Bad_offset[] = "Bad swap offset entry "; 55static const char Bad_offset[] = "Bad swap offset entry ";
54static const char Unused_offset[] = "Unused swap offset entry "; 56static const char Unused_offset[] = "Unused swap offset entry ";
55 57
56static struct swap_list_t swap_list = {-1, -1}; 58struct swap_list_t swap_list = {-1, -1};
57 59
58static struct swap_info_struct *swap_info[MAX_SWAPFILES]; 60struct swap_info_struct *swap_info[MAX_SWAPFILES];
59 61
60static DEFINE_MUTEX(swapon_mutex); 62static DEFINE_MUTEX(swapon_mutex);
61 63
@@ -556,6 +558,7 @@ static unsigned char swap_entry_free(struct swap_info_struct *p,
556 swap_list.next = p->type; 558 swap_list.next = p->type;
557 nr_swap_pages++; 559 nr_swap_pages++;
558 p->inuse_pages--; 560 p->inuse_pages--;
561 frontswap_invalidate_page(p->type, offset);
559 if ((p->flags & SWP_BLKDEV) && 562 if ((p->flags & SWP_BLKDEV) &&
560 disk->fops->swap_slot_free_notify) 563 disk->fops->swap_slot_free_notify)
561 disk->fops->swap_slot_free_notify(p->bdev, offset); 564 disk->fops->swap_slot_free_notify(p->bdev, offset);
@@ -985,11 +988,12 @@ static int unuse_mm(struct mm_struct *mm,
985} 988}
986 989
987/* 990/*
988 * Scan swap_map from current position to next entry still in use. 991 * Scan swap_map (or frontswap_map if frontswap parameter is true)
992 * from current position to next entry still in use.
989 * Recycle to start on reaching the end, returning 0 when empty. 993 * Recycle to start on reaching the end, returning 0 when empty.
990 */ 994 */
991static unsigned int find_next_to_unuse(struct swap_info_struct *si, 995static unsigned int find_next_to_unuse(struct swap_info_struct *si,
992 unsigned int prev) 996 unsigned int prev, bool frontswap)
993{ 997{
994 unsigned int max = si->max; 998 unsigned int max = si->max;
995 unsigned int i = prev; 999 unsigned int i = prev;
@@ -1015,6 +1019,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1015 prev = 0; 1019 prev = 0;
1016 i = 1; 1020 i = 1;
1017 } 1021 }
1022 if (frontswap) {
1023 if (frontswap_test(si, i))
1024 break;
1025 else
1026 continue;
1027 }
1018 count = si->swap_map[i]; 1028 count = si->swap_map[i];
1019 if (count && swap_count(count) != SWAP_MAP_BAD) 1029 if (count && swap_count(count) != SWAP_MAP_BAD)
1020 break; 1030 break;
@@ -1026,8 +1036,12 @@ static unsigned int find_next_to_unuse(struct swap_info_struct *si,
1026 * We completely avoid races by reading each swap page in advance, 1036 * We completely avoid races by reading each swap page in advance,
1027 * and then search for the process using it. All the necessary 1037 * and then search for the process using it. All the necessary
1028 * page table adjustments can then be made atomically. 1038 * page table adjustments can then be made atomically.
1039 *
1040 * if the boolean frontswap is true, only unuse pages_to_unuse pages;
1041 * pages_to_unuse==0 means all pages; ignored if frontswap is false
1029 */ 1042 */
1030static int try_to_unuse(unsigned int type) 1043int try_to_unuse(unsigned int type, bool frontswap,
1044 unsigned long pages_to_unuse)
1031{ 1045{
1032 struct swap_info_struct *si = swap_info[type]; 1046 struct swap_info_struct *si = swap_info[type];
1033 struct mm_struct *start_mm; 1047 struct mm_struct *start_mm;
@@ -1060,7 +1074,7 @@ static int try_to_unuse(unsigned int type)
1060 * one pass through swap_map is enough, but not necessarily: 1074 * one pass through swap_map is enough, but not necessarily:
1061 * there are races when an instance of an entry might be missed. 1075 * there are races when an instance of an entry might be missed.
1062 */ 1076 */
1063 while ((i = find_next_to_unuse(si, i)) != 0) { 1077 while ((i = find_next_to_unuse(si, i, frontswap)) != 0) {
1064 if (signal_pending(current)) { 1078 if (signal_pending(current)) {
1065 retval = -EINTR; 1079 retval = -EINTR;
1066 break; 1080 break;
@@ -1227,6 +1241,10 @@ static int try_to_unuse(unsigned int type)
1227 * interactive performance. 1241 * interactive performance.
1228 */ 1242 */
1229 cond_resched(); 1243 cond_resched();
1244 if (frontswap && pages_to_unuse > 0) {
1245 if (!--pages_to_unuse)
1246 break;
1247 }
1230 } 1248 }
1231 1249
1232 mmput(start_mm); 1250 mmput(start_mm);
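try_to_unuse() above grows two parameters: frontswap selects whether find_next_to_unuse() scans the frontswap map instead of swap_map, and pages_to_unuse bounds the work, with 0 meaning "all pages"; the bound is only honoured when frontswap is true, as the swapoff call site below shows. A tiny userspace sketch of that calling convention with hypothetical stand-ins (drain, partial, limit):

/* Illustrative sketch of the convention, not the kernel code. */
#include <stdbool.h>
#include <stddef.h>

size_t drain(int *items, size_t n, bool partial, size_t limit)
{
        size_t done = 0;

        for (size_t i = 0; i < n; i++) {
                items[i] = 0;           /* "unuse" one entry */
                done++;
                if (partial && limit > 0 && !--limit)
                        break;          /* the bound applies to partial drains only */
        }
        return done;
}

drain(buf, 100, true, 0) and drain(buf, 100, false, 8) both sweep everything; only drain(buf, 100, true, 8) stops after eight entries.
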
@@ -1486,7 +1504,8 @@ bad_bmap:
1486} 1504}
1487 1505
1488static void enable_swap_info(struct swap_info_struct *p, int prio, 1506static void enable_swap_info(struct swap_info_struct *p, int prio,
1489 unsigned char *swap_map) 1507 unsigned char *swap_map,
1508 unsigned long *frontswap_map)
1490{ 1509{
1491 int i, prev; 1510 int i, prev;
1492 1511
@@ -1496,6 +1515,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
1496 else 1515 else
1497 p->prio = --least_priority; 1516 p->prio = --least_priority;
1498 p->swap_map = swap_map; 1517 p->swap_map = swap_map;
1518 frontswap_map_set(p, frontswap_map);
1499 p->flags |= SWP_WRITEOK; 1519 p->flags |= SWP_WRITEOK;
1500 nr_swap_pages += p->pages; 1520 nr_swap_pages += p->pages;
1501 total_swap_pages += p->pages; 1521 total_swap_pages += p->pages;
@@ -1512,6 +1532,7 @@ static void enable_swap_info(struct swap_info_struct *p, int prio,
1512 swap_list.head = swap_list.next = p->type; 1532 swap_list.head = swap_list.next = p->type;
1513 else 1533 else
1514 swap_info[prev]->next = p->type; 1534 swap_info[prev]->next = p->type;
1535 frontswap_init(p->type);
1515 spin_unlock(&swap_lock); 1536 spin_unlock(&swap_lock);
1516} 1537}
1517 1538
@@ -1585,7 +1606,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1585 spin_unlock(&swap_lock); 1606 spin_unlock(&swap_lock);
1586 1607
1587 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); 1608 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX);
1588 err = try_to_unuse(type); 1609 err = try_to_unuse(type, false, 0); /* force all pages to be unused */
1589 compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj); 1610 compare_swap_oom_score_adj(OOM_SCORE_ADJ_MAX, oom_score_adj);
1590 1611
1591 if (err) { 1612 if (err) {
@@ -1596,7 +1617,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1596 * sys_swapoff for this swap_info_struct at this point. 1617 * sys_swapoff for this swap_info_struct at this point.
1597 */ 1618 */
1598 /* re-insert swap space back into swap_list */ 1619 /* re-insert swap space back into swap_list */
1599 enable_swap_info(p, p->prio, p->swap_map); 1620 enable_swap_info(p, p->prio, p->swap_map, frontswap_map_get(p));
1600 goto out_dput; 1621 goto out_dput;
1601 } 1622 }
1602 1623
@@ -1622,9 +1643,11 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
1622 swap_map = p->swap_map; 1643 swap_map = p->swap_map;
1623 p->swap_map = NULL; 1644 p->swap_map = NULL;
1624 p->flags = 0; 1645 p->flags = 0;
1646 frontswap_invalidate_area(type);
1625 spin_unlock(&swap_lock); 1647 spin_unlock(&swap_lock);
1626 mutex_unlock(&swapon_mutex); 1648 mutex_unlock(&swapon_mutex);
1627 vfree(swap_map); 1649 vfree(swap_map);
1650 vfree(frontswap_map_get(p));
1628 /* Destroy swap account informatin */ 1651 /* Destroy swap account informatin */
1629 swap_cgroup_swapoff(type); 1652 swap_cgroup_swapoff(type);
1630 1653
@@ -1893,24 +1916,20 @@ static unsigned long read_swap_header(struct swap_info_struct *p,
1893 1916
1894 /* 1917 /*
1895 * Find out how many pages are allowed for a single swap 1918 * Find out how many pages are allowed for a single swap
1896 * device. There are three limiting factors: 1) the number 1919 * device. There are two limiting factors: 1) the number
1897 * of bits for the swap offset in the swp_entry_t type, and 1920 * of bits for the swap offset in the swp_entry_t type, and
1898 * 2) the number of bits in the swap pte as defined by the 1921 * 2) the number of bits in the swap pte as defined by the
1899 * the different architectures, and 3) the number of free bits 1922 * different architectures. In order to find the
1900 * in an exceptional radix_tree entry. In order to find the
1901 * largest possible bit mask, a swap entry with swap type 0 1923 * largest possible bit mask, a swap entry with swap type 0
1902 * and swap offset ~0UL is created, encoded to a swap pte, 1924 * and swap offset ~0UL is created, encoded to a swap pte,
1903 * decoded to a swp_entry_t again, and finally the swap 1925 * decoded to a swp_entry_t again, and finally the swap
1904 * offset is extracted. This will mask all the bits from 1926 * offset is extracted. This will mask all the bits from
1905 * the initial ~0UL mask that can't be encoded in either 1927 * the initial ~0UL mask that can't be encoded in either
1906 * the swp_entry_t or the architecture definition of a 1928 * the swp_entry_t or the architecture definition of a
1907 * swap pte. Then the same is done for a radix_tree entry. 1929 * swap pte.
1908 */ 1930 */
1909 maxpages = swp_offset(pte_to_swp_entry( 1931 maxpages = swp_offset(pte_to_swp_entry(
1910 swp_entry_to_pte(swp_entry(0, ~0UL)))); 1932 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
1911 maxpages = swp_offset(radix_to_swp_entry(
1912 swp_to_radix_entry(swp_entry(0, maxpages)))) + 1;
1913
1914 if (maxpages > swap_header->info.last_page) { 1933 if (maxpages > swap_header->info.last_page) {
1915 maxpages = swap_header->info.last_page + 1; 1934 maxpages = swap_header->info.last_page + 1;
1916 /* p->max is an unsigned int: don't overflow it */ 1935 /* p->max is an unsigned int: don't overflow it */
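The trimmed comment above keeps the derivation: encode a swap entry with type 0 and offset ~0UL into a swap pte, decode it back, and whatever offset bits survive define the largest representable offset; the extra clamp against radix-tree exceptional entries is dropped, so maxpages is simply that survivor plus one. A runnable toy with a made-up 24-bit offset field showing the same round-trip trick:

/* Illustrative sketch; OFFSET_BITS stands in for the arch pte layout. */
#include <stdio.h>

#define OFFSET_BITS 24u

static unsigned long pack(unsigned long type, unsigned long offset)
{
        return ((offset & ((1ul << OFFSET_BITS) - 1)) << 5) | (type & 0x1f);
}

static unsigned long unpack_offset(unsigned long entry)
{
        return entry >> 5;
}

int main(void)
{
        unsigned long maxpages = unpack_offset(pack(0, ~0ul)) + 1;

        printf("max pages per swap device: %lu\n", maxpages);  /* 16777216 */
        return 0;
}
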
@@ -1988,6 +2007,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
1988 sector_t span; 2007 sector_t span;
1989 unsigned long maxpages; 2008 unsigned long maxpages;
1990 unsigned char *swap_map = NULL; 2009 unsigned char *swap_map = NULL;
2010 unsigned long *frontswap_map = NULL;
1991 struct page *page = NULL; 2011 struct page *page = NULL;
1992 struct inode *inode = NULL; 2012 struct inode *inode = NULL;
1993 2013
@@ -2071,6 +2091,9 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2071 error = nr_extents; 2091 error = nr_extents;
2072 goto bad_swap; 2092 goto bad_swap;
2073 } 2093 }
2094 /* frontswap enabled? set up bit-per-page map for frontswap */
2095 if (frontswap_enabled)
2096 frontswap_map = vzalloc(maxpages / sizeof(long));
2074 2097
2075 if (p->bdev) { 2098 if (p->bdev) {
2076 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2099 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) {
@@ -2086,14 +2109,15 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
2086 if (swap_flags & SWAP_FLAG_PREFER) 2109 if (swap_flags & SWAP_FLAG_PREFER)
2087 prio = 2110 prio =
2088 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 2111 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
2089 enable_swap_info(p, prio, swap_map); 2112 enable_swap_info(p, prio, swap_map, frontswap_map);
2090 2113
2091 printk(KERN_INFO "Adding %uk swap on %s. " 2114 printk(KERN_INFO "Adding %uk swap on %s. "
2092 "Priority:%d extents:%d across:%lluk %s%s\n", 2115 "Priority:%d extents:%d across:%lluk %s%s%s\n",
2093 p->pages<<(PAGE_SHIFT-10), name, p->prio, 2116 p->pages<<(PAGE_SHIFT-10), name, p->prio,
2094 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 2117 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10),
2095 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 2118 (p->flags & SWP_SOLIDSTATE) ? "SS" : "",
2096 (p->flags & SWP_DISCARDABLE) ? "D" : ""); 2119 (p->flags & SWP_DISCARDABLE) ? "D" : "",
2120 (frontswap_map) ? "FS" : "");
2097 2121
2098 mutex_unlock(&swapon_mutex); 2122 mutex_unlock(&swapon_mutex);
2099 atomic_inc(&proc_poll_event); 2123 atomic_inc(&proc_poll_event);
diff --git a/mm/util.c b/mm/util.c
index ae962b31de88..8c7265afa29f 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -4,6 +4,7 @@
4#include <linux/export.h> 4#include <linux/export.h>
5#include <linux/err.h> 5#include <linux/err.h>
6#include <linux/sched.h> 6#include <linux/sched.h>
7#include <linux/security.h>
7#include <asm/uaccess.h> 8#include <asm/uaccess.h>
8 9
9#include "internal.h" 10#include "internal.h"
@@ -341,6 +342,35 @@ int __attribute__((weak)) get_user_pages_fast(unsigned long start,
341} 342}
342EXPORT_SYMBOL_GPL(get_user_pages_fast); 343EXPORT_SYMBOL_GPL(get_user_pages_fast);
343 344
345unsigned long vm_mmap_pgoff(struct file *file, unsigned long addr,
346 unsigned long len, unsigned long prot,
347 unsigned long flag, unsigned long pgoff)
348{
349 unsigned long ret;
350 struct mm_struct *mm = current->mm;
351
352 ret = security_mmap_file(file, prot, flag);
353 if (!ret) {
354 down_write(&mm->mmap_sem);
355 ret = do_mmap_pgoff(file, addr, len, prot, flag, pgoff);
356 up_write(&mm->mmap_sem);
357 }
358 return ret;
359}
360
361unsigned long vm_mmap(struct file *file, unsigned long addr,
362 unsigned long len, unsigned long prot,
363 unsigned long flag, unsigned long offset)
364{
365 if (unlikely(offset + PAGE_ALIGN(len) < offset))
366 return -EINVAL;
367 if (unlikely(offset & ~PAGE_MASK))
368 return -EINVAL;
369
370 return vm_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
371}
372EXPORT_SYMBOL(vm_mmap);
373
344/* Tracepoints definitions. */ 374/* Tracepoints definitions. */
345EXPORT_TRACEPOINT_SYMBOL(kmalloc); 375EXPORT_TRACEPOINT_SYMBOL(kmalloc);
346EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); 376EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
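The new vm_mmap_pgoff()/vm_mmap() helpers above fold the security_mmap_file() check and the mmap_sem handling into mm/util.c; vm_mmap() itself only validates the byte offset (it must not wrap once len is page-aligned, and must itself be page-aligned) before converting it to a page offset. A userspace sketch of those two checks, assuming a hypothetical 4 KiB page size:

/* Illustrative sketch of the argument checks, not the kernel code. */
#include <stdio.h>

#define PAGE_SIZE     4096ul
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & PAGE_MASK)

static int check_mmap_args(unsigned long len, unsigned long offset)
{
        if (offset + PAGE_ALIGN(len) < offset)  /* wrapped around */
                return -1;
        if (offset & ~PAGE_MASK)                /* not page aligned */
                return -1;
        return 0;
}

int main(void)
{
        printf("%d\n", check_mmap_args(8192, 4096));              /* 0: ok          */
        printf("%d\n", check_mmap_args(8192, ~0ul & PAGE_MASK));  /* -1: wraps      */
        printf("%d\n", check_mmap_args(8192, 123));               /* -1: misaligned */
        return 0;
}
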
diff --git a/net/9p/trans_virtio.c b/net/9p/trans_virtio.c
index 5af18d11b518..2a167658bb95 100644
--- a/net/9p/trans_virtio.c
+++ b/net/9p/trans_virtio.c
@@ -192,10 +192,10 @@ static int pack_sg_list(struct scatterlist *sg, int start,
192 s = rest_of_page(data); 192 s = rest_of_page(data);
193 if (s > count) 193 if (s > count)
194 s = count; 194 s = count;
195 BUG_ON(index > limit);
195 sg_set_buf(&sg[index++], data, s); 196 sg_set_buf(&sg[index++], data, s);
196 count -= s; 197 count -= s;
197 data += s; 198 data += s;
198 BUG_ON(index > limit);
199 } 199 }
200 200
201 return index-start; 201 return index-start;
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0301b328cf0f..86852963b7f7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
1208 if (addr->sat_addr.s_node == ATADDR_BCAST && 1208 if (addr->sat_addr.s_node == ATADDR_BCAST &&
1209 !sock_flag(sk, SOCK_BROADCAST)) { 1209 !sock_flag(sk, SOCK_BROADCAST)) {
1210#if 1 1210#if 1
1211 printk(KERN_WARNING "%s is broken and did not set " 1211 pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
1212 "SO_BROADCAST. It will break when 2.2 is "
1213 "released.\n",
1214 current->comm); 1212 current->comm);
1215#else 1213#else
1216 return -EACCES; 1214 return -EACCES;
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 46e7f86acfc9..3e18af4dadc4 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -210,7 +210,7 @@ struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock)
210 } 210 }
211 211
212 if (sk->sk_state == BT_CONNECTED || !newsock || 212 if (sk->sk_state == BT_CONNECTED || !newsock ||
213 test_bit(BT_DEFER_SETUP, &bt_sk(parent)->flags)) { 213 test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags)) {
214 bt_accept_unlink(sk); 214 bt_accept_unlink(sk);
215 if (newsock) 215 if (newsock)
216 sock_graft(sk, newsock); 216 sock_graft(sk, newsock);
diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
index aa5d73b786ac..d1820ff14aee 100644
--- a/net/bluetooth/rfcomm/tty.c
+++ b/net/bluetooth/rfcomm/tty.c
@@ -710,9 +710,9 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
710 break; 710 break;
711 } 711 }
712 712
713 tty_unlock(tty); 713 tty_unlock();
714 schedule(); 714 schedule();
715 tty_lock(tty); 715 tty_lock();
716 } 716 }
717 set_current_state(TASK_RUNNING); 717 set_current_state(TASK_RUNNING);
718 remove_wait_queue(&dev->wait, &wait); 718 remove_wait_queue(&dev->wait, &wait);
diff --git a/net/ceph/ceph_common.c b/net/ceph/ceph_common.c
index a776f751edbf..ba4323bce0e9 100644
--- a/net/ceph/ceph_common.c
+++ b/net/ceph/ceph_common.c
@@ -504,13 +504,6 @@ void ceph_destroy_client(struct ceph_client *client)
504 /* unmount */ 504 /* unmount */
505 ceph_osdc_stop(&client->osdc); 505 ceph_osdc_stop(&client->osdc);
506 506
507 /*
508 * make sure osd connections close out before destroying the
509 * auth module, which is needed to free those connections'
510 * ceph_authorizers.
511 */
512 ceph_msgr_flush();
513
514 ceph_monc_stop(&client->monc); 507 ceph_monc_stop(&client->monc);
515 508
516 ceph_debugfs_client_cleanup(client); 509 ceph_debugfs_client_cleanup(client);
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 524f4e4f598b..b332c3d76059 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -563,6 +563,10 @@ static void prepare_write_message(struct ceph_connection *con)
563 m->hdr.seq = cpu_to_le64(++con->out_seq); 563 m->hdr.seq = cpu_to_le64(++con->out_seq);
564 m->needs_out_seq = false; 564 m->needs_out_seq = false;
565 } 565 }
566#ifdef CONFIG_BLOCK
567 else
568 m->bio_iter = NULL;
569#endif
566 570
567 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n", 571 dout("prepare_write_message %p seq %lld type %d len %d+%d+%d %d pgs\n",
568 m, con->out_seq, le16_to_cpu(m->hdr.type), 572 m, con->out_seq, le16_to_cpu(m->hdr.type),
diff --git a/net/ceph/mon_client.c b/net/ceph/mon_client.c
index 10d6008d31f2..d0649a9655be 100644
--- a/net/ceph/mon_client.c
+++ b/net/ceph/mon_client.c
@@ -847,6 +847,14 @@ void ceph_monc_stop(struct ceph_mon_client *monc)
847 847
848 mutex_unlock(&monc->mutex); 848 mutex_unlock(&monc->mutex);
849 849
850 /*
851 * flush msgr queue before we destroy ourselves to ensure that:
852 * - any work that references our embedded con is finished.
853 * - any osd_client or other work that may reference an authorizer
854 * finishes before we shut down the auth subsystem.
855 */
856 ceph_msgr_flush();
857
850 ceph_auth_destroy(monc->auth); 858 ceph_auth_destroy(monc->auth);
851 859
852 ceph_msg_put(monc->m_auth); 860 ceph_msg_put(monc->m_auth);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 1ffebed5ce0f..ca59e66c9787 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -139,15 +139,15 @@ void ceph_osdc_release_request(struct kref *kref)
139 139
140 if (req->r_request) 140 if (req->r_request)
141 ceph_msg_put(req->r_request); 141 ceph_msg_put(req->r_request);
142 if (req->r_reply)
143 ceph_msg_put(req->r_reply);
144 if (req->r_con_filling_msg) { 142 if (req->r_con_filling_msg) {
145 dout("release_request revoking pages %p from con %p\n", 143 dout("release_request revoking pages %p from con %p\n",
146 req->r_pages, req->r_con_filling_msg); 144 req->r_pages, req->r_con_filling_msg);
147 ceph_con_revoke_message(req->r_con_filling_msg, 145 ceph_con_revoke_message(req->r_con_filling_msg,
148 req->r_reply); 146 req->r_reply);
149 ceph_con_put(req->r_con_filling_msg); 147 req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
150 } 148 }
149 if (req->r_reply)
150 ceph_msg_put(req->r_reply);
151 if (req->r_own_pages) 151 if (req->r_own_pages)
152 ceph_release_page_vector(req->r_pages, 152 ceph_release_page_vector(req->r_pages,
153 req->r_num_pages); 153 req->r_num_pages);
@@ -1216,7 +1216,7 @@ static void handle_reply(struct ceph_osd_client *osdc, struct ceph_msg *msg,
1216 if (req->r_con_filling_msg == con && req->r_reply == msg) { 1216 if (req->r_con_filling_msg == con && req->r_reply == msg) {
1217 dout(" dropping con_filling_msg ref %p\n", con); 1217 dout(" dropping con_filling_msg ref %p\n", con);
1218 req->r_con_filling_msg = NULL; 1218 req->r_con_filling_msg = NULL;
1219 ceph_con_put(con); 1219 con->ops->put(con);
1220 } 1220 }
1221 1221
1222 if (!req->r_got_reply) { 1222 if (!req->r_got_reply) {
@@ -2028,7 +2028,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2028 dout("get_reply revoking msg %p from old con %p\n", 2028 dout("get_reply revoking msg %p from old con %p\n",
2029 req->r_reply, req->r_con_filling_msg); 2029 req->r_reply, req->r_con_filling_msg);
2030 ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply); 2030 ceph_con_revoke_message(req->r_con_filling_msg, req->r_reply);
2031 ceph_con_put(req->r_con_filling_msg); 2031 req->r_con_filling_msg->ops->put(req->r_con_filling_msg);
2032 req->r_con_filling_msg = NULL; 2032 req->r_con_filling_msg = NULL;
2033 } 2033 }
2034 2034
@@ -2063,7 +2063,7 @@ static struct ceph_msg *get_reply(struct ceph_connection *con,
2063#endif 2063#endif
2064 } 2064 }
2065 *skip = 0; 2065 *skip = 0;
2066 req->r_con_filling_msg = ceph_con_get(con); 2066 req->r_con_filling_msg = con->ops->get(con);
2067 dout("get_reply tid %lld %p\n", tid, m); 2067 dout("get_reply tid %lld %p\n", tid, m);
2068 2068
2069out: 2069out:
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index 3252e7e0a005..d23b6682f4e9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -36,9 +36,6 @@
36#define TRACE_ON 1 36#define TRACE_ON 1
37#define TRACE_OFF 0 37#define TRACE_OFF 0
38 38
39static void send_dm_alert(struct work_struct *unused);
40
41
42/* 39/*
43 * Globals, our netlink socket pointer 40 * Globals, our netlink socket pointer
44 * and the work handle that will send up 41 * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
48static DEFINE_MUTEX(trace_state_mutex); 45static DEFINE_MUTEX(trace_state_mutex);
49 46
50struct per_cpu_dm_data { 47struct per_cpu_dm_data {
51 struct work_struct dm_alert_work; 48 spinlock_t lock;
52 struct sk_buff __rcu *skb; 49 struct sk_buff *skb;
53 atomic_t dm_hit_count; 50 struct work_struct dm_alert_work;
54 struct timer_list send_timer; 51 struct timer_list send_timer;
55 int cpu;
56}; 52};
57 53
58struct dm_hw_stat_delta { 54struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
78static unsigned long dm_hw_check_delta = 2*HZ; 74static unsigned long dm_hw_check_delta = 2*HZ;
79static LIST_HEAD(hw_stats_list); 75static LIST_HEAD(hw_stats_list);
80 76
81static void reset_per_cpu_data(struct per_cpu_dm_data *data) 77static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
82{ 78{
83 size_t al; 79 size_t al;
84 struct net_dm_alert_msg *msg; 80 struct net_dm_alert_msg *msg;
85 struct nlattr *nla; 81 struct nlattr *nla;
86 struct sk_buff *skb; 82 struct sk_buff *skb;
87 struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1); 83 unsigned long flags;
88 84
89 al = sizeof(struct net_dm_alert_msg); 85 al = sizeof(struct net_dm_alert_msg);
90 al += dm_hit_limit * sizeof(struct net_dm_drop_point); 86 al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
99 sizeof(struct net_dm_alert_msg)); 95 sizeof(struct net_dm_alert_msg));
100 msg = nla_data(nla); 96 msg = nla_data(nla);
101 memset(msg, 0, al); 97 memset(msg, 0, al);
102 } else 98 } else {
103 schedule_work_on(data->cpu, &data->dm_alert_work); 99 mod_timer(&data->send_timer, jiffies + HZ / 10);
104
105 /*
106 * Don't need to lock this, since we are guaranteed to only
107 * run this on a single cpu at a time.
108 * Note also that we only update data->skb if the old and new skb
109 * pointers don't match. This ensures that we don't continually call
110 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
111 */
112 if (skb != oskb) {
113 rcu_assign_pointer(data->skb, skb);
114
115 synchronize_rcu();
116
117 atomic_set(&data->dm_hit_count, dm_hit_limit);
118 } 100 }
119 101
102 spin_lock_irqsave(&data->lock, flags);
103 swap(data->skb, skb);
104 spin_unlock_irqrestore(&data->lock, flags);
105
106 return skb;
120} 107}
121 108
122static void send_dm_alert(struct work_struct *unused) 109static void send_dm_alert(struct work_struct *work)
123{ 110{
124 struct sk_buff *skb; 111 struct sk_buff *skb;
125 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 112 struct per_cpu_dm_data *data;
126 113
127 WARN_ON_ONCE(data->cpu != smp_processor_id()); 114 data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
128 115
129 /* 116 skb = reset_per_cpu_data(data);
130 * Grab the skb we're about to send
131 */
132 skb = rcu_dereference_protected(data->skb, 1);
133
134 /*
135 * Replace it with a new one
136 */
137 reset_per_cpu_data(data);
138 117
139 /*
140 * Ship it!
141 */
142 if (skb) 118 if (skb)
143 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL); 119 genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
144
145 put_cpu_var(dm_cpu_data);
146} 120}
147 121
148/* 122/*
149 * This is the timer function to delay the sending of an alert 123 * This is the timer function to delay the sending of an alert
150 * in the event that more drops will arrive during the 124 * in the event that more drops will arrive during the
151 * hysteresis period. Note that it operates under the timer interrupt 125 * hysteresis period.
152 * so we don't need to disable preemption here
153 */ 126 */
154static void sched_send_work(unsigned long unused) 127static void sched_send_work(unsigned long _data)
155{ 128{
156 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 129 struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
157
158 schedule_work_on(smp_processor_id(), &data->dm_alert_work);
159 130
160 put_cpu_var(dm_cpu_data); 131 schedule_work(&data->dm_alert_work);
161} 132}
162 133
163static void trace_drop_common(struct sk_buff *skb, void *location) 134static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
167 struct nlattr *nla; 138 struct nlattr *nla;
168 int i; 139 int i;
169 struct sk_buff *dskb; 140 struct sk_buff *dskb;
170 struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data); 141 struct per_cpu_dm_data *data;
171 142 unsigned long flags;
172 143
173 rcu_read_lock(); 144 local_irq_save(flags);
174 dskb = rcu_dereference(data->skb); 145 data = &__get_cpu_var(dm_cpu_data);
146 spin_lock(&data->lock);
147 dskb = data->skb;
175 148
176 if (!dskb) 149 if (!dskb)
177 goto out; 150 goto out;
178 151
179 if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
180 /*
181 * we're already at zero, discard this hit
182 */
183 goto out;
184 }
185
186 nlh = (struct nlmsghdr *)dskb->data; 152 nlh = (struct nlmsghdr *)dskb->data;
187 nla = genlmsg_data(nlmsg_data(nlh)); 153 nla = genlmsg_data(nlmsg_data(nlh));
188 msg = nla_data(nla); 154 msg = nla_data(nla);
189 for (i = 0; i < msg->entries; i++) { 155 for (i = 0; i < msg->entries; i++) {
190 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) { 156 if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
191 msg->points[i].count++; 157 msg->points[i].count++;
192 atomic_inc(&data->dm_hit_count);
193 goto out; 158 goto out;
194 } 159 }
195 } 160 }
196 161 if (msg->entries == dm_hit_limit)
162 goto out;
197 /* 163 /*
198 * We need to create a new entry 164 * We need to create a new entry
199 */ 165 */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
205 171
206 if (!timer_pending(&data->send_timer)) { 172 if (!timer_pending(&data->send_timer)) {
207 data->send_timer.expires = jiffies + dm_delay * HZ; 173 data->send_timer.expires = jiffies + dm_delay * HZ;
208 add_timer_on(&data->send_timer, smp_processor_id()); 174 add_timer(&data->send_timer);
209 } 175 }
210 176
211out: 177out:
212 rcu_read_unlock(); 178 spin_unlock_irqrestore(&data->lock, flags);
213 put_cpu_var(dm_cpu_data);
214 return;
215} 179}
216 180
217static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location) 181static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
418 382
419 for_each_possible_cpu(cpu) { 383 for_each_possible_cpu(cpu) {
420 data = &per_cpu(dm_cpu_data, cpu); 384 data = &per_cpu(dm_cpu_data, cpu);
421 data->cpu = cpu;
422 INIT_WORK(&data->dm_alert_work, send_dm_alert); 385 INIT_WORK(&data->dm_alert_work, send_dm_alert);
423 init_timer(&data->send_timer); 386 init_timer(&data->send_timer);
424 data->send_timer.data = cpu; 387 data->send_timer.data = (unsigned long)data;
425 data->send_timer.function = sched_send_work; 388 data->send_timer.function = sched_send_work;
389 spin_lock_init(&data->lock);
426 reset_per_cpu_data(data); 390 reset_per_cpu_data(data);
427 } 391 }
428 392
@@ -468,3 +432,4 @@ module_exit(exit_net_drop_monitor);
468 432
469MODULE_LICENSE("GPL v2"); 433MODULE_LICENSE("GPL v2");
470MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>"); 434MODULE_AUTHOR("Neil Horman <nhorman@tuxdriver.com>");
435MODULE_ALIAS_GENL_FAMILY("NET_DM");
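The drop_monitor rework above drops the RCU-protected skb pointer, the atomic hit counter and the per-cpu work pinning: each per_cpu_dm_data now carries a spinlock, trace hits append to the skb under that lock, and the worker swaps a fresh skb in under the same lock before multicasting the old one with the lock dropped. A minimal pthread sketch (hypothetical struct drops) of that swap-under-lock, send-outside-lock shape:

/* Illustrative sketch, not the kernel code. */
#include <pthread.h>
#include <stdlib.h>

struct drops {
        pthread_mutex_t lock;
        int *buf;               /* current accumulation buffer */
        size_t count;
};

static void record_drop(struct drops *d, int value)
{
        pthread_mutex_lock(&d->lock);
        if (d->buf)
                d->buf[d->count++] = value;     /* cheap work only, under the lock */
        pthread_mutex_unlock(&d->lock);
}

static int *swap_out(struct drops *d, int *fresh)
{
        int *old;

        pthread_mutex_lock(&d->lock);
        old = d->buf;
        d->buf = fresh;
        d->count = 0;
        pthread_mutex_unlock(&d->lock);
        return old;                             /* transmit/free with the lock dropped */
}

int main(void)
{
        struct drops d;
        int *spare = malloc(16 * sizeof(int));

        pthread_mutex_init(&d.lock, NULL);
        d.buf = malloc(16 * sizeof(int));
        d.count = 0;

        record_drop(&d, 42);
        free(swap_out(&d, spare));  /* the real code hands the old skb to genlmsg_multicast() */
        free(d.buf);
        return 0;
}
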
diff --git a/net/core/filter.c b/net/core/filter.c
index a3eddb515d1b..d4ce2dc712e3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
616/** 616/**
617 * sk_unattached_filter_create - create an unattached filter 617 * sk_unattached_filter_create - create an unattached filter
618 * @fprog: the filter program 618 * @fprog: the filter program
619 * @sk: the socket to use 619 * @pfp: the unattached filter that is created
620 * 620 *
621 * Create a filter independent ofr any socket. We first run some 621 * Create a filter independent of any socket. We first run some
622 * sanity checks on it to make sure it does not explode on us later. 622 * sanity checks on it to make sure it does not explode on us later.
623 * If an error occurs or there is insufficient memory for the filter 623 * If an error occurs or there is insufficient memory for the filter
624 * a negative errno code is returned. On success the return is zero. 624 * a negative errno code is returned. On success the return is zero.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eb09f8bbbf07..d81d026138f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2219 rcu_read_lock_bh(); 2219 rcu_read_lock_bh();
2220 nht = rcu_dereference_bh(tbl->nht); 2220 nht = rcu_dereference_bh(tbl->nht);
2221 2221
2222 for (h = 0; h < (1 << nht->hash_shift); h++) { 2222 for (h = s_h; h < (1 << nht->hash_shift); h++) {
2223 if (h < s_h)
2224 continue;
2225 if (h > s_h) 2223 if (h > s_h)
2226 s_idx = 0; 2224 s_idx = 0;
2227 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0; 2225 for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
2260 2258
2261 read_lock_bh(&tbl->lock); 2259 read_lock_bh(&tbl->lock);
2262 2260
2263 for (h = 0; h <= PNEIGH_HASHMASK; h++) { 2261 for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
2264 if (h < s_h)
2265 continue;
2266 if (h > s_h) 2262 if (h > s_h)
2267 s_idx = 0; 2263 s_idx = 0;
2268 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) { 2264 for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2297 struct neigh_table *tbl; 2293 struct neigh_table *tbl;
2298 int t, family, s_t; 2294 int t, family, s_t;
2299 int proxy = 0; 2295 int proxy = 0;
2300 int err = 0; 2296 int err;
2301 2297
2302 read_lock(&neigh_tbl_lock); 2298 read_lock(&neigh_tbl_lock);
2303 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family; 2299 family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2311 2307
2312 s_t = cb->args[0]; 2308 s_t = cb->args[0];
2313 2309
2314 for (tbl = neigh_tables, t = 0; tbl && (err >= 0); 2310 for (tbl = neigh_tables, t = 0; tbl;
2315 tbl = tbl->next, t++) { 2311 tbl = tbl->next, t++) {
2316 if (t < s_t || (family && tbl->family != family)) 2312 if (t < s_t || (family && tbl->family != family))
2317 continue; 2313 continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
2322 err = pneigh_dump_table(tbl, skb, cb); 2318 err = pneigh_dump_table(tbl, skb, cb);
2323 else 2319 else
2324 err = neigh_dump_table(tbl, skb, cb); 2320 err = neigh_dump_table(tbl, skb, cb);
2321 if (err < 0)
2322 break;
2325 } 2323 }
2326 read_unlock(&neigh_tbl_lock); 2324 read_unlock(&neigh_tbl_lock);
2327 2325
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index 3d84fb9d8873..f9f40b932e4b 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -362,22 +362,23 @@ EXPORT_SYMBOL(netpoll_send_skb_on_dev);
362 362
363void netpoll_send_udp(struct netpoll *np, const char *msg, int len) 363void netpoll_send_udp(struct netpoll *np, const char *msg, int len)
364{ 364{
365 int total_len, eth_len, ip_len, udp_len; 365 int total_len, ip_len, udp_len;
366 struct sk_buff *skb; 366 struct sk_buff *skb;
367 struct udphdr *udph; 367 struct udphdr *udph;
368 struct iphdr *iph; 368 struct iphdr *iph;
369 struct ethhdr *eth; 369 struct ethhdr *eth;
370 370
371 udp_len = len + sizeof(*udph); 371 udp_len = len + sizeof(*udph);
372 ip_len = eth_len = udp_len + sizeof(*iph); 372 ip_len = udp_len + sizeof(*iph);
373 total_len = eth_len + ETH_HLEN + NET_IP_ALIGN; 373 total_len = ip_len + LL_RESERVED_SPACE(np->dev);
374 374
375 skb = find_skb(np, total_len, total_len - len); 375 skb = find_skb(np, total_len + np->dev->needed_tailroom,
376 total_len - len);
376 if (!skb) 377 if (!skb)
377 return; 378 return;
378 379
379 skb_copy_to_linear_data(skb, msg, len); 380 skb_copy_to_linear_data(skb, msg, len);
380 skb->len += len; 381 skb_put(skb, len);
381 382
382 skb_push(skb, sizeof(*udph)); 383 skb_push(skb, sizeof(*udph));
383 skb_reset_transport_header(skb); 384 skb_reset_transport_header(skb);
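netpoll_send_udp() above now sizes the buffer from the device's link-layer reserved space plus needed_tailroom instead of a bare Ethernet header, and switches the bare skb->len += len to skb_put() so the tail pointer moves with the length. A small arithmetic sketch with made-up sizes:

/* Illustrative sketch with hypothetical sizes. */
#include <stdio.h>

int main(void)
{
        unsigned int len = 100;              /* UDP payload */
        unsigned int udp_len = len + 8;      /* + struct udphdr */
        unsigned int ip_len = udp_len + 20;  /* + struct iphdr */
        unsigned int ll_reserved = 16;       /* hypothetical LL_RESERVED_SPACE(dev) */
        unsigned int tailroom = 4;           /* hypothetical dev->needed_tailroom */
        unsigned int total_len = ip_len + ll_reserved;

        printf("allocate %u bytes, reserve %u ahead of the payload\n",
               total_len + tailroom, total_len - len);
        return 0;
}
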
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 016694d62484..d78671e9d545 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
3361 * @to: prior buffer 3361 * @to: prior buffer
3362 * @from: buffer to add 3362 * @from: buffer to add
3363 * @fragstolen: pointer to boolean 3363 * @fragstolen: pointer to boolean
3364 * 3364 * @delta_truesize: how much more was allocated than was requested
3365 */ 3365 */
3366bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from, 3366bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
3367 bool *fragstolen, int *delta_truesize) 3367 bool *fragstolen, int *delta_truesize)
diff --git a/net/core/sock.c b/net/core/sock.c
index 653f8c0aedc5..9e5b71fda6ec 100644
--- a/net/core/sock.c
+++ b/net/core/sock.c
@@ -1592,6 +1592,11 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1592 gfp_t gfp_mask; 1592 gfp_t gfp_mask;
1593 long timeo; 1593 long timeo;
1594 int err; 1594 int err;
1595 int npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1596
1597 err = -EMSGSIZE;
1598 if (npages > MAX_SKB_FRAGS)
1599 goto failure;
1595 1600
1596 gfp_mask = sk->sk_allocation; 1601 gfp_mask = sk->sk_allocation;
1597 if (gfp_mask & __GFP_WAIT) 1602 if (gfp_mask & __GFP_WAIT)
@@ -1610,14 +1615,12 @@ struct sk_buff *sock_alloc_send_pskb(struct sock *sk, unsigned long header_len,
1610 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) { 1615 if (atomic_read(&sk->sk_wmem_alloc) < sk->sk_sndbuf) {
1611 skb = alloc_skb(header_len, gfp_mask); 1616 skb = alloc_skb(header_len, gfp_mask);
1612 if (skb) { 1617 if (skb) {
1613 int npages;
1614 int i; 1618 int i;
1615 1619
1616 /* No pages, we're done... */ 1620 /* No pages, we're done... */
1617 if (!data_len) 1621 if (!data_len)
1618 break; 1622 break;
1619 1623
1620 npages = (data_len + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
1621 skb->truesize += data_len; 1624 skb->truesize += data_len;
1622 skb_shinfo(skb)->nr_frags = npages; 1625 skb_shinfo(skb)->nr_frags = npages;
1623 for (i = 0; i < npages; i++) { 1626 for (i = 0; i < npages; i++) {
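sock_alloc_send_pskb() above computes the page-fragment count once, before the wait loop, and fails fast with EMSGSIZE when the payload would need more fragments than an skb can carry. The rounding-up arithmetic, runnable with a hypothetical 4 KiB page and a stand-in for MAX_SKB_FRAGS:

/* Illustrative sketch of the fragment-count check. */
#include <stdio.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1ul << PAGE_SHIFT)
#define MAX_FRAGS  17                 /* stands in for MAX_SKB_FRAGS */

int main(void)
{
        unsigned long data_len = 100000;
        unsigned long npages = (data_len + PAGE_SIZE - 1) >> PAGE_SHIFT;

        printf("%lu pages -> %s\n", npages,
               npages > MAX_FRAGS ? "rejected (EMSGSIZE)" : "accepted");
        return 0;
}
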
diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
index 89a47b35905d..cb982a61536f 100644
--- a/net/ipv4/esp4.c
+++ b/net/ipv4/esp4.c
@@ -459,28 +459,22 @@ static u32 esp4_get_mtu(struct xfrm_state *x, int mtu)
459 struct esp_data *esp = x->data; 459 struct esp_data *esp = x->data;
460 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); 460 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
461 u32 align = max_t(u32, blksize, esp->padlen); 461 u32 align = max_t(u32, blksize, esp->padlen);
462 u32 rem; 462 unsigned int net_adj;
463
464 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead);
465 rem = mtu & (align - 1);
466 mtu &= ~(align - 1);
467 463
468 switch (x->props.mode) { 464 switch (x->props.mode) {
469 case XFRM_MODE_TUNNEL:
470 break;
471 default:
472 case XFRM_MODE_TRANSPORT: 465 case XFRM_MODE_TRANSPORT:
473 /* The worst case */
474 mtu -= blksize - 4;
475 mtu += min_t(u32, blksize - 4, rem);
476 break;
477 case XFRM_MODE_BEET: 466 case XFRM_MODE_BEET:
478 /* The worst case. */ 467 net_adj = sizeof(struct iphdr);
479 mtu += min_t(u32, IPV4_BEET_PHMAXLEN, rem);
480 break; 468 break;
469 case XFRM_MODE_TUNNEL:
470 net_adj = 0;
471 break;
472 default:
473 BUG();
481 } 474 }
482 475
483 return mtu - 2; 476 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
477 net_adj) & ~(align - 1)) + (net_adj - 2);
484} 478}
485 479
486static void esp4_err(struct sk_buff *skb, u32 info) 480static void esp4_err(struct sk_buff *skb, u32 info)
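esp4_get_mtu() above (and esp6_get_mtu() further down) collapses the per-mode special cases into one formula: subtract the ESP header and ICV overhead plus a mode-dependent net_adj (the inner IP header size in transport/BEET mode, 0 in tunnel mode), round down to the cipher alignment, then give net_adj back minus the two trailing pad-length/next-header bytes. A runnable sketch with made-up sizes:

/* Illustrative sketch of the formula with hypothetical values. */
#include <stdio.h>

int main(void)
{
        unsigned int mtu = 1500;
        unsigned int header_len = 24;   /* hypothetical x->props.header_len */
        unsigned int authsize = 12;     /* hypothetical ICV size */
        unsigned int align = 16;        /* hypothetical cipher block alignment */
        unsigned int net_adj = 20;      /* sizeof(struct iphdr), transport mode */
        unsigned int payload;

        payload = ((mtu - header_len - authsize - net_adj) & ~(align - 1))
                  + (net_adj - 2);
        printf("usable payload: %u\n", payload);        /* 1458 with these numbers */
        return 0;
}
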
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 95e61596e605..f9ee7417f6a0 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -377,7 +377,8 @@ struct dst_entry *inet_csk_route_req(struct sock *sk,
377 377
378 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark, 378 flowi4_init_output(fl4, sk->sk_bound_dev_if, sk->sk_mark,
379 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, 379 RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
380 sk->sk_protocol, inet_sk_flowi_flags(sk), 380 sk->sk_protocol,
381 inet_sk_flowi_flags(sk) & ~FLOWI_FLAG_PRECOW_METRICS,
381 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr, 382 (opt && opt->opt.srr) ? opt->opt.faddr : ireq->rmt_addr,
382 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport); 383 ireq->loc_addr, ireq->rmt_port, inet_sk(sk)->inet_sport);
383 security_req_classify_flow(req, flowi4_to_flowi(fl4)); 384 security_req_classify_flow(req, flowi4_to_flowi(fl4));
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d4d61b694fab..dfba343b2509 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
560} 560}
561EXPORT_SYMBOL(inet_peer_xrlim_allow); 561EXPORT_SYMBOL(inet_peer_xrlim_allow);
562 562
563static void inetpeer_inval_rcu(struct rcu_head *head)
564{
565 struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
566
567 spin_lock_bh(&gc_lock);
568 list_add_tail(&p->gc_list, &gc_list);
569 spin_unlock_bh(&gc_lock);
570
571 schedule_delayed_work(&gc_work, gc_delay);
572}
573
563void inetpeer_invalidate_tree(int family) 574void inetpeer_invalidate_tree(int family)
564{ 575{
565 struct inet_peer *old, *new, *prev; 576 struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
576 prev = cmpxchg(&base->root, old, new); 587 prev = cmpxchg(&base->root, old, new);
577 if (prev == old) { 588 if (prev == old) {
578 base->total = 0; 589 base->total = 0;
579 spin_lock(&gc_lock); 590 call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
580 list_add_tail(&prev->gc_list, &gc_list);
581 spin_unlock(&gc_lock);
582 schedule_delayed_work(&gc_work, gc_delay);
583 } 591 }
584 592
585out: 593out:
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e5c44fc586ab..ab09b126423c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
44 struct ip_options *opt = &(IPCB(skb)->opt); 44 struct ip_options *opt = &(IPCB(skb)->opt);
45 45
46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 46 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
47 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
47 48
48 if (unlikely(opt->optlen)) 49 if (unlikely(opt->optlen))
49 ip_forward_options(skb); 50 ip_forward_options(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9e519ad6db5..c94bbc6f2ba3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
1574 struct ip_options *opt = &(IPCB(skb)->opt); 1574 struct ip_options *opt = &(IPCB(skb)->opt);
1575 1575
1576 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS); 1576 IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
1577 IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
1577 1578
1578 if (unlikely(opt->optlen)) 1579 if (unlikely(opt->optlen))
1579 ip_forward_options(skb); 1580 ip_forward_options(skb);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a43b87dfe800..c8d28c433b2b 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -824,7 +824,8 @@ static void tcp_v4_reqsk_send_ack(struct sock *sk, struct sk_buff *skb,
824 */ 824 */
825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst, 825static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
826 struct request_sock *req, 826 struct request_sock *req,
827 struct request_values *rvp) 827 struct request_values *rvp,
828 u16 queue_mapping)
828{ 829{
829 const struct inet_request_sock *ireq = inet_rsk(req); 830 const struct inet_request_sock *ireq = inet_rsk(req);
830 struct flowi4 fl4; 831 struct flowi4 fl4;
@@ -840,6 +841,7 @@ static int tcp_v4_send_synack(struct sock *sk, struct dst_entry *dst,
840 if (skb) { 841 if (skb) {
841 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr); 842 __tcp_v4_send_check(skb, ireq->loc_addr, ireq->rmt_addr);
842 843
844 skb_set_queue_mapping(skb, queue_mapping);
843 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr, 845 err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
844 ireq->rmt_addr, 846 ireq->rmt_addr,
845 ireq->opt); 847 ireq->opt);
@@ -854,7 +856,7 @@ static int tcp_v4_rtx_synack(struct sock *sk, struct request_sock *req,
854 struct request_values *rvp) 856 struct request_values *rvp)
855{ 857{
856 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 858 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
857 return tcp_v4_send_synack(sk, NULL, req, rvp); 859 return tcp_v4_send_synack(sk, NULL, req, rvp, 0);
858} 860}
859 861
860/* 862/*
@@ -1422,7 +1424,8 @@ int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1422 tcp_rsk(req)->snt_synack = tcp_time_stamp; 1424 tcp_rsk(req)->snt_synack = tcp_time_stamp;
1423 1425
1424 if (tcp_v4_send_synack(sk, dst, req, 1426 if (tcp_v4_send_synack(sk, dst, req,
1425 (struct request_values *)&tmp_ext) || 1427 (struct request_values *)&tmp_ext,
1428 skb_get_queue_mapping(skb)) ||
1426 want_cookie) 1429 want_cookie)
1427 goto drop_and_free; 1430 goto drop_and_free;
1428 1431
diff --git a/net/ipv6/esp6.c b/net/ipv6/esp6.c
index 1e62b7557b00..db1521fcda5b 100644
--- a/net/ipv6/esp6.c
+++ b/net/ipv6/esp6.c
@@ -413,19 +413,15 @@ static u32 esp6_get_mtu(struct xfrm_state *x, int mtu)
413 struct esp_data *esp = x->data; 413 struct esp_data *esp = x->data;
414 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4); 414 u32 blksize = ALIGN(crypto_aead_blocksize(esp->aead), 4);
415 u32 align = max_t(u32, blksize, esp->padlen); 415 u32 align = max_t(u32, blksize, esp->padlen);
416 u32 rem; 416 unsigned int net_adj;
417 417
418 mtu -= x->props.header_len + crypto_aead_authsize(esp->aead); 418 if (x->props.mode != XFRM_MODE_TUNNEL)
419 rem = mtu & (align - 1); 419 net_adj = sizeof(struct ipv6hdr);
420 mtu &= ~(align - 1); 420 else
421 421 net_adj = 0;
422 if (x->props.mode != XFRM_MODE_TUNNEL) {
423 u32 padsize = ((blksize - 1) & 7) + 1;
424 mtu -= blksize - padsize;
425 mtu += min_t(u32, blksize - padsize, rem);
426 }
427 422
428 return mtu - 2; 423 return ((mtu - x->props.header_len - crypto_aead_authsize(esp->aead) -
424 net_adj) & ~(align - 1)) + (net_adj - 2);
429} 425}
430 426
431static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt, 427static void esp6_err(struct sk_buff *skb, struct inet6_skb_parm *opt,
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0c220a416626..74c21b924a79 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
1561 neigh_flags = neigh->flags; 1561 neigh_flags = neigh->flags;
1562 neigh_release(neigh); 1562 neigh_release(neigh);
1563 } 1563 }
1564 if (neigh_flags & NTF_ROUTER) { 1564 if (!(neigh_flags & NTF_ROUTER)) {
1565 RT6_TRACE("purging route %p via non-router but gateway\n", 1565 RT6_TRACE("purging route %p via non-router but gateway\n",
1566 rt); 1566 rt);
1567 return -1; 1567 return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index d99fdc699625..decc21d19c53 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
526 hdr->hop_limit--; 526 hdr->hop_limit--;
527 527
528 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS); 528 IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
529 IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
529 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev, 530 return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
530 ip6_forward_finish); 531 ip6_forward_finish);
531 532
@@ -1187,6 +1188,29 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
1187 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL; 1188 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
1188} 1189}
1189 1190
1191static void ip6_append_data_mtu(int *mtu,
1192 int *maxfraglen,
1193 unsigned int fragheaderlen,
1194 struct sk_buff *skb,
1195 struct rt6_info *rt)
1196{
1197 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
1198 if (skb == NULL) {
1199 /* first fragment, reserve header_len */
1200 *mtu = *mtu - rt->dst.header_len;
1201
1202 } else {
1203 /*
1204 * this fragment is not first, the headers
1205 * space is regarded as data space.
1206 */
1207 *mtu = dst_mtu(rt->dst.path);
1208 }
1209 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
1210 + fragheaderlen - sizeof(struct frag_hdr);
1211 }
1212}
1213
1190int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, 1214int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1191 int offset, int len, int odd, struct sk_buff *skb), 1215 int offset, int len, int odd, struct sk_buff *skb),
1192 void *from, int length, int transhdrlen, 1216 void *from, int length, int transhdrlen,
@@ -1196,7 +1220,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1196 struct inet_sock *inet = inet_sk(sk); 1220 struct inet_sock *inet = inet_sk(sk);
1197 struct ipv6_pinfo *np = inet6_sk(sk); 1221 struct ipv6_pinfo *np = inet6_sk(sk);
1198 struct inet_cork *cork; 1222 struct inet_cork *cork;
1199 struct sk_buff *skb; 1223 struct sk_buff *skb, *skb_prev = NULL;
1200 unsigned int maxfraglen, fragheaderlen; 1224 unsigned int maxfraglen, fragheaderlen;
1201 int exthdrlen; 1225 int exthdrlen;
1202 int dst_exthdrlen; 1226 int dst_exthdrlen;
@@ -1253,8 +1277,12 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1253 inet->cork.fl.u.ip6 = *fl6; 1277 inet->cork.fl.u.ip6 = *fl6;
1254 np->cork.hop_limit = hlimit; 1278 np->cork.hop_limit = hlimit;
1255 np->cork.tclass = tclass; 1279 np->cork.tclass = tclass;
1256 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ? 1280 if (rt->dst.flags & DST_XFRM_TUNNEL)
1257 rt->dst.dev->mtu : dst_mtu(&rt->dst); 1281 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1282 rt->dst.dev->mtu : dst_mtu(&rt->dst);
1283 else
1284 mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
1285 rt->dst.dev->mtu : dst_mtu(rt->dst.path);
1258 if (np->frag_size < mtu) { 1286 if (np->frag_size < mtu) {
1259 if (np->frag_size) 1287 if (np->frag_size)
1260 mtu = np->frag_size; 1288 mtu = np->frag_size;
@@ -1350,25 +1378,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
1350 unsigned int fraglen; 1378 unsigned int fraglen;
1351 unsigned int fraggap; 1379 unsigned int fraggap;
1352 unsigned int alloclen; 1380 unsigned int alloclen;
1353 struct sk_buff *skb_prev;
1354alloc_new_skb: 1381alloc_new_skb:
1355 skb_prev = skb;
1356
1357 /* There's no room in the current skb */ 1382 /* There's no room in the current skb */
1358 if (skb_prev) 1383 if (skb)
1359 fraggap = skb_prev->len - maxfraglen; 1384 fraggap = skb->len - maxfraglen;
1360 else 1385 else
1361 fraggap = 0; 1386 fraggap = 0;
1387 /* update mtu and maxfraglen if necessary */
1388 if (skb == NULL || skb_prev == NULL)
1389 ip6_append_data_mtu(&mtu, &maxfraglen,
1390 fragheaderlen, skb, rt);
1391
1392 skb_prev = skb;
1362 1393
1363 /* 1394 /*
1364 * If remaining data exceeds the mtu, 1395 * If remaining data exceeds the mtu,
1365 * we know we need more fragment(s). 1396 * we know we need more fragment(s).
1366 */ 1397 */
1367 datalen = length + fraggap; 1398 datalen = length + fraggap;
1368 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1369 datalen = maxfraglen - fragheaderlen;
1370 1399
1371 fraglen = datalen + fragheaderlen; 1400 if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
1401 datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
1372 if ((flags & MSG_MORE) && 1402 if ((flags & MSG_MORE) &&
1373 !(rt->dst.dev->features&NETIF_F_SG)) 1403 !(rt->dst.dev->features&NETIF_F_SG))
1374 alloclen = mtu; 1404 alloclen = mtu;
@@ -1377,13 +1407,16 @@ alloc_new_skb:
1377 1407
1378 alloclen += dst_exthdrlen; 1408 alloclen += dst_exthdrlen;
1379 1409
1380 /* 1410 if (datalen != length + fraggap) {
1381 * The last fragment gets additional space at tail. 1411 /*
1382 * Note: we overallocate on fragments with MSG_MODE 1412 * this is not the last fragment, the trailer
1383 * because we have no idea if we're the last one. 1413 * space is regarded as data space.
1384 */ 1414 */
1385 if (datalen == length + fraggap) 1415 datalen += rt->dst.trailer_len;
1386 alloclen += rt->dst.trailer_len; 1416 }
1417
1418 alloclen += rt->dst.trailer_len;
1419 fraglen = datalen + fragheaderlen;
1387 1420
1388 /* 1421 /*
1389 * We just reserve space for fragment header. 1422 * We just reserve space for fragment header.
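The ip6_append_data() changes above add ip6_append_data_mtu(): when the route is not flagged DST_XFRM_TUNNEL, the first fragment reserves rt->dst.header_len out of the mtu, later fragments fall back to dst_mtu(rt->dst.path), and maxfraglen is rederived from whichever mtu applies; non-final fragments also absorb rt->dst.trailer_len as data, per the patch's own comment. A runnable sketch of the maxfraglen arithmetic with made-up sizes:

/* Illustrative sketch with hypothetical header sizes. */
#include <stdio.h>

int main(void)
{
        unsigned int fragheaderlen = 48;  /* hypothetical IPv6 hdr + options */
        unsigned int frag_hdr = 8;        /* sizeof(struct frag_hdr) */
        unsigned int mtu = 1500;          /* e.g. dst_mtu(rt->dst.path) */
        unsigned int maxfraglen;

        maxfraglen = ((mtu - fragheaderlen) & ~7u) + fragheaderlen - frag_hdr;
        printf("maxfraglen = %u\n", maxfraglen);        /* 1488 with these numbers */
        return 0;
}
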
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b15dc08643a4..461e47c8e956 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
1886{ 1886{
1887 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)), 1887 IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1888 IPSTATS_MIB_OUTFORWDATAGRAMS); 1888 IPSTATS_MIB_OUTFORWDATAGRAMS);
1889 IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
1890 IPSTATS_MIB_OUTOCTETS, skb->len);
1889 return dst_output(skb); 1891 return dst_output(skb);
1890} 1892}
1891 1893
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 554d5999abc4..3a9aec29581a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -476,7 +476,8 @@ out:
476 476
477 477
478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req, 478static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
479 struct request_values *rvp) 479 struct request_values *rvp,
480 u16 queue_mapping)
480{ 481{
481 struct inet6_request_sock *treq = inet6_rsk(req); 482 struct inet6_request_sock *treq = inet6_rsk(req);
482 struct ipv6_pinfo *np = inet6_sk(sk); 483 struct ipv6_pinfo *np = inet6_sk(sk);
@@ -513,6 +514,7 @@ static int tcp_v6_send_synack(struct sock *sk, struct request_sock *req,
513 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr); 514 __tcp_v6_send_check(skb, &treq->loc_addr, &treq->rmt_addr);
514 515
515 fl6.daddr = treq->rmt_addr; 516 fl6.daddr = treq->rmt_addr;
517 skb_set_queue_mapping(skb, queue_mapping);
516 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass); 518 err = ip6_xmit(sk, skb, &fl6, opt, np->tclass);
517 err = net_xmit_eval(err); 519 err = net_xmit_eval(err);
518 } 520 }
@@ -528,7 +530,7 @@ static int tcp_v6_rtx_synack(struct sock *sk, struct request_sock *req,
528 struct request_values *rvp) 530 struct request_values *rvp)
529{ 531{
530 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS); 532 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_RETRANSSEGS);
531 return tcp_v6_send_synack(sk, req, rvp); 533 return tcp_v6_send_synack(sk, req, rvp, 0);
532} 534}
533 535
534static void tcp_v6_reqsk_destructor(struct request_sock *req) 536static void tcp_v6_reqsk_destructor(struct request_sock *req)
@@ -1213,7 +1215,8 @@ have_isn:
1213 security_inet_conn_request(sk, skb, req); 1215 security_inet_conn_request(sk, skb, req);
1214 1216
1215 if (tcp_v6_send_synack(sk, req, 1217 if (tcp_v6_send_synack(sk, req,
1216 (struct request_values *)&tmp_ext) || 1218 (struct request_values *)&tmp_ext,
1219 skb_get_queue_mapping(skb)) ||
1217 want_cookie) 1220 want_cookie)
1218 goto drop_and_free; 1221 goto drop_and_free;
1219 1222
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 443591d629ca..185f12f4a5fa 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
162 if (dev) { 162 if (dev) {
163 unregister_netdev(dev); 163 unregister_netdev(dev);
164 spriv->dev = NULL; 164 spriv->dev = NULL;
165 module_put(THIS_MODULE);
165 } 166 }
166 } 167 }
167} 168}
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
249 if (rc < 0) 250 if (rc < 0)
250 goto out_del_dev; 251 goto out_del_dev;
251 252
253 __module_get(THIS_MODULE);
252 /* Must be done after register_netdev() */ 254 /* Must be done after register_netdev() */
253 strlcpy(session->ifname, dev->name, IFNAMSIZ); 255 strlcpy(session->ifname, dev->name, IFNAMSIZ);
254 256
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 889f5d13d7ba..61d8b75d2686 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -239,9 +239,16 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
239{ 239{
240 struct inet_sock *inet = inet_sk(sk); 240 struct inet_sock *inet = inet_sk(sk);
241 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr; 241 struct sockaddr_l2tpip *addr = (struct sockaddr_l2tpip *) uaddr;
242 int ret = -EINVAL; 242 int ret;
243 int chk_addr_ret; 243 int chk_addr_ret;
244 244
245 if (!sock_flag(sk, SOCK_ZAPPED))
246 return -EINVAL;
247 if (addr_len < sizeof(struct sockaddr_l2tpip))
248 return -EINVAL;
249 if (addr->l2tp_family != AF_INET)
250 return -EINVAL;
251
245 ret = -EADDRINUSE; 252 ret = -EADDRINUSE;
246 read_lock_bh(&l2tp_ip_lock); 253 read_lock_bh(&l2tp_ip_lock);
247 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id)) 254 if (__l2tp_ip_bind_lookup(&init_net, addr->l2tp_addr.s_addr, sk->sk_bound_dev_if, addr->l2tp_conn_id))
@@ -272,6 +279,8 @@ static int l2tp_ip_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
272 sk_del_node_init(sk); 279 sk_del_node_init(sk);
273 write_unlock_bh(&l2tp_ip_lock); 280 write_unlock_bh(&l2tp_ip_lock);
274 ret = 0; 281 ret = 0;
282 sock_reset_flag(sk, SOCK_ZAPPED);
283
275out: 284out:
276 release_sock(sk); 285 release_sock(sk);
277 286
@@ -288,6 +297,9 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
288 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr; 297 struct sockaddr_l2tpip *lsa = (struct sockaddr_l2tpip *) uaddr;
289 int rc; 298 int rc;
290 299
300 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
301 return -EINVAL;
302
291 if (addr_len < sizeof(*lsa)) 303 if (addr_len < sizeof(*lsa))
292 return -EINVAL; 304 return -EINVAL;
293 305
@@ -311,6 +323,14 @@ static int l2tp_ip_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len
311 return rc; 323 return rc;
312} 324}
313 325
326static int l2tp_ip_disconnect(struct sock *sk, int flags)
327{
328 if (sock_flag(sk, SOCK_ZAPPED))
329 return 0;
330
331 return udp_disconnect(sk, flags);
332}
333
314static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr, 334static int l2tp_ip_getname(struct socket *sock, struct sockaddr *uaddr,
315 int *uaddr_len, int peer) 335 int *uaddr_len, int peer)
316{ 336{
@@ -444,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
444 sk->sk_bound_dev_if); 464 sk->sk_bound_dev_if);
445 if (IS_ERR(rt)) 465 if (IS_ERR(rt))
446 goto no_route; 466 goto no_route;
447 if (connected) 467 if (connected) {
448 sk_setup_caps(sk, &rt->dst); 468 sk_setup_caps(sk, &rt->dst);
449 else 469 } else {
450 dst_release(&rt->dst); /* safe since we hold rcu_read_lock */ 470 skb_dst_set(skb, &rt->dst);
471 goto xmit;
472 }
451 } 473 }
452 474
453 /* We dont need to clone dst here, it is guaranteed to not disappear. 475 /* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -455,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
455 */ 477 */
456 skb_dst_set_noref(skb, &rt->dst); 478 skb_dst_set_noref(skb, &rt->dst);
457 479
480xmit:
458 /* Queue the packet to IP for output */ 481 /* Queue the packet to IP for output */
459 rc = ip_queue_xmit(skb, &inet->cork.fl); 482 rc = ip_queue_xmit(skb, &inet->cork.fl);
460 rcu_read_unlock(); 483 rcu_read_unlock();
@@ -530,7 +553,7 @@ static struct proto l2tp_ip_prot = {
530 .close = l2tp_ip_close, 553 .close = l2tp_ip_close,
531 .bind = l2tp_ip_bind, 554 .bind = l2tp_ip_bind,
532 .connect = l2tp_ip_connect, 555 .connect = l2tp_ip_connect,
533 .disconnect = udp_disconnect, 556 .disconnect = l2tp_ip_disconnect,
534 .ioctl = udp_ioctl, 557 .ioctl = udp_ioctl,
535 .destroy = l2tp_ip_destroy_sock, 558 .destroy = l2tp_ip_destroy_sock,
536 .setsockopt = ip_setsockopt, 559 .setsockopt = ip_setsockopt,
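
The SOCK_ZAPPED checks added above enforce a bind-before-connect ordering and make bind() validate its arguments before mutating socket state. The toy userspace sketch below shows the same ordering rule; struct toy_sock and its helpers are invented for the example and are not the kernel API.

/* Illustrative sketch only -- not l2tp code. */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_sock {
	bool zapped;		/* true until a successful bind */
	unsigned int conn_id;
};

static int toy_bind(struct toy_sock *sk, unsigned int conn_id)
{
	if (!sk->zapped)	/* already bound */
		return -EINVAL;
	if (conn_id == 0)	/* validate before changing any state */
		return -EINVAL;
	sk->conn_id = conn_id;
	sk->zapped = false;	/* only now is connect() allowed */
	return 0;
}

static int toy_connect(struct toy_sock *sk, unsigned int peer_id)
{
	if (sk->zapped)		/* must bind first - autobinding does not work */
		return -EINVAL;
	printf("connected %u -> %u\n", sk->conn_id, peer_id);
	return 0;
}

int main(void)
{
	struct toy_sock sk = { .zapped = true };

	printf("connect before bind: %d\n", toy_connect(&sk, 7));
	printf("bind: %d\n", toy_bind(&sk, 42));
	printf("connect after bind: %d\n", toy_connect(&sk, 7));
	return 0;
}
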
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0291d8d85f30..35e1e4bde587 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -258,6 +258,10 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
258 int addr_type; 258 int addr_type;
259 int err; 259 int err;
260 260
261 if (!sock_flag(sk, SOCK_ZAPPED))
262 return -EINVAL;
263 if (addr->l2tp_family != AF_INET6)
264 return -EINVAL;
261 if (addr_len < sizeof(*addr)) 265 if (addr_len < sizeof(*addr))
262 return -EINVAL; 266 return -EINVAL;
263 267
@@ -331,6 +335,7 @@ static int l2tp_ip6_bind(struct sock *sk, struct sockaddr *uaddr, int addr_len)
331 sk_del_node_init(sk); 335 sk_del_node_init(sk);
332 write_unlock_bh(&l2tp_ip6_lock); 336 write_unlock_bh(&l2tp_ip6_lock);
333 337
338 sock_reset_flag(sk, SOCK_ZAPPED);
334 release_sock(sk); 339 release_sock(sk);
335 return 0; 340 return 0;
336 341
@@ -354,6 +359,9 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
354 int addr_type; 359 int addr_type;
355 int rc; 360 int rc;
356 361
362 if (sock_flag(sk, SOCK_ZAPPED)) /* Must bind first - autobinding does not work */
363 return -EINVAL;
364
357 if (addr_len < sizeof(*lsa)) 365 if (addr_len < sizeof(*lsa))
358 return -EINVAL; 366 return -EINVAL;
359 367
@@ -383,6 +391,14 @@ static int l2tp_ip6_connect(struct sock *sk, struct sockaddr *uaddr,
383 return rc; 391 return rc;
384} 392}
385 393
394static int l2tp_ip6_disconnect(struct sock *sk, int flags)
395{
396 if (sock_flag(sk, SOCK_ZAPPED))
397 return 0;
398
399 return udp_disconnect(sk, flags);
400}
401
386static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr, 402static int l2tp_ip6_getname(struct socket *sock, struct sockaddr *uaddr,
387 int *uaddr_len, int peer) 403 int *uaddr_len, int peer)
388{ 404{
@@ -689,7 +705,7 @@ static struct proto l2tp_ip6_prot = {
689 .close = l2tp_ip6_close, 705 .close = l2tp_ip6_close,
690 .bind = l2tp_ip6_bind, 706 .bind = l2tp_ip6_bind,
691 .connect = l2tp_ip6_connect, 707 .connect = l2tp_ip6_connect,
692 .disconnect = udp_disconnect, 708 .disconnect = l2tp_ip6_disconnect,
693 .ioctl = udp_ioctl, 709 .ioctl = udp_ioctl,
694 .destroy = l2tp_ip6_destroy_sock, 710 .destroy = l2tp_ip6_destroy_sock,
695 .setsockopt = ipv6_setsockopt, 711 .setsockopt = ipv6_setsockopt,
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index 8577264378fe..ddc553e76671 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -923,5 +923,4 @@ MODULE_AUTHOR("James Chapman <jchapman@katalix.com>");
923MODULE_DESCRIPTION("L2TP netlink"); 923MODULE_DESCRIPTION("L2TP netlink");
924MODULE_LICENSE("GPL"); 924MODULE_LICENSE("GPL");
925MODULE_VERSION("1.0"); 925MODULE_VERSION("1.0");
926MODULE_ALIAS("net-pf-" __stringify(PF_NETLINK) "-proto-" \ 926MODULE_ALIAS_GENL_FAMILY("l2tp");
927 __stringify(NETLINK_GENERIC) "-type-" "l2tp");
diff --git a/net/mac80211/agg-rx.c b/net/mac80211/agg-rx.c
index 26ddb699d693..c649188314cc 100644
--- a/net/mac80211/agg-rx.c
+++ b/net/mac80211/agg-rx.c
@@ -145,15 +145,20 @@ static void sta_rx_agg_session_timer_expired(unsigned long data)
145 struct tid_ampdu_rx *tid_rx; 145 struct tid_ampdu_rx *tid_rx;
146 unsigned long timeout; 146 unsigned long timeout;
147 147
148 rcu_read_lock();
148 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]); 149 tid_rx = rcu_dereference(sta->ampdu_mlme.tid_rx[*ptid]);
149 if (!tid_rx) 150 if (!tid_rx) {
151 rcu_read_unlock();
150 return; 152 return;
153 }
151 154
152 timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout); 155 timeout = tid_rx->last_rx + TU_TO_JIFFIES(tid_rx->timeout);
153 if (time_is_after_jiffies(timeout)) { 156 if (time_is_after_jiffies(timeout)) {
154 mod_timer(&tid_rx->session_timer, timeout); 157 mod_timer(&tid_rx->session_timer, timeout);
158 rcu_read_unlock();
155 return; 159 return;
156 } 160 }
161 rcu_read_unlock();
157 162
158#ifdef CONFIG_MAC80211_HT_DEBUG 163#ifdef CONFIG_MAC80211_HT_DEBUG
159 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid); 164 printk(KERN_DEBUG "rx session timer expired on tid %d\n", (u16)*ptid);
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index 495831ee48f1..e9cecca5c44d 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -533,16 +533,16 @@ static void ieee80211_get_et_stats(struct wiphy *wiphy,
533 sinfo.filled = 0; 533 sinfo.filled = 0;
534 sta_set_sinfo(sta, &sinfo); 534 sta_set_sinfo(sta, &sinfo);
535 535
536 if (sinfo.filled | STATION_INFO_TX_BITRATE) 536 if (sinfo.filled & STATION_INFO_TX_BITRATE)
537 data[i] = 100000 * 537 data[i] = 100000 *
538 cfg80211_calculate_bitrate(&sinfo.txrate); 538 cfg80211_calculate_bitrate(&sinfo.txrate);
539 i++; 539 i++;
540 if (sinfo.filled | STATION_INFO_RX_BITRATE) 540 if (sinfo.filled & STATION_INFO_RX_BITRATE)
541 data[i] = 100000 * 541 data[i] = 100000 *
542 cfg80211_calculate_bitrate(&sinfo.rxrate); 542 cfg80211_calculate_bitrate(&sinfo.rxrate);
543 i++; 543 i++;
544 544
545 if (sinfo.filled | STATION_INFO_SIGNAL_AVG) 545 if (sinfo.filled & STATION_INFO_SIGNAL_AVG)
546 data[i] = (u8)sinfo.signal_avg; 546 data[i] = (u8)sinfo.signal_avg;
547 i++; 547 i++;
548 } else { 548 } else {
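
The three conditions fixed above used bitwise OR where a bitmask test was intended, so the branches were always taken. A tiny standalone demonstration of the difference follows; the flag value is made up for the example.

/* Illustrative sketch only -- not mac80211 code. */
#include <stdio.h>

#define STATION_INFO_TX_BITRATE (1u << 2)	/* value chosen for the example */

int main(void)
{
	unsigned int filled = 0;	/* driver reported nothing */

	if (filled | STATION_INFO_TX_BITRATE)
		printf("'|' check: looks set even though filled == 0\n");
	if (filled & STATION_INFO_TX_BITRATE)
		printf("'&' check: set\n");
	else
		printf("'&' check: correctly reports the flag as unset\n");
	return 0;
}
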
diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
index d4c19a7773db..8664111d0566 100644
--- a/net/mac80211/iface.c
+++ b/net/mac80211/iface.c
@@ -637,6 +637,18 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
637 ieee80211_configure_filter(local); 637 ieee80211_configure_filter(local);
638 break; 638 break;
639 default: 639 default:
640 mutex_lock(&local->mtx);
641 if (local->hw_roc_dev == sdata->dev &&
642 local->hw_roc_channel) {
643 /* ignore return value since this is racy */
644 drv_cancel_remain_on_channel(local);
645 ieee80211_queue_work(&local->hw, &local->hw_roc_done);
646 }
647 mutex_unlock(&local->mtx);
648
649 flush_work(&local->hw_roc_start);
650 flush_work(&local->hw_roc_done);
651
640 flush_work(&sdata->work); 652 flush_work(&sdata->work);
641 /* 653 /*
642 * When we get here, the interface is marked down. 654 * When we get here, the interface is marked down.
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index b3b3c264ff66..91d84cc77bbf 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -1220,6 +1220,22 @@ static void ieee80211_sta_wmm_params(struct ieee80211_local *local,
1220 sdata->vif.bss_conf.qos = true; 1220 sdata->vif.bss_conf.qos = true;
1221} 1221}
1222 1222
1223static void __ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
1224{
1225 lockdep_assert_held(&sdata->local->mtx);
1226
1227 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL |
1228 IEEE80211_STA_BEACON_POLL);
1229 ieee80211_run_deferred_scan(sdata->local);
1230}
1231
1232static void ieee80211_stop_poll(struct ieee80211_sub_if_data *sdata)
1233{
1234 mutex_lock(&sdata->local->mtx);
1235 __ieee80211_stop_poll(sdata);
1236 mutex_unlock(&sdata->local->mtx);
1237}
1238
1223static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata, 1239static u32 ieee80211_handle_bss_capability(struct ieee80211_sub_if_data *sdata,
1224 u16 capab, bool erp_valid, u8 erp) 1240 u16 capab, bool erp_valid, u8 erp)
1225{ 1241{
@@ -1285,8 +1301,7 @@ static void ieee80211_set_associated(struct ieee80211_sub_if_data *sdata,
1285 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE; 1301 sdata->u.mgd.flags |= IEEE80211_STA_RESET_SIGNAL_AVE;
1286 1302
1287 /* just to be sure */ 1303 /* just to be sure */
1288 sdata->u.mgd.flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1304 ieee80211_stop_poll(sdata);
1289 IEEE80211_STA_BEACON_POLL);
1290 1305
1291 ieee80211_led_assoc(local, 1); 1306 ieee80211_led_assoc(local, 1);
1292 1307
@@ -1456,8 +1471,7 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1456 return; 1471 return;
1457 } 1472 }
1458 1473
1459 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 1474 __ieee80211_stop_poll(sdata);
1460 IEEE80211_STA_BEACON_POLL);
1461 1475
1462 mutex_lock(&local->iflist_mtx); 1476 mutex_lock(&local->iflist_mtx);
1463 ieee80211_recalc_ps(local, -1); 1477 ieee80211_recalc_ps(local, -1);
@@ -1477,7 +1491,6 @@ static void ieee80211_reset_ap_probe(struct ieee80211_sub_if_data *sdata)
1477 round_jiffies_up(jiffies + 1491 round_jiffies_up(jiffies +
1478 IEEE80211_CONNECTION_IDLE_TIME)); 1492 IEEE80211_CONNECTION_IDLE_TIME));
1479out: 1493out:
1480 ieee80211_run_deferred_scan(local);
1481 mutex_unlock(&local->mtx); 1494 mutex_unlock(&local->mtx);
1482} 1495}
1483 1496
@@ -1522,6 +1535,8 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1522 * anymore. The timeout will be reset if the frame is ACKed by 1535 * anymore. The timeout will be reset if the frame is ACKed by
1523 * the AP. 1536 * the AP.
1524 */ 1537 */
1538 ifmgd->probe_send_count++;
1539
1525 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) { 1540 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) {
1526 ifmgd->nullfunc_failed = false; 1541 ifmgd->nullfunc_failed = false;
1527 ieee80211_send_nullfunc(sdata->local, sdata, 0); 1542 ieee80211_send_nullfunc(sdata->local, sdata, 0);
@@ -1538,7 +1553,6 @@ static void ieee80211_mgd_probe_ap_send(struct ieee80211_sub_if_data *sdata)
1538 0, (u32) -1, true, false); 1553 0, (u32) -1, true, false);
1539 } 1554 }
1540 1555
1541 ifmgd->probe_send_count++;
1542 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms); 1556 ifmgd->probe_timeout = jiffies + msecs_to_jiffies(probe_wait_ms);
1543 run_again(ifmgd, ifmgd->probe_timeout); 1557 run_again(ifmgd, ifmgd->probe_timeout);
1544 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS) 1558 if (sdata->local->hw.flags & IEEE80211_HW_REPORTS_TX_ACK_STATUS)
@@ -2407,7 +2421,11 @@ static void ieee80211_rx_mgmt_beacon(struct ieee80211_sub_if_data *sdata,
2407 net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n", 2421 net_dbg_ratelimited("%s: cancelling probereq poll due to a received beacon\n",
2408 sdata->name); 2422 sdata->name);
2409#endif 2423#endif
2424 mutex_lock(&local->mtx);
2410 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL; 2425 ifmgd->flags &= ~IEEE80211_STA_BEACON_POLL;
2426 ieee80211_run_deferred_scan(local);
2427 mutex_unlock(&local->mtx);
2428
2411 mutex_lock(&local->iflist_mtx); 2429 mutex_lock(&local->iflist_mtx);
2412 ieee80211_recalc_ps(local, -1); 2430 ieee80211_recalc_ps(local, -1);
2413 mutex_unlock(&local->iflist_mtx); 2431 mutex_unlock(&local->iflist_mtx);
@@ -2594,8 +2612,7 @@ static void ieee80211_sta_connection_lost(struct ieee80211_sub_if_data *sdata,
2594 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd; 2612 struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
2595 u8 frame_buf[DEAUTH_DISASSOC_LEN]; 2613 u8 frame_buf[DEAUTH_DISASSOC_LEN];
2596 2614
2597 ifmgd->flags &= ~(IEEE80211_STA_CONNECTION_POLL | 2615 ieee80211_stop_poll(sdata);
2598 IEEE80211_STA_BEACON_POLL);
2599 2616
2600 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason, 2617 ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH, reason,
2601 false, frame_buf); 2618 false, frame_buf);
@@ -2873,8 +2890,7 @@ static void ieee80211_restart_sta_timer(struct ieee80211_sub_if_data *sdata)
2873 u32 flags; 2890 u32 flags;
2874 2891
2875 if (sdata->vif.type == NL80211_IFTYPE_STATION) { 2892 if (sdata->vif.type == NL80211_IFTYPE_STATION) {
2876 sdata->u.mgd.flags &= ~(IEEE80211_STA_BEACON_POLL | 2893 __ieee80211_stop_poll(sdata);
2877 IEEE80211_STA_CONNECTION_POLL);
2878 2894
2879 /* let's probe the connection once */ 2895 /* let's probe the connection once */
2880 flags = sdata->local->hw.flags; 2896 flags = sdata->local->hw.flags;
@@ -2943,7 +2959,10 @@ void ieee80211_sta_restart(struct ieee80211_sub_if_data *sdata)
2943 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running)) 2959 if (test_and_clear_bit(TMR_RUNNING_CHANSW, &ifmgd->timers_running))
2944 add_timer(&ifmgd->chswitch_timer); 2960 add_timer(&ifmgd->chswitch_timer);
2945 ieee80211_sta_reset_beacon_monitor(sdata); 2961 ieee80211_sta_reset_beacon_monitor(sdata);
2962
2963 mutex_lock(&sdata->local->mtx);
2946 ieee80211_restart_sta_timer(sdata); 2964 ieee80211_restart_sta_timer(sdata);
2965 mutex_unlock(&sdata->local->mtx);
2947} 2966}
2948#endif 2967#endif
2949 2968
@@ -3105,7 +3124,7 @@ static int ieee80211_prep_connection(struct ieee80211_sub_if_data *sdata,
3105 } 3124 }
3106 3125
3107 local->oper_channel = cbss->channel; 3126 local->oper_channel = cbss->channel;
3108 ieee80211_hw_config(local, 0); 3127 ieee80211_hw_config(local, IEEE80211_CONF_CHANGE_CHANNEL);
3109 3128
3110 if (!have_sta) { 3129 if (!have_sta) {
3111 u32 rates = 0, basic_rates = 0; 3130 u32 rates = 0, basic_rates = 0;
diff --git a/net/mac80211/offchannel.c b/net/mac80211/offchannel.c
index f054e94901a2..935aa4b6deee 100644
--- a/net/mac80211/offchannel.c
+++ b/net/mac80211/offchannel.c
@@ -234,6 +234,22 @@ static void ieee80211_hw_roc_done(struct work_struct *work)
234 return; 234 return;
235 } 235 }
236 236
237 /* was never transmitted */
238 if (local->hw_roc_skb) {
239 u64 cookie;
240
241 cookie = local->hw_roc_cookie ^ 2;
242
243 cfg80211_mgmt_tx_status(local->hw_roc_dev, cookie,
244 local->hw_roc_skb->data,
245 local->hw_roc_skb->len, false,
246 GFP_KERNEL);
247
248 kfree_skb(local->hw_roc_skb);
249 local->hw_roc_skb = NULL;
250 local->hw_roc_skb_for_status = NULL;
251 }
252
237 if (!local->hw_roc_for_tx) 253 if (!local->hw_roc_for_tx)
238 cfg80211_remain_on_channel_expired(local->hw_roc_dev, 254 cfg80211_remain_on_channel_expired(local->hw_roc_dev,
239 local->hw_roc_cookie, 255 local->hw_roc_cookie,
diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c
index f5b1638fbf80..de455f8bbb91 100644
--- a/net/mac80211/sta_info.c
+++ b/net/mac80211/sta_info.c
@@ -378,7 +378,7 @@ static int sta_info_insert_finish(struct sta_info *sta) __acquires(RCU)
378 /* make the station visible */ 378 /* make the station visible */
379 sta_info_hash_add(local, sta); 379 sta_info_hash_add(local, sta);
380 380
381 list_add(&sta->list, &local->sta_list); 381 list_add_rcu(&sta->list, &local->sta_list);
382 382
383 set_sta_flag(sta, WLAN_STA_INSERTED); 383 set_sta_flag(sta, WLAN_STA_INSERTED);
384 384
@@ -688,7 +688,7 @@ int __must_check __sta_info_destroy(struct sta_info *sta)
688 if (ret) 688 if (ret)
689 return ret; 689 return ret;
690 690
691 list_del(&sta->list); 691 list_del_rcu(&sta->list);
692 692
693 mutex_lock(&local->key_mtx); 693 mutex_lock(&local->key_mtx);
694 for (i = 0; i < NUM_DEFAULT_KEYS; i++) 694 for (i = 0; i < NUM_DEFAULT_KEYS; i++)
diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
index 5f827a6b0d8d..e453212fa17f 100644
--- a/net/mac80211/tx.c
+++ b/net/mac80211/tx.c
@@ -153,7 +153,7 @@ static __le16 ieee80211_duration(struct ieee80211_tx_data *tx,
153 153
154 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */ 154 /* Don't calculate ACKs for QoS Frames with NoAck Policy set */
155 if (ieee80211_is_data_qos(hdr->frame_control) && 155 if (ieee80211_is_data_qos(hdr->frame_control) &&
156 *(ieee80211_get_qos_ctl(hdr)) | IEEE80211_QOS_CTL_ACK_POLICY_NOACK) 156 *(ieee80211_get_qos_ctl(hdr)) & IEEE80211_QOS_CTL_ACK_POLICY_NOACK)
157 dur = 0; 157 dur = 0;
158 else 158 else
159 /* Time needed to transmit ACK 159 /* Time needed to transmit ACK
@@ -1737,7 +1737,7 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1737 __le16 fc; 1737 __le16 fc;
1738 struct ieee80211_hdr hdr; 1738 struct ieee80211_hdr hdr;
1739 struct ieee80211s_hdr mesh_hdr __maybe_unused; 1739 struct ieee80211s_hdr mesh_hdr __maybe_unused;
1740 struct mesh_path __maybe_unused *mppath = NULL; 1740 struct mesh_path __maybe_unused *mppath = NULL, *mpath = NULL;
1741 const u8 *encaps_data; 1741 const u8 *encaps_data;
1742 int encaps_len, skip_header_bytes; 1742 int encaps_len, skip_header_bytes;
1743 int nh_pos, h_pos; 1743 int nh_pos, h_pos;
@@ -1803,8 +1803,11 @@ netdev_tx_t ieee80211_subif_start_xmit(struct sk_buff *skb,
1803 goto fail; 1803 goto fail;
1804 } 1804 }
1805 rcu_read_lock(); 1805 rcu_read_lock();
1806 if (!is_multicast_ether_addr(skb->data)) 1806 if (!is_multicast_ether_addr(skb->data)) {
1807 mppath = mpp_path_lookup(skb->data, sdata); 1807 mpath = mesh_path_lookup(skb->data, sdata);
1808 if (!mpath)
1809 mppath = mpp_path_lookup(skb->data, sdata);
1810 }
1808 1811
1809 /* 1812 /*
1810 * Use address extension if it is a packet from 1813 * Use address extension if it is a packet from
diff --git a/net/mac80211/util.c b/net/mac80211/util.c
index 22f2216b397e..8dd4712620ff 100644
--- a/net/mac80211/util.c
+++ b/net/mac80211/util.c
@@ -1271,7 +1271,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1271 enum ieee80211_sta_state state; 1271 enum ieee80211_sta_state state;
1272 1272
1273 for (state = IEEE80211_STA_NOTEXIST; 1273 for (state = IEEE80211_STA_NOTEXIST;
1274 state < sta->sta_state - 1; state++) 1274 state < sta->sta_state; state++)
1275 WARN_ON(drv_sta_state(local, sta->sdata, sta, 1275 WARN_ON(drv_sta_state(local, sta->sdata, sta,
1276 state, state + 1)); 1276 state, state + 1));
1277 } 1277 }
@@ -1371,6 +1371,12 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1371 } 1371 }
1372 } 1372 }
1373 1373
1374 /* add back keys */
1375 list_for_each_entry(sdata, &local->interfaces, list)
1376 if (ieee80211_sdata_running(sdata))
1377 ieee80211_enable_keys(sdata);
1378
1379 wake_up:
1374 /* 1380 /*
1375 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation 1381 * Clear the WLAN_STA_BLOCK_BA flag so new aggregation
1376 * sessions can be established after a resume. 1382 * sessions can be established after a resume.
@@ -1392,12 +1398,6 @@ int ieee80211_reconfig(struct ieee80211_local *local)
1392 mutex_unlock(&local->sta_mtx); 1398 mutex_unlock(&local->sta_mtx);
1393 } 1399 }
1394 1400
1395 /* add back keys */
1396 list_for_each_entry(sdata, &local->interfaces, list)
1397 if (ieee80211_sdata_running(sdata))
1398 ieee80211_enable_keys(sdata);
1399
1400 wake_up:
1401 ieee80211_wake_queues_by_reason(hw, 1401 ieee80211_wake_queues_by_reason(hw,
1402 IEEE80211_QUEUE_STOP_REASON_SUSPEND); 1402 IEEE80211_QUEUE_STOP_REASON_SUSPEND);
1403 1403
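
The loop bound fixed in ieee80211_reconfig() above must run while state < sta->sta_state, otherwise the final transition is never replayed after a hardware restart and the station ends up one state short. A small sketch of that off-by-one, with an invented state enum standing in for the mac80211 one:

/* Illustrative sketch only -- not mac80211 code. */
#include <stdio.h>

enum toy_sta_state { STA_NOTEXIST, STA_NONE, STA_AUTH, STA_ASSOC, STA_AUTHORIZED };

static enum toy_sta_state replay(enum toy_sta_state target, int off_by_one)
{
	enum toy_sta_state state, reached = STA_NOTEXIST;
	enum toy_sta_state limit = off_by_one ? target - 1 : target;

	for (state = STA_NOTEXIST; state < limit; state++)
		reached = state + 1;	/* stand-in for drv_sta_state(state, state + 1) */
	return reached;
}

int main(void)
{
	printf("old bound reaches state %d, fixed bound reaches state %d (target %d)\n",
	       replay(STA_AUTHORIZED, 1), replay(STA_AUTHORIZED, 0), STA_AUTHORIZED);
	return 0;
}
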
diff --git a/net/netfilter/nf_conntrack_h323_main.c b/net/netfilter/nf_conntrack_h323_main.c
index 46d69d7f1bb4..31f50bc3a312 100644
--- a/net/netfilter/nf_conntrack_h323_main.c
+++ b/net/netfilter/nf_conntrack_h323_main.c
@@ -270,9 +270,8 @@ static int expect_rtp_rtcp(struct sk_buff *skb, struct nf_conn *ct,
270 return 0; 270 return 0;
271 271
272 /* RTP port is even */ 272 /* RTP port is even */
273 port &= htons(~1); 273 rtp_port = port & ~htons(1);
274 rtp_port = port; 274 rtcp_port = port | htons(1);
275 rtcp_port = htons(ntohs(port) + 1);
276 275
277 /* Create expect for RTP */ 276 /* Create expect for RTP */
278 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL) 277 if ((rtp_exp = nf_ct_expect_alloc(ct)) == NULL)
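
The expect_rtp_rtcp() change above derives the even RTP port and the odd RTCP port directly in network byte order by clearing or setting the low bit of the 16-bit value, with no ntohs()/htons() round trip. A short standalone check of that trick (illustrative only, not conntrack code):

/* Illustrative sketch only. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t port = htons(30001);		/* some negotiated media port */
	uint16_t rtp_port = port & ~htons(1);	/* force even */
	uint16_t rtcp_port = port | htons(1);	/* force odd, one above RTP */

	printf("rtp=%u rtcp=%u\n",
	       (unsigned)ntohs(rtp_port), (unsigned)ntohs(rtcp_port));
	return 0;
}
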
diff --git a/net/netfilter/xt_HMARK.c b/net/netfilter/xt_HMARK.c
index 0a96a43108ed..1686ca1b53a1 100644
--- a/net/netfilter/xt_HMARK.c
+++ b/net/netfilter/xt_HMARK.c
@@ -32,13 +32,13 @@ MODULE_ALIAS("ipt_HMARK");
32MODULE_ALIAS("ip6t_HMARK"); 32MODULE_ALIAS("ip6t_HMARK");
33 33
34struct hmark_tuple { 34struct hmark_tuple {
35 u32 src; 35 __be32 src;
36 u32 dst; 36 __be32 dst;
37 union hmark_ports uports; 37 union hmark_ports uports;
38 uint8_t proto; 38 u8 proto;
39}; 39};
40 40
41static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask) 41static inline __be32 hmark_addr6_mask(const __be32 *addr32, const __be32 *mask)
42{ 42{
43 return (addr32[0] & mask[0]) ^ 43 return (addr32[0] & mask[0]) ^
44 (addr32[1] & mask[1]) ^ 44 (addr32[1] & mask[1]) ^
@@ -46,8 +46,8 @@ static inline u32 hmark_addr6_mask(const __u32 *addr32, const __u32 *mask)
46 (addr32[3] & mask[3]); 46 (addr32[3] & mask[3]);
47} 47}
48 48
49static inline u32 49static inline __be32
50hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask) 50hmark_addr_mask(int l3num, const __be32 *addr32, const __be32 *mask)
51{ 51{
52 switch (l3num) { 52 switch (l3num) {
53 case AF_INET: 53 case AF_INET:
@@ -58,6 +58,22 @@ hmark_addr_mask(int l3num, const __u32 *addr32, const __u32 *mask)
58 return 0; 58 return 0;
59} 59}
60 60
61static inline void hmark_swap_ports(union hmark_ports *uports,
62 const struct xt_hmark_info *info)
63{
64 union hmark_ports hp;
65 u16 src, dst;
66
67 hp.b32 = (uports->b32 & info->port_mask.b32) | info->port_set.b32;
68 src = ntohs(hp.b16.src);
69 dst = ntohs(hp.b16.dst);
70
71 if (dst > src)
72 uports->v32 = (dst << 16) | src;
73 else
74 uports->v32 = (src << 16) | dst;
75}
76
61static int 77static int
62hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t, 78hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
63 const struct xt_hmark_info *info) 79 const struct xt_hmark_info *info)
@@ -74,22 +90,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
74 otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple; 90 otuple = &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple;
75 rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple; 91 rtuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
76 92
77 t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.all, 93 t->src = hmark_addr_mask(otuple->src.l3num, otuple->src.u3.ip6,
78 info->src_mask.all); 94 info->src_mask.ip6);
79 t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.all, 95 t->dst = hmark_addr_mask(otuple->src.l3num, rtuple->src.u3.ip6,
80 info->dst_mask.all); 96 info->dst_mask.ip6);
81 97
82 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 98 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
83 return 0; 99 return 0;
84 100
85 t->proto = nf_ct_protonum(ct); 101 t->proto = nf_ct_protonum(ct);
86 if (t->proto != IPPROTO_ICMP) { 102 if (t->proto != IPPROTO_ICMP) {
87 t->uports.p16.src = otuple->src.u.all; 103 t->uports.b16.src = otuple->src.u.all;
88 t->uports.p16.dst = rtuple->src.u.all; 104 t->uports.b16.dst = rtuple->src.u.all;
89 t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | 105 hmark_swap_ports(&t->uports, info);
90 info->port_set.v32;
91 if (t->uports.p16.dst < t->uports.p16.src)
92 swap(t->uports.p16.dst, t->uports.p16.src);
93 } 106 }
94 107
95 return 0; 108 return 0;
@@ -98,15 +111,19 @@ hmark_ct_set_htuple(const struct sk_buff *skb, struct hmark_tuple *t,
98#endif 111#endif
99} 112}
100 113
114/* This hash function is endian independent, to ensure consistent hashing if
115 * the cluster is composed of big and little endian systems. */
101static inline u32 116static inline u32
102hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info) 117hmark_hash(struct hmark_tuple *t, const struct xt_hmark_info *info)
103{ 118{
104 u32 hash; 119 u32 hash;
120 u32 src = ntohl(t->src);
121 u32 dst = ntohl(t->dst);
105 122
106 if (t->dst < t->src) 123 if (dst < src)
107 swap(t->src, t->dst); 124 swap(src, dst);
108 125
109 hash = jhash_3words(t->src, t->dst, t->uports.v32, info->hashrnd); 126 hash = jhash_3words(src, dst, t->uports.v32, info->hashrnd);
110 hash = hash ^ (t->proto & info->proto_mask); 127 hash = hash ^ (t->proto & info->proto_mask);
111 128
112 return (((u64)hash * info->hmodulus) >> 32) + info->hoffset; 129 return (((u64)hash * info->hmodulus) >> 32) + info->hoffset;
@@ -126,11 +143,7 @@ hmark_set_tuple_ports(const struct sk_buff *skb, unsigned int nhoff,
126 if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0) 143 if (skb_copy_bits(skb, nhoff, &t->uports, sizeof(t->uports)) < 0)
127 return; 144 return;
128 145
129 t->uports.v32 = (t->uports.v32 & info->port_mask.v32) | 146 hmark_swap_ports(&t->uports, info);
130 info->port_set.v32;
131
132 if (t->uports.p16.dst < t->uports.p16.src)
133 swap(t->uports.p16.dst, t->uports.p16.src);
134} 147}
135 148
136#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) 149#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
@@ -178,8 +191,8 @@ hmark_pkt_set_htuple_ipv6(const struct sk_buff *skb, struct hmark_tuple *t,
178 return -1; 191 return -1;
179 } 192 }
180noicmp: 193noicmp:
181 t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.all); 194 t->src = hmark_addr6_mask(ip6->saddr.s6_addr32, info->src_mask.ip6);
182 t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.all); 195 t->dst = hmark_addr6_mask(ip6->daddr.s6_addr32, info->dst_mask.ip6);
183 196
184 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 197 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
185 return 0; 198 return 0;
@@ -255,11 +268,8 @@ hmark_pkt_set_htuple_ipv4(const struct sk_buff *skb, struct hmark_tuple *t,
255 } 268 }
256 } 269 }
257 270
258 t->src = (__force u32) ip->saddr; 271 t->src = ip->saddr & info->src_mask.ip;
259 t->dst = (__force u32) ip->daddr; 272 t->dst = ip->daddr & info->dst_mask.ip;
260
261 t->src &= info->src_mask.ip;
262 t->dst &= info->dst_mask.ip;
263 273
264 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3)) 274 if (info->flags & XT_HMARK_FLAG(XT_HMARK_METHOD_L3))
265 return 0; 275 return 0;
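
The xt_HMARK changes above make the hash input endian independent and direction independent: addresses are converted to host order and ordered, and the two ports are packed in a fixed order before hashing, so both packet directions and both big- and little-endian cluster members compute the same mark. The userspace sketch below shows that normalisation; mix32() is only a stand-in for jhash_3words(), and everything here is an assumption made for the example rather than the kernel implementation.

/* Illustrative sketch only -- not xt_HMARK code. */
#include <arpa/inet.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t mix32(uint32_t a, uint32_t b, uint32_t c)
{
	uint32_t h = a * 2654435761u;

	h ^= b + 0x9e3779b9u + (h << 6) + (h >> 2);
	h ^= c + 0x9e3779b9u + (h << 6) + (h >> 2);
	return h;
}

static uint32_t tuple_hash(uint32_t saddr_be, uint32_t daddr_be,
			   uint16_t sport_be, uint16_t dport_be)
{
	uint32_t src = ntohl(saddr_be), dst = ntohl(daddr_be);
	uint16_t sport = ntohs(sport_be), dport = ntohs(dport_be);
	uint32_t ports, tmp;

	if (dst < src) {		/* order the addresses */
		tmp = src; src = dst; dst = tmp;
	}
	/* pack the ports the same way regardless of packet direction */
	ports = dport > sport ? ((uint32_t)dport << 16) | sport
			      : ((uint32_t)sport << 16) | dport;
	return mix32(src, dst, ports);
}

int main(void)
{
	uint32_t a = htonl(0xc0a80001), b = htonl(0x0a000002);
	uint16_t p1 = htons(443), p2 = htons(51234);

	/* both directions of the same flow hash identically */
	printf("%08x %08x\n", tuple_hash(a, b, p1, p2), tuple_hash(b, a, p2, p1));
	return 0;
}
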
diff --git a/net/netlink/genetlink.c b/net/netlink/genetlink.c
index 8340ace837f2..2cc7c1ee7690 100644
--- a/net/netlink/genetlink.c
+++ b/net/netlink/genetlink.c
@@ -836,7 +836,7 @@ static int ctrl_getfamily(struct sk_buff *skb, struct genl_info *info)
836#ifdef CONFIG_MODULES 836#ifdef CONFIG_MODULES
837 if (res == NULL) { 837 if (res == NULL) {
838 genl_unlock(); 838 genl_unlock();
839 request_module("net-pf-%d-proto-%d-type-%s", 839 request_module("net-pf-%d-proto-%d-family-%s",
840 PF_NETLINK, NETLINK_GENERIC, name); 840 PF_NETLINK, NETLINK_GENERIC, name);
841 genl_lock(); 841 genl_lock();
842 res = genl_family_find_byname(name); 842 res = genl_family_find_byname(name);
diff --git a/net/nfc/llcp/sock.c b/net/nfc/llcp/sock.c
index 3f339b19d140..17a707db40eb 100644
--- a/net/nfc/llcp/sock.c
+++ b/net/nfc/llcp/sock.c
@@ -292,6 +292,9 @@ static int llcp_sock_getname(struct socket *sock, struct sockaddr *addr,
292 292
293 pr_debug("%p\n", sk); 293 pr_debug("%p\n", sk);
294 294
295 if (llcp_sock == NULL)
296 return -EBADFD;
297
295 addr->sa_family = AF_NFC; 298 addr->sa_family = AF_NFC;
296 *len = sizeof(struct sockaddr_nfc_llcp); 299 *len = sizeof(struct sockaddr_nfc_llcp);
297 300
diff --git a/net/rds/ib.h b/net/rds/ib.h
index edfaaaf164eb..8d2b3d5a7c21 100644
--- a/net/rds/ib.h
+++ b/net/rds/ib.h
@@ -186,8 +186,7 @@ struct rds_ib_device {
186 struct work_struct free_work; 186 struct work_struct free_work;
187}; 187};
188 188
189#define pcidev_to_node(pcidev) pcibus_to_node(pcidev->bus) 189#define ibdev_to_node(ibdev) dev_to_node(ibdev->dma_device)
190#define ibdev_to_node(ibdev) pcidev_to_node(to_pci_dev(ibdev->dma_device))
191#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev) 190#define rdsibdev_to_node(rdsibdev) ibdev_to_node(rdsibdev->dev)
192 191
193/* bits for i_ack_flags */ 192/* bits for i_ack_flags */
diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c
index 8522a4793374..ca8e0a57d945 100644
--- a/net/sched/sch_atm.c
+++ b/net/sched/sch_atm.c
@@ -16,8 +16,6 @@
16#include <net/netlink.h> 16#include <net/netlink.h>
17#include <net/pkt_sched.h> 17#include <net/pkt_sched.h>
18 18
19extern struct socket *sockfd_lookup(int fd, int *err); /* @@@ fix this */
20
21/* 19/*
22 * The ATM queuing discipline provides a framework for invoking classifiers 20 * The ATM queuing discipline provides a framework for invoking classifiers
23 * (aka "filters"), which in turn select classes of this queuing discipline. 21 * (aka "filters"), which in turn select classes of this queuing discipline.
diff --git a/net/sunrpc/auth_gss/gss_krb5_wrap.c b/net/sunrpc/auth_gss/gss_krb5_wrap.c
index 38f388c39dce..107c4528654f 100644
--- a/net/sunrpc/auth_gss/gss_krb5_wrap.c
+++ b/net/sunrpc/auth_gss/gss_krb5_wrap.c
@@ -381,21 +381,53 @@ gss_unwrap_kerberos_v1(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
381} 381}
382 382
383/* 383/*
384 * We cannot currently handle tokens with rotated data. We need a 384 * We can shift data by up to LOCAL_BUF_LEN bytes in a pass. If we need
385 * generalized routine to rotate the data in place. It is anticipated 385 * to do more than that, we shift repeatedly. Kevin Coffman reports
386 * that we won't encounter rotated data in the general case. 386 * seeing 28 bytes as the value used by Microsoft clients and servers
387 * with AES, so this constant is chosen to allow handling 28 in one pass
388 * without using too much stack space.
389 *
390 * If that proves to be a problem perhaps we could use a more clever
391 * algorithm.
387 */ 392 */
388static u32 393#define LOCAL_BUF_LEN 32u
389rotate_left(struct krb5_ctx *kctx, u32 offset, struct xdr_buf *buf, u16 rrc) 394
395static void rotate_buf_a_little(struct xdr_buf *buf, unsigned int shift)
390{ 396{
391 unsigned int realrrc = rrc % (buf->len - offset - GSS_KRB5_TOK_HDR_LEN); 397 char head[LOCAL_BUF_LEN];
398 char tmp[LOCAL_BUF_LEN];
399 unsigned int this_len, i;
400
401 BUG_ON(shift > LOCAL_BUF_LEN);
392 402
393 if (realrrc == 0) 403 read_bytes_from_xdr_buf(buf, 0, head, shift);
394 return 0; 404 for (i = 0; i + shift < buf->len; i += LOCAL_BUF_LEN) {
405 this_len = min(LOCAL_BUF_LEN, buf->len - (i + shift));
406 read_bytes_from_xdr_buf(buf, i+shift, tmp, this_len);
407 write_bytes_to_xdr_buf(buf, i, tmp, this_len);
408 }
409 write_bytes_to_xdr_buf(buf, buf->len - shift, head, shift);
410}
395 411
396 dprintk("%s: cannot process token with rotated data: " 412static void _rotate_left(struct xdr_buf *buf, unsigned int shift)
397 "rrc %u, realrrc %u\n", __func__, rrc, realrrc); 413{
398 return 1; 414 int shifted = 0;
415 int this_shift;
416
417 shift %= buf->len;
418 while (shifted < shift) {
419 this_shift = min(shift - shifted, LOCAL_BUF_LEN);
420 rotate_buf_a_little(buf, this_shift);
421 shifted += this_shift;
422 }
423}
424
425static void rotate_left(u32 base, struct xdr_buf *buf, unsigned int shift)
426{
427 struct xdr_buf subbuf;
428
429 xdr_buf_subsegment(buf, &subbuf, base, buf->len - base);
430 _rotate_left(&subbuf, shift);
399} 431}
400 432
401static u32 433static u32
@@ -495,11 +527,8 @@ gss_unwrap_kerberos_v2(struct krb5_ctx *kctx, int offset, struct xdr_buf *buf)
495 527
496 seqnum = be64_to_cpup((__be64 *)(ptr + 8)); 528 seqnum = be64_to_cpup((__be64 *)(ptr + 8));
497 529
498 if (rrc != 0) { 530 if (rrc != 0)
499 err = rotate_left(kctx, offset, buf, rrc); 531 rotate_left(offset + 16, buf, rrc);
500 if (err)
501 return GSS_S_FAILURE;
502 }
503 532
504 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf, 533 err = (*kctx->gk5e->decrypt_v2)(kctx, offset, buf,
505 &headskip, &tailskip); 534 &headskip, &tailskip);
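
The new rotate_left()/rotate_buf_a_little() above rotate the token data in place using a small fixed scratch buffer, splitting larger shifts into LOCAL_BUF_LEN-sized passes. The plain-memory sketch below illustrates the same chunked rotation; the kernel version works on a struct xdr_buf via read/write helpers, so this flat-buffer version is only meant to show the idea.

/* Illustrative sketch only -- not the kernel implementation. */
#include <stdio.h>
#include <string.h>

#define SCRATCH 8u	/* plays the role of LOCAL_BUF_LEN */

/* rotate buf left by shift bytes, shift <= SCRATCH */
static void rotate_a_little(char *buf, unsigned int len, unsigned int shift)
{
	char head[SCRATCH], tmp[SCRATCH];
	unsigned int this_len, i;

	memcpy(head, buf, shift);		/* save the bytes that wrap around */
	for (i = 0; i + shift < len; i += SCRATCH) {
		this_len = SCRATCH;
		if (this_len > len - (i + shift))
			this_len = len - (i + shift);
		memcpy(tmp, buf + i + shift, this_len);
		memcpy(buf + i, tmp, this_len);
	}
	memcpy(buf + len - shift, head, shift);	/* saved head goes to the tail */
}

/* arbitrary shifts are split into SCRATCH-sized passes */
static void rotate_left(char *buf, unsigned int len, unsigned int shift)
{
	unsigned int shifted = 0, this_shift;

	shift %= len;
	while (shifted < shift) {
		this_shift = shift - shifted;
		if (this_shift > SCRATCH)
			this_shift = SCRATCH;
		rotate_a_little(buf, len, this_shift);
		shifted += this_shift;
	}
}

int main(void)
{
	char buf[] = "abcdefghijklmnop";

	rotate_left(buf, (unsigned int)strlen(buf), 5);
	printf("%s\n", buf);	/* fghijklmnopabcde */
	return 0;
}
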
diff --git a/net/sunrpc/auth_gss/svcauth_gss.c b/net/sunrpc/auth_gss/svcauth_gss.c
index 28b62dbb6d1e..73e957386600 100644
--- a/net/sunrpc/auth_gss/svcauth_gss.c
+++ b/net/sunrpc/auth_gss/svcauth_gss.c
@@ -336,7 +336,6 @@ struct rsc {
336 struct svc_cred cred; 336 struct svc_cred cred;
337 struct gss_svc_seq_data seqdata; 337 struct gss_svc_seq_data seqdata;
338 struct gss_ctx *mechctx; 338 struct gss_ctx *mechctx;
339 char *client_name;
340}; 339};
341 340
342static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old); 341static struct rsc *rsc_update(struct cache_detail *cd, struct rsc *new, struct rsc *old);
@@ -347,9 +346,7 @@ static void rsc_free(struct rsc *rsci)
347 kfree(rsci->handle.data); 346 kfree(rsci->handle.data);
348 if (rsci->mechctx) 347 if (rsci->mechctx)
349 gss_delete_sec_context(&rsci->mechctx); 348 gss_delete_sec_context(&rsci->mechctx);
350 if (rsci->cred.cr_group_info) 349 free_svc_cred(&rsci->cred);
351 put_group_info(rsci->cred.cr_group_info);
352 kfree(rsci->client_name);
353} 350}
354 351
355static void rsc_put(struct kref *ref) 352static void rsc_put(struct kref *ref)
@@ -387,7 +384,7 @@ rsc_init(struct cache_head *cnew, struct cache_head *ctmp)
387 tmp->handle.data = NULL; 384 tmp->handle.data = NULL;
388 new->mechctx = NULL; 385 new->mechctx = NULL;
389 new->cred.cr_group_info = NULL; 386 new->cred.cr_group_info = NULL;
390 new->client_name = NULL; 387 new->cred.cr_principal = NULL;
391} 388}
392 389
393static void 390static void
@@ -402,8 +399,8 @@ update_rsc(struct cache_head *cnew, struct cache_head *ctmp)
402 spin_lock_init(&new->seqdata.sd_lock); 399 spin_lock_init(&new->seqdata.sd_lock);
403 new->cred = tmp->cred; 400 new->cred = tmp->cred;
404 tmp->cred.cr_group_info = NULL; 401 tmp->cred.cr_group_info = NULL;
405 new->client_name = tmp->client_name; 402 new->cred.cr_principal = tmp->cred.cr_principal;
406 tmp->client_name = NULL; 403 tmp->cred.cr_principal = NULL;
407} 404}
408 405
409static struct cache_head * 406static struct cache_head *
@@ -501,8 +498,8 @@ static int rsc_parse(struct cache_detail *cd,
501 /* get client name */ 498 /* get client name */
502 len = qword_get(&mesg, buf, mlen); 499 len = qword_get(&mesg, buf, mlen);
503 if (len > 0) { 500 if (len > 0) {
504 rsci.client_name = kstrdup(buf, GFP_KERNEL); 501 rsci.cred.cr_principal = kstrdup(buf, GFP_KERNEL);
505 if (!rsci.client_name) 502 if (!rsci.cred.cr_principal)
506 goto out; 503 goto out;
507 } 504 }
508 505
@@ -932,16 +929,6 @@ struct gss_svc_data {
932 struct rsc *rsci; 929 struct rsc *rsci;
933}; 930};
934 931
935char *svc_gss_principal(struct svc_rqst *rqstp)
936{
937 struct gss_svc_data *gd = (struct gss_svc_data *)rqstp->rq_auth_data;
938
939 if (gd && gd->rsci)
940 return gd->rsci->client_name;
941 return NULL;
942}
943EXPORT_SYMBOL_GPL(svc_gss_principal);
944
945static int 932static int
946svcauth_gss_set_client(struct svc_rqst *rqstp) 933svcauth_gss_set_client(struct svc_rqst *rqstp)
947{ 934{
@@ -969,16 +956,17 @@ svcauth_gss_set_client(struct svc_rqst *rqstp)
969} 956}
970 957
971static inline int 958static inline int
972gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi *rsip) 959gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp,
960 struct xdr_netobj *out_handle, int *major_status)
973{ 961{
974 struct rsc *rsci; 962 struct rsc *rsci;
975 int rc; 963 int rc;
976 964
977 if (rsip->major_status != GSS_S_COMPLETE) 965 if (*major_status != GSS_S_COMPLETE)
978 return gss_write_null_verf(rqstp); 966 return gss_write_null_verf(rqstp);
979 rsci = gss_svc_searchbyctx(cd, &rsip->out_handle); 967 rsci = gss_svc_searchbyctx(cd, out_handle);
980 if (rsci == NULL) { 968 if (rsci == NULL) {
981 rsip->major_status = GSS_S_NO_CONTEXT; 969 *major_status = GSS_S_NO_CONTEXT;
982 return gss_write_null_verf(rqstp); 970 return gss_write_null_verf(rqstp);
983 } 971 }
984 rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN); 972 rc = gss_write_verf(rqstp, rsci->mechctx, GSS_SEQ_WIN);
@@ -986,22 +974,13 @@ gss_write_init_verf(struct cache_detail *cd, struct svc_rqst *rqstp, struct rsi
986 return rc; 974 return rc;
987} 975}
988 976
989/* 977static inline int
990 * Having read the cred already and found we're in the context 978gss_read_verf(struct rpc_gss_wire_cred *gc,
991 * initiation case, read the verifier and initiate (or check the results 979 struct kvec *argv, __be32 *authp,
992 * of) upcalls to userspace for help with context initiation. If 980 struct xdr_netobj *in_handle,
993 * the upcall results are available, write the verifier and result. 981 struct xdr_netobj *in_token)
994 * Otherwise, drop the request pending an answer to the upcall.
995 */
996static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
997 struct rpc_gss_wire_cred *gc, __be32 *authp)
998{ 982{
999 struct kvec *argv = &rqstp->rq_arg.head[0];
1000 struct kvec *resv = &rqstp->rq_res.head[0];
1001 struct xdr_netobj tmpobj; 983 struct xdr_netobj tmpobj;
1002 struct rsi *rsip, rsikey;
1003 int ret;
1004 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
1005 984
1006 /* Read the verifier; should be NULL: */ 985 /* Read the verifier; should be NULL: */
1007 *authp = rpc_autherr_badverf; 986 *authp = rpc_autherr_badverf;
@@ -1011,24 +990,67 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
1011 return SVC_DENIED; 990 return SVC_DENIED;
1012 if (svc_getnl(argv) != 0) 991 if (svc_getnl(argv) != 0)
1013 return SVC_DENIED; 992 return SVC_DENIED;
1014
1015 /* Martial context handle and token for upcall: */ 993 /* Martial context handle and token for upcall: */
1016 *authp = rpc_autherr_badcred; 994 *authp = rpc_autherr_badcred;
1017 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0) 995 if (gc->gc_proc == RPC_GSS_PROC_INIT && gc->gc_ctx.len != 0)
1018 return SVC_DENIED; 996 return SVC_DENIED;
1019 memset(&rsikey, 0, sizeof(rsikey)); 997 if (dup_netobj(in_handle, &gc->gc_ctx))
1020 if (dup_netobj(&rsikey.in_handle, &gc->gc_ctx))
1021 return SVC_CLOSE; 998 return SVC_CLOSE;
1022 *authp = rpc_autherr_badverf; 999 *authp = rpc_autherr_badverf;
1023 if (svc_safe_getnetobj(argv, &tmpobj)) { 1000 if (svc_safe_getnetobj(argv, &tmpobj)) {
1024 kfree(rsikey.in_handle.data); 1001 kfree(in_handle->data);
1025 return SVC_DENIED; 1002 return SVC_DENIED;
1026 } 1003 }
1027 if (dup_netobj(&rsikey.in_token, &tmpobj)) { 1004 if (dup_netobj(in_token, &tmpobj)) {
1028 kfree(rsikey.in_handle.data); 1005 kfree(in_handle->data);
1029 return SVC_CLOSE; 1006 return SVC_CLOSE;
1030 } 1007 }
1031 1008
1009 return 0;
1010}
1011
1012static inline int
1013gss_write_resv(struct kvec *resv, size_t size_limit,
1014 struct xdr_netobj *out_handle, struct xdr_netobj *out_token,
1015 int major_status, int minor_status)
1016{
1017 if (resv->iov_len + 4 > size_limit)
1018 return -1;
1019 svc_putnl(resv, RPC_SUCCESS);
1020 if (svc_safe_putnetobj(resv, out_handle))
1021 return -1;
1022 if (resv->iov_len + 3 * 4 > size_limit)
1023 return -1;
1024 svc_putnl(resv, major_status);
1025 svc_putnl(resv, minor_status);
1026 svc_putnl(resv, GSS_SEQ_WIN);
1027 if (svc_safe_putnetobj(resv, out_token))
1028 return -1;
1029 return 0;
1030}
1031
1032/*
1033 * Having read the cred already and found we're in the context
1034 * initiation case, read the verifier and initiate (or check the results
1035 * of) upcalls to userspace for help with context initiation. If
1036 * the upcall results are available, write the verifier and result.
1037 * Otherwise, drop the request pending an answer to the upcall.
1038 */
1039static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
1040 struct rpc_gss_wire_cred *gc, __be32 *authp)
1041{
1042 struct kvec *argv = &rqstp->rq_arg.head[0];
1043 struct kvec *resv = &rqstp->rq_res.head[0];
1044 struct rsi *rsip, rsikey;
1045 int ret;
1046 struct sunrpc_net *sn = net_generic(rqstp->rq_xprt->xpt_net, sunrpc_net_id);
1047
1048 memset(&rsikey, 0, sizeof(rsikey));
1049 ret = gss_read_verf(gc, argv, authp,
1050 &rsikey.in_handle, &rsikey.in_token);
1051 if (ret)
1052 return ret;
1053
1032 /* Perform upcall, or find upcall result: */ 1054 /* Perform upcall, or find upcall result: */
1033 rsip = rsi_lookup(sn->rsi_cache, &rsikey); 1055 rsip = rsi_lookup(sn->rsi_cache, &rsikey);
1034 rsi_free(&rsikey); 1056 rsi_free(&rsikey);
@@ -1040,19 +1062,12 @@ static int svcauth_gss_handle_init(struct svc_rqst *rqstp,
1040 1062
1041 ret = SVC_CLOSE; 1063 ret = SVC_CLOSE;
1042 /* Got an answer to the upcall; use it: */ 1064 /* Got an answer to the upcall; use it: */
1043 if (gss_write_init_verf(sn->rsc_cache, rqstp, rsip)) 1065 if (gss_write_init_verf(sn->rsc_cache, rqstp,
1044 goto out; 1066 &rsip->out_handle, &rsip->major_status))
1045 if (resv->iov_len + 4 > PAGE_SIZE)
1046 goto out; 1067 goto out;
1047 svc_putnl(resv, RPC_SUCCESS); 1068 if (gss_write_resv(resv, PAGE_SIZE,
1048 if (svc_safe_putnetobj(resv, &rsip->out_handle)) 1069 &rsip->out_handle, &rsip->out_token,
1049 goto out; 1070 rsip->major_status, rsip->minor_status))
1050 if (resv->iov_len + 3 * 4 > PAGE_SIZE)
1051 goto out;
1052 svc_putnl(resv, rsip->major_status);
1053 svc_putnl(resv, rsip->minor_status);
1054 svc_putnl(resv, GSS_SEQ_WIN);
1055 if (svc_safe_putnetobj(resv, &rsip->out_token))
1056 goto out; 1071 goto out;
1057 1072
1058 ret = SVC_COMPLETE; 1073 ret = SVC_COMPLETE;
@@ -1192,7 +1207,7 @@ svcauth_gss_accept(struct svc_rqst *rqstp, __be32 *authp)
1192 } 1207 }
1193 svcdata->rsci = rsci; 1208 svcdata->rsci = rsci;
1194 cache_get(&rsci->h); 1209 cache_get(&rsci->h);
1195 rqstp->rq_flavor = gss_svc_to_pseudoflavor( 1210 rqstp->rq_cred.cr_flavor = gss_svc_to_pseudoflavor(
1196 rsci->mechctx->mech_type, gc->gc_svc); 1211 rsci->mechctx->mech_type, gc->gc_svc);
1197 ret = SVC_OK; 1212 ret = SVC_OK;
1198 goto out; 1213 goto out;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index 04040476082e..21fde99e5c56 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -71,7 +71,9 @@ static void rpc_purge_list(wait_queue_head_t *waitq, struct list_head *head,
71 msg->errno = err; 71 msg->errno = err;
72 destroy_msg(msg); 72 destroy_msg(msg);
73 } while (!list_empty(head)); 73 } while (!list_empty(head));
74 wake_up(waitq); 74
75 if (waitq)
76 wake_up(waitq);
75} 77}
76 78
77static void 79static void
@@ -91,11 +93,9 @@ rpc_timeout_upcall_queue(struct work_struct *work)
91 } 93 }
92 dentry = dget(pipe->dentry); 94 dentry = dget(pipe->dentry);
93 spin_unlock(&pipe->lock); 95 spin_unlock(&pipe->lock);
94 if (dentry) { 96 rpc_purge_list(dentry ? &RPC_I(dentry->d_inode)->waitq : NULL,
95 rpc_purge_list(&RPC_I(dentry->d_inode)->waitq, 97 &free_list, destroy_msg, -ETIMEDOUT);
96 &free_list, destroy_msg, -ETIMEDOUT); 98 dput(dentry);
97 dput(dentry);
98 }
99} 99}
100 100
101ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg, 101ssize_t rpc_pipe_generic_upcall(struct file *filp, struct rpc_pipe_msg *msg,
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 3c0653439f3d..92509ffe15fc 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -180,14 +180,16 @@ void rpcb_put_local(struct net *net)
180 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); 180 struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
181 struct rpc_clnt *clnt = sn->rpcb_local_clnt; 181 struct rpc_clnt *clnt = sn->rpcb_local_clnt;
182 struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4; 182 struct rpc_clnt *clnt4 = sn->rpcb_local_clnt4;
183 int shutdown; 183 int shutdown = 0;
184 184
185 spin_lock(&sn->rpcb_clnt_lock); 185 spin_lock(&sn->rpcb_clnt_lock);
186 if (--sn->rpcb_users == 0) { 186 if (sn->rpcb_users) {
187 sn->rpcb_local_clnt = NULL; 187 if (--sn->rpcb_users == 0) {
188 sn->rpcb_local_clnt4 = NULL; 188 sn->rpcb_local_clnt = NULL;
189 sn->rpcb_local_clnt4 = NULL;
190 }
191 shutdown = !sn->rpcb_users;
189 } 192 }
190 shutdown = !sn->rpcb_users;
191 spin_unlock(&sn->rpcb_clnt_lock); 193 spin_unlock(&sn->rpcb_clnt_lock);
192 194
193 if (shutdown) { 195 if (shutdown) {
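
The rpcb_put_local() fix above only decrements the user count while it is still non-zero and decides whether to shut the clients down under the lock, so a stray extra put can no longer wrap the counter. A toy sketch of that guard follows; the types and names are invented, and only the control flow mirrors the patch.

/* Illustrative sketch only -- not sunrpc code. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct toy_net {
	pthread_mutex_t lock;
	unsigned int users;
	bool have_clnt;
};

static void toy_put(struct toy_net *n)
{
	bool shutdown = false;

	pthread_mutex_lock(&n->lock);
	if (n->users) {			/* never underflow an unreferenced net */
		if (--n->users == 0)
			n->have_clnt = false;
		shutdown = (n->users == 0);
	}
	pthread_mutex_unlock(&n->lock);

	if (shutdown)
		printf("shutting down rpcbind clients\n");
}

int main(void)
{
	struct toy_net n = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.users = 1,
		.have_clnt = true,
	};

	toy_put(&n);	/* last user: triggers shutdown */
	toy_put(&n);	/* extra put: harmless, no underflow, no second shutdown */
	return 0;
}
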
diff --git a/net/sunrpc/svc.c b/net/sunrpc/svc.c
index 017c0117d154..3ee7461926d8 100644
--- a/net/sunrpc/svc.c
+++ b/net/sunrpc/svc.c
@@ -407,6 +407,14 @@ static int svc_uses_rpcbind(struct svc_serv *serv)
407 return 0; 407 return 0;
408} 408}
409 409
410int svc_bind(struct svc_serv *serv, struct net *net)
411{
412 if (!svc_uses_rpcbind(serv))
413 return 0;
414 return svc_rpcb_setup(serv, net);
415}
416EXPORT_SYMBOL_GPL(svc_bind);
417
410/* 418/*
411 * Create an RPC service 419 * Create an RPC service
412 */ 420 */
@@ -471,15 +479,8 @@ __svc_create(struct svc_program *prog, unsigned int bufsize, int npools,
471 spin_lock_init(&pool->sp_lock); 479 spin_lock_init(&pool->sp_lock);
472 } 480 }
473 481
474 if (svc_uses_rpcbind(serv)) { 482 if (svc_uses_rpcbind(serv) && (!serv->sv_shutdown))
475 if (svc_rpcb_setup(serv, current->nsproxy->net_ns) < 0) { 483 serv->sv_shutdown = svc_rpcb_cleanup;
476 kfree(serv->sv_pools);
477 kfree(serv);
478 return NULL;
479 }
480 if (!serv->sv_shutdown)
481 serv->sv_shutdown = svc_rpcb_cleanup;
482 }
483 484
484 return serv; 485 return serv;
485} 486}
@@ -536,8 +537,6 @@ EXPORT_SYMBOL_GPL(svc_shutdown_net);
536void 537void
537svc_destroy(struct svc_serv *serv) 538svc_destroy(struct svc_serv *serv)
538{ 539{
539 struct net *net = current->nsproxy->net_ns;
540
541 dprintk("svc: svc_destroy(%s, %d)\n", 540 dprintk("svc: svc_destroy(%s, %d)\n",
542 serv->sv_program->pg_name, 541 serv->sv_program->pg_name,
543 serv->sv_nrthreads); 542 serv->sv_nrthreads);
@@ -552,8 +551,6 @@ svc_destroy(struct svc_serv *serv)
552 551
553 del_timer_sync(&serv->sv_temptimer); 552 del_timer_sync(&serv->sv_temptimer);
554 553
555 svc_shutdown_net(serv, net);
556
557 /* 554 /*
558 * The last user is gone and thus all sockets have to be destroyed to 555 * The last user is gone and thus all sockets have to be destroyed to
559 * the point. Check this. 556 * the point. Check this.
@@ -1377,7 +1374,8 @@ bc_svc_process(struct svc_serv *serv, struct rpc_rqst *req,
1377 sizeof(req->rq_snd_buf)); 1374 sizeof(req->rq_snd_buf));
1378 return bc_send(req); 1375 return bc_send(req);
1379 } else { 1376 } else {
1380 /* Nothing to do to drop request */ 1377 /* drop request */
1378 xprt_free_bc_request(req);
1381 return 0; 1379 return 0;
1382 } 1380 }
1383} 1381}
diff --git a/net/sunrpc/svc_xprt.c b/net/sunrpc/svc_xprt.c
index b98ee3514912..88f2bf671960 100644
--- a/net/sunrpc/svc_xprt.c
+++ b/net/sunrpc/svc_xprt.c
@@ -598,6 +598,7 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
598 598
599 /* now allocate needed pages. If we get a failure, sleep briefly */ 599 /* now allocate needed pages. If we get a failure, sleep briefly */
600 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; 600 pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE;
601 BUG_ON(pages >= RPCSVC_MAXPAGES);
601 for (i = 0; i < pages ; i++) 602 for (i = 0; i < pages ; i++)
602 while (rqstp->rq_pages[i] == NULL) { 603 while (rqstp->rq_pages[i] == NULL) {
603 struct page *p = alloc_page(GFP_KERNEL); 604 struct page *p = alloc_page(GFP_KERNEL);
@@ -612,7 +613,6 @@ int svc_recv(struct svc_rqst *rqstp, long timeout)
612 rqstp->rq_pages[i] = p; 613 rqstp->rq_pages[i] = p;
613 } 614 }
614 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ 615 rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */
615 BUG_ON(pages >= RPCSVC_MAXPAGES);
616 616
617 /* Make arg->head point to first page and arg->pages point to rest */ 617 /* Make arg->head point to first page and arg->pages point to rest */
618 arg = &rqstp->rq_arg; 618 arg = &rqstp->rq_arg;
@@ -973,7 +973,7 @@ void svc_close_net(struct svc_serv *serv, struct net *net)
973 svc_clear_pools(serv, net); 973 svc_clear_pools(serv, net);
974 /* 974 /*
975 * At this point the sp_sockets lists will stay empty, since 975 * At this point the sp_sockets lists will stay empty, since
976 * svc_enqueue will not add new entries without taking the 976 * svc_xprt_enqueue will not add new entries without taking the
977 * sp_lock and checking XPT_BUSY. 977 * sp_lock and checking XPT_BUSY.
978 */ 978 */
979 svc_clear_list(&serv->sv_tempsocks, net); 979 svc_clear_list(&serv->sv_tempsocks, net);
diff --git a/net/sunrpc/svcauth_unix.c b/net/sunrpc/svcauth_unix.c
index 71ec8530ec8c..2777fa896645 100644
--- a/net/sunrpc/svcauth_unix.c
+++ b/net/sunrpc/svcauth_unix.c
@@ -347,17 +347,12 @@ static inline int ip_map_update(struct net *net, struct ip_map *ipm,
347 return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry); 347 return __ip_map_update(sn->ip_map_cache, ipm, udom, expiry);
348} 348}
349 349
350 350void svcauth_unix_purge(struct net *net)
351void svcauth_unix_purge(void)
352{ 351{
353 struct net *net; 352 struct sunrpc_net *sn;
354
355 for_each_net(net) {
356 struct sunrpc_net *sn;
357 353
358 sn = net_generic(net, sunrpc_net_id); 354 sn = net_generic(net, sunrpc_net_id);
359 cache_purge(sn->ip_map_cache); 355 cache_purge(sn->ip_map_cache);
360 }
361} 356}
362EXPORT_SYMBOL_GPL(svcauth_unix_purge); 357EXPORT_SYMBOL_GPL(svcauth_unix_purge);
363 358
@@ -751,6 +746,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
751 struct svc_cred *cred = &rqstp->rq_cred; 746 struct svc_cred *cred = &rqstp->rq_cred;
752 747
753 cred->cr_group_info = NULL; 748 cred->cr_group_info = NULL;
749 cred->cr_principal = NULL;
754 rqstp->rq_client = NULL; 750 rqstp->rq_client = NULL;
755 751
756 if (argv->iov_len < 3*4) 752 if (argv->iov_len < 3*4)
@@ -778,7 +774,7 @@ svcauth_null_accept(struct svc_rqst *rqstp, __be32 *authp)
778 svc_putnl(resv, RPC_AUTH_NULL); 774 svc_putnl(resv, RPC_AUTH_NULL);
779 svc_putnl(resv, 0); 775 svc_putnl(resv, 0);
780 776
781 rqstp->rq_flavor = RPC_AUTH_NULL; 777 rqstp->rq_cred.cr_flavor = RPC_AUTH_NULL;
782 return SVC_OK; 778 return SVC_OK;
783} 779}
784 780
@@ -816,6 +812,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
816 int len = argv->iov_len; 812 int len = argv->iov_len;
817 813
818 cred->cr_group_info = NULL; 814 cred->cr_group_info = NULL;
815 cred->cr_principal = NULL;
819 rqstp->rq_client = NULL; 816 rqstp->rq_client = NULL;
820 817
821 if ((len -= 3*4) < 0) 818 if ((len -= 3*4) < 0)
@@ -852,7 +849,7 @@ svcauth_unix_accept(struct svc_rqst *rqstp, __be32 *authp)
852 svc_putnl(resv, RPC_AUTH_NULL); 849 svc_putnl(resv, RPC_AUTH_NULL);
853 svc_putnl(resv, 0); 850 svc_putnl(resv, 0);
854 851
855 rqstp->rq_flavor = RPC_AUTH_UNIX; 852 rqstp->rq_cred.cr_flavor = RPC_AUTH_UNIX;
856 return SVC_OK; 853 return SVC_OK;
857 854
858badcred: 855badcred:
diff --git a/net/wanrouter/Kconfig b/net/wanrouter/Kconfig
index 61ceae0b9566..a157a2e64e18 100644
--- a/net/wanrouter/Kconfig
+++ b/net/wanrouter/Kconfig
@@ -3,7 +3,7 @@
3# 3#
4 4
5config WAN_ROUTER 5config WAN_ROUTER
6 tristate "WAN router" 6 tristate "WAN router (DEPRECATED)"
7 depends on EXPERIMENTAL 7 depends on EXPERIMENTAL
8 ---help--- 8 ---help---
9 Wide Area Networks (WANs), such as X.25, frame relay and leased 9 Wide Area Networks (WANs), such as X.25, frame relay and leased
diff --git a/net/wireless/ibss.c b/net/wireless/ibss.c
index d2a19b0ff71f..89baa3328411 100644
--- a/net/wireless/ibss.c
+++ b/net/wireless/ibss.c
@@ -42,6 +42,7 @@ void __cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid)
42 cfg80211_hold_bss(bss_from_pub(bss)); 42 cfg80211_hold_bss(bss_from_pub(bss));
43 wdev->current_bss = bss_from_pub(bss); 43 wdev->current_bss = bss_from_pub(bss);
44 44
45 wdev->sme_state = CFG80211_SME_CONNECTED;
45 cfg80211_upload_connect_keys(wdev); 46 cfg80211_upload_connect_keys(wdev);
46 47
47 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid, 48 nl80211_send_ibss_bssid(wiphy_to_dev(wdev->wiphy), dev, bssid,
@@ -60,7 +61,7 @@ void cfg80211_ibss_joined(struct net_device *dev, const u8 *bssid, gfp_t gfp)
60 struct cfg80211_event *ev; 61 struct cfg80211_event *ev;
61 unsigned long flags; 62 unsigned long flags;
62 63
63 CFG80211_DEV_WARN_ON(!wdev->ssid_len); 64 CFG80211_DEV_WARN_ON(wdev->sme_state != CFG80211_SME_CONNECTING);
64 65
65 ev = kzalloc(sizeof(*ev), gfp); 66 ev = kzalloc(sizeof(*ev), gfp);
66 if (!ev) 67 if (!ev)
@@ -115,9 +116,11 @@ int __cfg80211_join_ibss(struct cfg80211_registered_device *rdev,
115#ifdef CONFIG_CFG80211_WEXT 116#ifdef CONFIG_CFG80211_WEXT
116 wdev->wext.ibss.channel = params->channel; 117 wdev->wext.ibss.channel = params->channel;
117#endif 118#endif
119 wdev->sme_state = CFG80211_SME_CONNECTING;
118 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params); 120 err = rdev->ops->join_ibss(&rdev->wiphy, dev, params);
119 if (err) { 121 if (err) {
120 wdev->connect_keys = NULL; 122 wdev->connect_keys = NULL;
123 wdev->sme_state = CFG80211_SME_IDLE;
121 return err; 124 return err;
122 } 125 }
123 126
@@ -169,6 +172,7 @@ static void __cfg80211_clear_ibss(struct net_device *dev, bool nowext)
169 } 172 }
170 173
171 wdev->current_bss = NULL; 174 wdev->current_bss = NULL;
175 wdev->sme_state = CFG80211_SME_IDLE;
172 wdev->ssid_len = 0; 176 wdev->ssid_len = 0;
173#ifdef CONFIG_CFG80211_WEXT 177#ifdef CONFIG_CFG80211_WEXT
174 if (!nowext) 178 if (!nowext)
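
The ibss.c hunk above now drives wdev->sme_state through the same IDLE -> CONNECTING -> CONNECTED lifecycle used for managed-mode connections: joining sets CONNECTING, the joined event sets CONNECTED, and the clear path or a failed join returns to IDLE. Below is a minimal standalone sketch of that state machine; the names are local stand-ins, not the cfg80211 API.

/* Illustrative sketch only: mirrors the sme_state transitions added in the
 * ibss.c hunk above. */
#include <stdio.h>

enum sme_state { SME_IDLE, SME_CONNECTING, SME_CONNECTED };

struct wdev_sketch { enum sme_state sme_state; };

static int join_ibss(struct wdev_sketch *w, int driver_err)
{
        w->sme_state = SME_CONNECTING;          /* set before calling the driver */
        if (driver_err) {
                w->sme_state = SME_IDLE;        /* roll back on failure */
                return driver_err;
        }
        return 0;
}

static void ibss_joined(struct wdev_sketch *w)
{
        w->sme_state = SME_CONNECTED;           /* event reported by the driver */
}

static void clear_ibss(struct wdev_sketch *w)
{
        w->sme_state = SME_IDLE;                /* leave / teardown */
}

int main(void)
{
        struct wdev_sketch w = { SME_IDLE };

        join_ibss(&w, 0);
        ibss_joined(&w);
        printf("state after join: %d\n", w.sme_state);
        clear_ibss(&w);
        printf("state after leave: %d\n", w.sme_state);
        return 0;
}
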
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 55d99466babb..8f2d68fc3a44 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -935,6 +935,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
935 enum nl80211_iftype iftype) 935 enum nl80211_iftype iftype)
936{ 936{
937 struct wireless_dev *wdev_iter; 937 struct wireless_dev *wdev_iter;
938 u32 used_iftypes = BIT(iftype);
938 int num[NUM_NL80211_IFTYPES]; 939 int num[NUM_NL80211_IFTYPES];
939 int total = 1; 940 int total = 1;
940 int i, j; 941 int i, j;
@@ -961,6 +962,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
961 962
962 num[wdev_iter->iftype]++; 963 num[wdev_iter->iftype]++;
963 total++; 964 total++;
965 used_iftypes |= BIT(wdev_iter->iftype);
964 } 966 }
965 mutex_unlock(&rdev->devlist_mtx); 967 mutex_unlock(&rdev->devlist_mtx);
966 968
@@ -970,6 +972,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
970 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) { 972 for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
971 const struct ieee80211_iface_combination *c; 973 const struct ieee80211_iface_combination *c;
972 struct ieee80211_iface_limit *limits; 974 struct ieee80211_iface_limit *limits;
975 u32 all_iftypes = 0;
973 976
974 c = &rdev->wiphy.iface_combinations[i]; 977 c = &rdev->wiphy.iface_combinations[i];
975 978
@@ -984,6 +987,7 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
984 if (rdev->wiphy.software_iftypes & BIT(iftype)) 987 if (rdev->wiphy.software_iftypes & BIT(iftype))
985 continue; 988 continue;
986 for (j = 0; j < c->n_limits; j++) { 989 for (j = 0; j < c->n_limits; j++) {
990 all_iftypes |= limits[j].types;
987 if (!(limits[j].types & BIT(iftype))) 991 if (!(limits[j].types & BIT(iftype)))
988 continue; 992 continue;
989 if (limits[j].max < num[iftype]) 993 if (limits[j].max < num[iftype])
@@ -991,7 +995,20 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
991 limits[j].max -= num[iftype]; 995 limits[j].max -= num[iftype];
992 } 996 }
993 } 997 }
994 /* yay, it fits */ 998
999 /*
1000 * Finally check that all iftypes that we're currently
1001 * using are actually part of this combination. If they
1002 * aren't then we can't use this combination and have
1003 * to continue to the next.
1004 */
1005 if ((all_iftypes & used_iftypes) != used_iftypes)
1006 goto cont;
1007
1008 /*
1009 * This combination covered all interface types and
1010 * supported the requested numbers, so we're good.
1011 */
995 kfree(limits); 1012 kfree(limits);
996 return 0; 1013 return 0;
997 cont: 1014 cont:
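
The util.c change above collects every interface type currently in use into used_iftypes and every type covered by a candidate combination into all_iftypes, then rejects the combination unless the used types are a subset of the covered ones. A small standalone sketch of that subset test follows; the bit positions are stand-ins for nl80211 interface types.

/* Illustrative sketch of the subset check added above: a combination is
 * only acceptable if every in-use interface type appears in its limits. */
#include <stdio.h>
#include <stdint.h>

static int combination_covers(uint32_t all_iftypes, uint32_t used_iftypes)
{
        return (all_iftypes & used_iftypes) == used_iftypes;
}

int main(void)
{
        uint32_t used    = (1u << 2) | (1u << 3);               /* two types in use */
        uint32_t combo_a = (1u << 2) | (1u << 3) | (1u << 4);   /* covers both */
        uint32_t combo_b = (1u << 2);                           /* misses one used type */

        printf("combo_a ok: %d\n", combination_covers(combo_a, used));
        printf("combo_b ok: %d\n", combination_covers(combo_b, used));
        return 0;
}
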
diff --git a/net/xfrm/xfrm_policy.c b/net/xfrm/xfrm_policy.c
index c53e8f42aa75..ccfbd328a69d 100644
--- a/net/xfrm/xfrm_policy.c
+++ b/net/xfrm/xfrm_policy.c
@@ -1921,6 +1921,9 @@ no_transform:
1921 } 1921 }
1922ok: 1922ok:
1923 xfrm_pols_put(pols, drop_pols); 1923 xfrm_pols_put(pols, drop_pols);
1924 if (dst && dst->xfrm &&
1925 dst->xfrm->props.mode == XFRM_MODE_TUNNEL)
1926 dst->flags |= DST_XFRM_TUNNEL;
1924 return dst; 1927 return dst;
1925 1928
1926nopol: 1929nopol:
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index faea0ec612bf..e5bd60ff48e3 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -2382,6 +2382,19 @@ sub process {
2382 } 2382 }
2383 } 2383 }
2384 2384
2385 if ($line =~ /\bprintk\s*\(\s*KERN_([A-Z]+)/) {
2386 my $orig = $1;
2387 my $level = lc($orig);
2388 $level = "warn" if ($level eq "warning");
2389 WARN("PREFER_PR_LEVEL",
2390 "Prefer pr_$level(... to printk(KERN_$1, ...\n" . $herecurr);
2391 }
2392
2393 if ($line =~ /\bpr_warning\s*\(/) {
2394 WARN("PREFER_PR_LEVEL",
2395 "Prefer pr_warn(... to pr_warning(...\n" . $herecurr);
2396 }
2397
2385# function brace can't be on same line, except for #defines of do while, 2398# function brace can't be on same line, except for #defines of do while,
2386# or if closed on same line 2399# or if closed on same line
2387 if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and 2400 if (($line=~/$Type\s*$Ident\(.*\).*\s{/) and
@@ -2448,6 +2461,13 @@ sub process {
2448 "space prohibited between function name and open parenthesis '('\n" . $herecurr); 2461 "space prohibited between function name and open parenthesis '('\n" . $herecurr);
2449 } 2462 }
2450 } 2463 }
2464
2465# check for whitespace before a non-naked semicolon
2466 if ($line =~ /^\+.*\S\s+;/) {
2467 CHK("SPACING",
2468 "space prohibited before semicolon\n" . $herecurr);
2469 }
2470
2451# Check operator spacing. 2471# Check operator spacing.
2452 if (!($line=~/\#\s*include/)) { 2472 if (!($line=~/\#\s*include/)) {
2453 my $ops = qr{ 2473 my $ops = qr{
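
The checkpatch.pl additions above warn about raw printk(KERN_<LEVEL>, ...) calls (suggesting the matching pr_<level>() helper, with KERN_WARNING mapped to pr_warn), warn about the pr_warning() alias, and flag whitespace before a non-naked semicolon. The kernel-style fragment below only illustrates the kind of lines the new checks catch and the forms they recommend.

/* Illustration only: lines the new checkpatch rules flag, next to the
 * preferred forms. pr_warn() is the standard wrapper for
 * printk(KERN_WARNING ...). */
#include <linux/printk.h>

static void checkpatch_examples(const char *name, int count)
{
        printk(KERN_WARNING "disk %s is full\n", name); /* flagged: prefer pr_warn(...) */
        pr_warn("disk %s is full\n", name);             /* preferred */

        pr_warning("old alias\n");      /* flagged: prefer pr_warn(...) */
        pr_warn("old alias\n");         /* preferred */

        count++ ;                       /* flagged: space before ';' */
        count++;                        /* preferred */
}
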
diff --git a/scripts/get_maintainer.pl b/scripts/get_maintainer.pl
index 0948c6b5a321..8b673dd4627f 100755
--- a/scripts/get_maintainer.pl
+++ b/scripts/get_maintainer.pl
@@ -83,6 +83,8 @@ push(@signature_tags, "Signed-off-by:");
83push(@signature_tags, "Reviewed-by:"); 83push(@signature_tags, "Reviewed-by:");
84push(@signature_tags, "Acked-by:"); 84push(@signature_tags, "Acked-by:");
85 85
86my $signature_pattern = "\(" . join("|", @signature_tags) . "\)";
87
86# rfc822 email address - preloaded methods go here. 88# rfc822 email address - preloaded methods go here.
87my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])"; 89my $rfc822_lwsp = "(?:(?:\\r\\n)?[ \\t])";
88my $rfc822_char = '[\\000-\\377]'; 90my $rfc822_char = '[\\000-\\377]';
@@ -473,7 +475,6 @@ my @subsystem = ();
473my @status = (); 475my @status = ();
474my %deduplicate_name_hash = (); 476my %deduplicate_name_hash = ();
475my %deduplicate_address_hash = (); 477my %deduplicate_address_hash = ();
476my $signature_pattern;
477 478
478my @maintainers = get_maintainers(); 479my @maintainers = get_maintainers();
479 480
diff --git a/security/apparmor/lsm.c b/security/apparmor/lsm.c
index 032daab449b0..8ea39aabe948 100644
--- a/security/apparmor/lsm.c
+++ b/security/apparmor/lsm.c
@@ -490,17 +490,9 @@ static int common_mmap(int op, struct file *file, unsigned long prot,
490 return common_file_perm(op, file, mask); 490 return common_file_perm(op, file, mask);
491} 491}
492 492
493static int apparmor_file_mmap(struct file *file, unsigned long reqprot, 493static int apparmor_mmap_file(struct file *file, unsigned long reqprot,
494 unsigned long prot, unsigned long flags, 494 unsigned long prot, unsigned long flags)
495 unsigned long addr, unsigned long addr_only)
496{ 495{
497 int rc = 0;
498
499 /* do DAC check */
500 rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
501 if (rc || addr_only)
502 return rc;
503
504 return common_mmap(OP_FMMAP, file, prot, flags); 496 return common_mmap(OP_FMMAP, file, prot, flags);
505} 497}
506 498
@@ -646,7 +638,8 @@ static struct security_operations apparmor_ops = {
646 .file_permission = apparmor_file_permission, 638 .file_permission = apparmor_file_permission,
647 .file_alloc_security = apparmor_file_alloc_security, 639 .file_alloc_security = apparmor_file_alloc_security,
648 .file_free_security = apparmor_file_free_security, 640 .file_free_security = apparmor_file_free_security,
649 .file_mmap = apparmor_file_mmap, 641 .mmap_file = apparmor_mmap_file,
642 .mmap_addr = cap_mmap_addr,
650 .file_mprotect = apparmor_file_mprotect, 643 .file_mprotect = apparmor_file_mprotect,
651 .file_lock = apparmor_file_lock, 644 .file_lock = apparmor_file_lock,
652 645
diff --git a/security/capability.c b/security/capability.c
index fca889676c5e..61095df8b89a 100644
--- a/security/capability.c
+++ b/security/capability.c
@@ -949,7 +949,8 @@ void __init security_fixup_ops(struct security_operations *ops)
949 set_to_cap_if_null(ops, file_alloc_security); 949 set_to_cap_if_null(ops, file_alloc_security);
950 set_to_cap_if_null(ops, file_free_security); 950 set_to_cap_if_null(ops, file_free_security);
951 set_to_cap_if_null(ops, file_ioctl); 951 set_to_cap_if_null(ops, file_ioctl);
952 set_to_cap_if_null(ops, file_mmap); 952 set_to_cap_if_null(ops, mmap_addr);
953 set_to_cap_if_null(ops, mmap_file);
953 set_to_cap_if_null(ops, file_mprotect); 954 set_to_cap_if_null(ops, file_mprotect);
954 set_to_cap_if_null(ops, file_lock); 955 set_to_cap_if_null(ops, file_lock);
955 set_to_cap_if_null(ops, file_fcntl); 956 set_to_cap_if_null(ops, file_fcntl);
diff --git a/security/commoncap.c b/security/commoncap.c
index e771cb1b2d79..6dbae4650abe 100644
--- a/security/commoncap.c
+++ b/security/commoncap.c
@@ -958,22 +958,15 @@ int cap_vm_enough_memory(struct mm_struct *mm, long pages)
958} 958}
959 959
960/* 960/*
961 * cap_file_mmap - check if able to map given addr 961 * cap_mmap_addr - check if able to map given addr
962 * @file: unused
963 * @reqprot: unused
964 * @prot: unused
965 * @flags: unused
966 * @addr: address attempting to be mapped 962 * @addr: address attempting to be mapped
967 * @addr_only: unused
968 * 963 *
969 * If the process is attempting to map memory below dac_mmap_min_addr they need 964 * If the process is attempting to map memory below dac_mmap_min_addr they need
970 * CAP_SYS_RAWIO. The other parameters to this function are unused by the 965 * CAP_SYS_RAWIO. The other parameters to this function are unused by the
971 * capability security module. Returns 0 if this mapping should be allowed 966 * capability security module. Returns 0 if this mapping should be allowed
972 * -EPERM if not. 967 * -EPERM if not.
973 */ 968 */
974int cap_file_mmap(struct file *file, unsigned long reqprot, 969int cap_mmap_addr(unsigned long addr)
975 unsigned long prot, unsigned long flags,
976 unsigned long addr, unsigned long addr_only)
977{ 970{
978 int ret = 0; 971 int ret = 0;
979 972
@@ -986,3 +979,9 @@ int cap_file_mmap(struct file *file, unsigned long reqprot,
986 } 979 }
987 return ret; 980 return ret;
988} 981}
982
983int cap_mmap_file(struct file *file, unsigned long reqprot,
984 unsigned long prot, unsigned long flags)
985{
986 return 0;
987}
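
The commoncap.c change above splits the old file_mmap hook into cap_mmap_addr(), which keeps the mmap_min_addr/CAP_SYS_RAWIO check, and a trivially permissive cap_mmap_file(). The security-module diffs that follow wire these up as two separate entries in security_operations. Below is an abridged, hedged sketch of how a module fills the two hooks, modelled on the apparmor and smack hunks later in this series; example_mmap_file is a placeholder name and a real ops table fills many more hooks.

/* Sketch only: how an LSM populates the split hooks after this series.
 * cap_mmap_addr is the default low-address DAC check exported above. */
#include <linux/security.h>

static int example_mmap_file(struct file *file, unsigned long reqprot,
                             unsigned long prot, unsigned long flags)
{
        /* module-specific checks on the mapping itself go here */
        return 0;
}

static struct security_operations example_ops = {
        .mmap_file = example_mmap_file,
        .mmap_addr = cap_mmap_addr,     /* reuse the default address check */
};
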
diff --git a/security/keys/compat.c b/security/keys/compat.c
index fab4f8dda6c6..c92d42b021aa 100644
--- a/security/keys/compat.c
+++ b/security/keys/compat.c
@@ -38,7 +38,7 @@ long compat_keyctl_instantiate_key_iov(
38 38
39 ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc, 39 ret = compat_rw_copy_check_uvector(WRITE, _payload_iov, ioc,
40 ARRAY_SIZE(iovstack), 40 ARRAY_SIZE(iovstack),
41 iovstack, &iov, 1); 41 iovstack, &iov);
42 if (ret < 0) 42 if (ret < 0)
43 return ret; 43 return ret;
44 if (ret == 0) 44 if (ret == 0)
diff --git a/security/keys/internal.h b/security/keys/internal.h
index f711b094ed41..3dcbf86b0d31 100644
--- a/security/keys/internal.h
+++ b/security/keys/internal.h
@@ -14,6 +14,7 @@
14 14
15#include <linux/sched.h> 15#include <linux/sched.h>
16#include <linux/key-type.h> 16#include <linux/key-type.h>
17#include <linux/task_work.h>
17 18
18#ifdef __KDEBUG 19#ifdef __KDEBUG
19#define kenter(FMT, ...) \ 20#define kenter(FMT, ...) \
@@ -148,6 +149,7 @@ extern key_ref_t lookup_user_key(key_serial_t id, unsigned long flags,
148#define KEY_LOOKUP_FOR_UNLINK 0x04 149#define KEY_LOOKUP_FOR_UNLINK 0x04
149 150
150extern long join_session_keyring(const char *name); 151extern long join_session_keyring(const char *name);
152extern void key_change_session_keyring(struct task_work *twork);
151 153
152extern struct work_struct key_gc_work; 154extern struct work_struct key_gc_work;
153extern unsigned key_gc_delay; 155extern unsigned key_gc_delay;
diff --git a/security/keys/keyctl.c b/security/keys/keyctl.c
index ddb3e05bc5fc..0f5b3f027299 100644
--- a/security/keys/keyctl.c
+++ b/security/keys/keyctl.c
@@ -84,7 +84,7 @@ SYSCALL_DEFINE5(add_key, const char __user *, _type,
84 vm = false; 84 vm = false;
85 if (_payload) { 85 if (_payload) {
86 ret = -ENOMEM; 86 ret = -ENOMEM;
87 payload = kmalloc(plen, GFP_KERNEL); 87 payload = kmalloc(plen, GFP_KERNEL | __GFP_NOWARN);
88 if (!payload) { 88 if (!payload) {
89 if (plen <= PAGE_SIZE) 89 if (plen <= PAGE_SIZE)
90 goto error2; 90 goto error2;
@@ -1110,7 +1110,7 @@ long keyctl_instantiate_key_iov(key_serial_t id,
1110 goto no_payload; 1110 goto no_payload;
1111 1111
1112 ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc, 1112 ret = rw_copy_check_uvector(WRITE, _payload_iov, ioc,
1113 ARRAY_SIZE(iovstack), iovstack, &iov, 1); 1113 ARRAY_SIZE(iovstack), iovstack, &iov);
1114 if (ret < 0) 1114 if (ret < 0)
1115 return ret; 1115 return ret;
1116 if (ret == 0) 1116 if (ret == 0)
@@ -1454,50 +1454,57 @@ long keyctl_get_security(key_serial_t keyid,
1454 */ 1454 */
1455long keyctl_session_to_parent(void) 1455long keyctl_session_to_parent(void)
1456{ 1456{
1457#ifdef TIF_NOTIFY_RESUME
1458 struct task_struct *me, *parent; 1457 struct task_struct *me, *parent;
1459 const struct cred *mycred, *pcred; 1458 const struct cred *mycred, *pcred;
1460 struct cred *cred, *oldcred; 1459 struct task_work *newwork, *oldwork;
1461 key_ref_t keyring_r; 1460 key_ref_t keyring_r;
1461 struct cred *cred;
1462 int ret; 1462 int ret;
1463 1463
1464 keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK); 1464 keyring_r = lookup_user_key(KEY_SPEC_SESSION_KEYRING, 0, KEY_LINK);
1465 if (IS_ERR(keyring_r)) 1465 if (IS_ERR(keyring_r))
1466 return PTR_ERR(keyring_r); 1466 return PTR_ERR(keyring_r);
1467 1467
1468 ret = -ENOMEM;
1469 newwork = kmalloc(sizeof(struct task_work), GFP_KERNEL);
1470 if (!newwork)
1471 goto error_keyring;
1472
1468 /* our parent is going to need a new cred struct, a new tgcred struct 1473 /* our parent is going to need a new cred struct, a new tgcred struct
1469 * and new security data, so we allocate them here to prevent ENOMEM in 1474 * and new security data, so we allocate them here to prevent ENOMEM in
1470 * our parent */ 1475 * our parent */
1471 ret = -ENOMEM;
1472 cred = cred_alloc_blank(); 1476 cred = cred_alloc_blank();
1473 if (!cred) 1477 if (!cred)
1474 goto error_keyring; 1478 goto error_newwork;
1475 1479
1476 cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r); 1480 cred->tgcred->session_keyring = key_ref_to_ptr(keyring_r);
1477 keyring_r = NULL; 1481 init_task_work(newwork, key_change_session_keyring, cred);
1478 1482
1479 me = current; 1483 me = current;
1480 rcu_read_lock(); 1484 rcu_read_lock();
1481 write_lock_irq(&tasklist_lock); 1485 write_lock_irq(&tasklist_lock);
1482 1486
1483 parent = me->real_parent;
1484 ret = -EPERM; 1487 ret = -EPERM;
1488 oldwork = NULL;
1489 parent = me->real_parent;
1485 1490
1486 /* the parent mustn't be init and mustn't be a kernel thread */ 1491 /* the parent mustn't be init and mustn't be a kernel thread */
1487 if (parent->pid <= 1 || !parent->mm) 1492 if (parent->pid <= 1 || !parent->mm)
1488 goto not_permitted; 1493 goto unlock;
1489 1494
1490 /* the parent must be single threaded */ 1495 /* the parent must be single threaded */
1491 if (!thread_group_empty(parent)) 1496 if (!thread_group_empty(parent))
1492 goto not_permitted; 1497 goto unlock;
1493 1498
1494 /* the parent and the child must have different session keyrings or 1499 /* the parent and the child must have different session keyrings or
1495 * there's no point */ 1500 * there's no point */
1496 mycred = current_cred(); 1501 mycred = current_cred();
1497 pcred = __task_cred(parent); 1502 pcred = __task_cred(parent);
1498 if (mycred == pcred || 1503 if (mycred == pcred ||
1499 mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) 1504 mycred->tgcred->session_keyring == pcred->tgcred->session_keyring) {
1500 goto already_same; 1505 ret = 0;
1506 goto unlock;
1507 }
1501 1508
1502 /* the parent must have the same effective ownership and mustn't be 1509 /* the parent must have the same effective ownership and mustn't be
1503 * SUID/SGID */ 1510 * SUID/SGID */
@@ -1507,50 +1514,40 @@ long keyctl_session_to_parent(void)
1507 pcred->gid != mycred->egid || 1514 pcred->gid != mycred->egid ||
1508 pcred->egid != mycred->egid || 1515 pcred->egid != mycred->egid ||
1509 pcred->sgid != mycred->egid) 1516 pcred->sgid != mycred->egid)
1510 goto not_permitted; 1517 goto unlock;
1511 1518
1512 /* the keyrings must have the same UID */ 1519 /* the keyrings must have the same UID */
1513 if ((pcred->tgcred->session_keyring && 1520 if ((pcred->tgcred->session_keyring &&
1514 pcred->tgcred->session_keyring->uid != mycred->euid) || 1521 pcred->tgcred->session_keyring->uid != mycred->euid) ||
1515 mycred->tgcred->session_keyring->uid != mycred->euid) 1522 mycred->tgcred->session_keyring->uid != mycred->euid)
1516 goto not_permitted; 1523 goto unlock;
1517 1524
1518 /* if there's an already pending keyring replacement, then we replace 1525 /* cancel an already pending keyring replacement */
1519 * that */ 1526 oldwork = task_work_cancel(parent, key_change_session_keyring);
1520 oldcred = parent->replacement_session_keyring;
1521 1527
1522 /* the replacement session keyring is applied just prior to userspace 1528 /* the replacement session keyring is applied just prior to userspace
1523 * restarting */ 1529 * restarting */
1524 parent->replacement_session_keyring = cred; 1530 ret = task_work_add(parent, newwork, true);
1525 cred = NULL; 1531 if (!ret)
1526 set_ti_thread_flag(task_thread_info(parent), TIF_NOTIFY_RESUME); 1532 newwork = NULL;
1527 1533unlock:
1528 write_unlock_irq(&tasklist_lock);
1529 rcu_read_unlock();
1530 if (oldcred)
1531 put_cred(oldcred);
1532 return 0;
1533
1534already_same:
1535 ret = 0;
1536not_permitted:
1537 write_unlock_irq(&tasklist_lock); 1534 write_unlock_irq(&tasklist_lock);
1538 rcu_read_unlock(); 1535 rcu_read_unlock();
1539 put_cred(cred); 1536 if (oldwork) {
1537 put_cred(oldwork->data);
1538 kfree(oldwork);
1539 }
1540 if (newwork) {
1541 put_cred(newwork->data);
1542 kfree(newwork);
1543 }
1540 return ret; 1544 return ret;
1541 1545
1546error_newwork:
1547 kfree(newwork);
1542error_keyring: 1548error_keyring:
1543 key_ref_put(keyring_r); 1549 key_ref_put(keyring_r);
1544 return ret; 1550 return ret;
1545
1546#else /* !TIF_NOTIFY_RESUME */
1547 /*
1548 * To be removed when TIF_NOTIFY_RESUME has been implemented on
1549 * m68k/xtensa
1550 */
1551#warning TIF_NOTIFY_RESUME not implemented
1552 return -EOPNOTSUPP;
1553#endif /* !TIF_NOTIFY_RESUME */
1554} 1551}
1555 1552
1556/* 1553/*
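
The keyctl.c rewrite above replaces the open-coded replacement_session_keyring plus TIF_NOTIFY_RESUME mechanism with the generic task_work API: allocate a struct task_work, bind it to key_change_session_keyring() with the new cred as its data, cancel any previously queued replacement, and queue the new work against the parent so it runs when the parent next returns to userspace. The condensed sketch below restates that pattern; it assumes kernel context and the declarations from security/keys/internal.h, and drops the locking and keyring checks the real hunk performs.

/* Condensed, illustrative sketch of the task_work pattern used above; all
 * calls appear in the hunk itself with these signatures. */
#include <linux/task_work.h>
#include <linux/cred.h>
#include <linux/slab.h>
#include <linux/sched.h>

static int queue_session_keyring_change(struct task_struct *parent,
                                        struct cred *cred)
{
        struct task_work *newwork, *oldwork;
        int ret;

        newwork = kmalloc(sizeof(*newwork), GFP_KERNEL);
        if (!newwork)
                return -ENOMEM;

        /* bind the work to key_change_session_keyring(), carrying cred */
        init_task_work(newwork, key_change_session_keyring, cred);

        /* drop any replacement that is still pending on the parent */
        oldwork = task_work_cancel(parent, key_change_session_keyring);
        if (oldwork) {
                put_cred(oldwork->data);
                kfree(oldwork);
        }

        /* runs just before the parent resumes userspace execution */
        ret = task_work_add(parent, newwork, true);
        if (ret) {
                put_cred(cred);
                kfree(newwork);
        }
        return ret;
}
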
diff --git a/security/keys/process_keys.c b/security/keys/process_keys.c
index d71056db7b67..4ad54eea1ea4 100644
--- a/security/keys/process_keys.c
+++ b/security/keys/process_keys.c
@@ -834,23 +834,17 @@ error:
834 * Replace a process's session keyring on behalf of one of its children when 834 * Replace a process's session keyring on behalf of one of its children when
835 * the target process is about to resume userspace execution. 835 * the target process is about to resume userspace execution.
836 */ 836 */
837void key_replace_session_keyring(void) 837void key_change_session_keyring(struct task_work *twork)
838{ 838{
839 const struct cred *old; 839 const struct cred *old = current_cred();
840 struct cred *new; 840 struct cred *new = twork->data;
841
842 if (!current->replacement_session_keyring)
843 return;
844 841
845 write_lock_irq(&tasklist_lock); 842 kfree(twork);
846 new = current->replacement_session_keyring; 843 if (unlikely(current->flags & PF_EXITING)) {
847 current->replacement_session_keyring = NULL; 844 put_cred(new);
848 write_unlock_irq(&tasklist_lock);
849
850 if (!new)
851 return; 845 return;
846 }
852 847
853 old = current_cred();
854 new-> uid = old-> uid; 848 new-> uid = old-> uid;
855 new-> euid = old-> euid; 849 new-> euid = old-> euid;
856 new-> suid = old-> suid; 850 new-> suid = old-> suid;
diff --git a/security/keys/request_key.c b/security/keys/request_key.c
index cc3790315d2f..000e75017520 100644
--- a/security/keys/request_key.c
+++ b/security/keys/request_key.c
@@ -93,16 +93,9 @@ static void umh_keys_cleanup(struct subprocess_info *info)
93static int call_usermodehelper_keys(char *path, char **argv, char **envp, 93static int call_usermodehelper_keys(char *path, char **argv, char **envp,
94 struct key *session_keyring, int wait) 94 struct key *session_keyring, int wait)
95{ 95{
96 gfp_t gfp_mask = (wait == UMH_NO_WAIT) ? GFP_ATOMIC : GFP_KERNEL; 96 return call_usermodehelper_fns(path, argv, envp, wait,
97 struct subprocess_info *info = 97 umh_keys_init, umh_keys_cleanup,
98 call_usermodehelper_setup(path, argv, envp, gfp_mask); 98 key_get(session_keyring));
99
100 if (!info)
101 return -ENOMEM;
102
103 call_usermodehelper_setfns(info, umh_keys_init, umh_keys_cleanup,
104 key_get(session_keyring));
105 return call_usermodehelper_exec(info, wait);
106} 99}
107 100
108/* 101/*
diff --git a/security/security.c b/security/security.c
index 5497a57fba01..3efc9b12aef4 100644
--- a/security/security.c
+++ b/security/security.c
@@ -20,6 +20,9 @@
20#include <linux/ima.h> 20#include <linux/ima.h>
21#include <linux/evm.h> 21#include <linux/evm.h>
22#include <linux/fsnotify.h> 22#include <linux/fsnotify.h>
23#include <linux/mman.h>
24#include <linux/mount.h>
25#include <linux/personality.h>
23#include <net/flow.h> 26#include <net/flow.h>
24 27
25#define MAX_LSM_EVM_XATTR 2 28#define MAX_LSM_EVM_XATTR 2
@@ -657,18 +660,56 @@ int security_file_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
657 return security_ops->file_ioctl(file, cmd, arg); 660 return security_ops->file_ioctl(file, cmd, arg);
658} 661}
659 662
660int security_file_mmap(struct file *file, unsigned long reqprot, 663static inline unsigned long mmap_prot(struct file *file, unsigned long prot)
661 unsigned long prot, unsigned long flags,
662 unsigned long addr, unsigned long addr_only)
663{ 664{
664 int ret; 665 /*
 666 * Do we have PROT_READ and does the application expect
667 * it to imply PROT_EXEC? If not, nothing to talk about...
668 */
669 if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
670 return prot;
671 if (!(current->personality & READ_IMPLIES_EXEC))
672 return prot;
673 /*
674 * if that's an anonymous mapping, let it.
675 */
676 if (!file)
677 return prot | PROT_EXEC;
678 /*
679 * ditto if it's not on noexec mount, except that on !MMU we need
680 * BDI_CAP_EXEC_MMAP (== VM_MAYEXEC) in this case
681 */
682 if (!(file->f_path.mnt->mnt_flags & MNT_NOEXEC)) {
683#ifndef CONFIG_MMU
684 unsigned long caps = 0;
685 struct address_space *mapping = file->f_mapping;
686 if (mapping && mapping->backing_dev_info)
687 caps = mapping->backing_dev_info->capabilities;
688 if (!(caps & BDI_CAP_EXEC_MAP))
689 return prot;
690#endif
691 return prot | PROT_EXEC;
692 }
693 /* anything on noexec mount won't get PROT_EXEC */
694 return prot;
695}
665 696
666 ret = security_ops->file_mmap(file, reqprot, prot, flags, addr, addr_only); 697int security_mmap_file(struct file *file, unsigned long prot,
698 unsigned long flags)
699{
700 int ret;
701 ret = security_ops->mmap_file(file, prot,
702 mmap_prot(file, prot), flags);
667 if (ret) 703 if (ret)
668 return ret; 704 return ret;
669 return ima_file_mmap(file, prot); 705 return ima_file_mmap(file, prot);
670} 706}
671 707
708int security_mmap_addr(unsigned long addr)
709{
710 return security_ops->mmap_addr(addr);
711}
712
672int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot, 713int security_file_mprotect(struct vm_area_struct *vma, unsigned long reqprot,
673 unsigned long prot) 714 unsigned long prot)
674{ 715{
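
The new mmap_prot() helper above is what lets security_mmap_file() drop the separate reqprot argument: it recomputes the effective protection, adding PROT_EXEC when the mapping is readable and the task's personality sets READ_IMPLIES_EXEC, except for files on noexec mounts. The standalone sketch below reproduces that decision with userspace stand-ins for the personality flag and the mount state, and omits the !MMU backing-dev special case.

/* Standalone illustration of the prot adjustment done by mmap_prot() above.
 * read_implies_exec and file_on_noexec_mount are stand-ins for the
 * personality flag and mount flags checked in the kernel. */
#include <stdio.h>
#include <sys/mman.h>

static unsigned long effective_prot(unsigned long prot, int has_file,
                                    int read_implies_exec,
                                    int file_on_noexec_mount)
{
        if ((prot & (PROT_READ | PROT_EXEC)) != PROT_READ)
                return prot;            /* not readable, or already exec */
        if (!read_implies_exec)
                return prot;
        if (!has_file)
                return prot | PROT_EXEC;        /* anonymous mapping */
        if (!file_on_noexec_mount)
                return prot | PROT_EXEC;
        return prot;                    /* noexec mount never gains PROT_EXEC */
}

int main(void)
{
        printf("%#lx\n", effective_prot(PROT_READ, 0, 1, 0));  /* gains PROT_EXEC */
        printf("%#lx\n", effective_prot(PROT_READ, 1, 1, 1));  /* stays PROT_READ */
        return 0;
}
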
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index fa2341b68331..372ec6502aa8 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3083,9 +3083,7 @@ error:
3083 return rc; 3083 return rc;
3084} 3084}
3085 3085
3086static int selinux_file_mmap(struct file *file, unsigned long reqprot, 3086static int selinux_mmap_addr(unsigned long addr)
3087 unsigned long prot, unsigned long flags,
3088 unsigned long addr, unsigned long addr_only)
3089{ 3087{
3090 int rc = 0; 3088 int rc = 0;
3091 u32 sid = current_sid(); 3089 u32 sid = current_sid();
@@ -3104,10 +3102,12 @@ static int selinux_file_mmap(struct file *file, unsigned long reqprot,
3104 } 3102 }
3105 3103
3106 /* do DAC check on address space usage */ 3104 /* do DAC check on address space usage */
3107 rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only); 3105 return cap_mmap_addr(addr);
3108 if (rc || addr_only) 3106}
3109 return rc;
3110 3107
3108static int selinux_mmap_file(struct file *file, unsigned long reqprot,
3109 unsigned long prot, unsigned long flags)
3110{
3111 if (selinux_checkreqprot) 3111 if (selinux_checkreqprot)
3112 prot = reqprot; 3112 prot = reqprot;
3113 3113
@@ -5570,7 +5570,8 @@ static struct security_operations selinux_ops = {
5570 .file_alloc_security = selinux_file_alloc_security, 5570 .file_alloc_security = selinux_file_alloc_security,
5571 .file_free_security = selinux_file_free_security, 5571 .file_free_security = selinux_file_free_security,
5572 .file_ioctl = selinux_file_ioctl, 5572 .file_ioctl = selinux_file_ioctl,
5573 .file_mmap = selinux_file_mmap, 5573 .mmap_file = selinux_mmap_file,
5574 .mmap_addr = selinux_mmap_addr,
5574 .file_mprotect = selinux_file_mprotect, 5575 .file_mprotect = selinux_file_mprotect,
5575 .file_lock = selinux_file_lock, 5576 .file_lock = selinux_file_lock,
5576 .file_fcntl = selinux_file_fcntl, 5577 .file_fcntl = selinux_file_fcntl,
diff --git a/security/selinux/selinuxfs.c b/security/selinux/selinuxfs.c
index 4e93f9ef970b..3ad290251288 100644
--- a/security/selinux/selinuxfs.c
+++ b/security/selinux/selinuxfs.c
@@ -1259,12 +1259,8 @@ static int sel_make_bools(void)
1259 if (!inode) 1259 if (!inode)
1260 goto out; 1260 goto out;
1261 1261
1262 ret = -EINVAL;
1263 len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
1264 if (len < 0)
1265 goto out;
1266
1267 ret = -ENAMETOOLONG; 1262 ret = -ENAMETOOLONG;
1263 len = snprintf(page, PAGE_SIZE, "/%s/%s", BOOL_DIR_NAME, names[i]);
1268 if (len >= PAGE_SIZE) 1264 if (len >= PAGE_SIZE)
1269 goto out; 1265 goto out;
1270 1266
@@ -1557,19 +1553,10 @@ static inline u32 sel_ino_to_perm(unsigned long ino)
1557static ssize_t sel_read_class(struct file *file, char __user *buf, 1553static ssize_t sel_read_class(struct file *file, char __user *buf,
1558 size_t count, loff_t *ppos) 1554 size_t count, loff_t *ppos)
1559{ 1555{
1560 ssize_t rc, len;
1561 char *page;
1562 unsigned long ino = file->f_path.dentry->d_inode->i_ino; 1556 unsigned long ino = file->f_path.dentry->d_inode->i_ino;
1563 1557 char res[TMPBUFLEN];
1564 page = (char *)__get_free_page(GFP_KERNEL); 1558 ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_class(ino));
1565 if (!page) 1559 return simple_read_from_buffer(buf, count, ppos, res, len);
1566 return -ENOMEM;
1567
1568 len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_class(ino));
1569 rc = simple_read_from_buffer(buf, count, ppos, page, len);
1570 free_page((unsigned long)page);
1571
1572 return rc;
1573} 1560}
1574 1561
1575static const struct file_operations sel_class_ops = { 1562static const struct file_operations sel_class_ops = {
@@ -1580,19 +1567,10 @@ static const struct file_operations sel_class_ops = {
1580static ssize_t sel_read_perm(struct file *file, char __user *buf, 1567static ssize_t sel_read_perm(struct file *file, char __user *buf,
1581 size_t count, loff_t *ppos) 1568 size_t count, loff_t *ppos)
1582{ 1569{
1583 ssize_t rc, len;
1584 char *page;
1585 unsigned long ino = file->f_path.dentry->d_inode->i_ino; 1570 unsigned long ino = file->f_path.dentry->d_inode->i_ino;
1586 1571 char res[TMPBUFLEN];
1587 page = (char *)__get_free_page(GFP_KERNEL); 1572 ssize_t len = snprintf(res, sizeof(res), "%d", sel_ino_to_perm(ino));
1588 if (!page) 1573 return simple_read_from_buffer(buf, count, ppos, res, len);
1589 return -ENOMEM;
1590
1591 len = snprintf(page, PAGE_SIZE, "%d", sel_ino_to_perm(ino));
1592 rc = simple_read_from_buffer(buf, count, ppos, page, len);
1593 free_page((unsigned long)page);
1594
1595 return rc;
1596} 1574}
1597 1575
1598static const struct file_operations sel_perm_ops = { 1576static const struct file_operations sel_perm_ops = {
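
Both sel_read_class() and sel_read_perm() above now format into a small stack buffer and hand it to simple_read_from_buffer() instead of allocating and freeing a whole page per read. The hedged sketch below shows the resulting read-handler shape; my_read_number is a placeholder name, TMPBUFLEN is the existing selinuxfs buffer size, and simple_read_from_buffer() copies at most count bytes starting at *ppos and advances *ppos.

/* Sketch of the simplified read handler used above. */
static ssize_t my_read_number(struct file *file, char __user *buf,
                              size_t count, loff_t *ppos)
{
        char res[TMPBUFLEN];
        ssize_t len = snprintf(res, sizeof(res), "%lu",
                               file->f_path.dentry->d_inode->i_ino);

        return simple_read_from_buffer(buf, count, ppos, res, len);
}
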
diff --git a/security/smack/smack_lsm.c b/security/smack/smack_lsm.c
index d583c0545808..ee0bb5735f35 100644
--- a/security/smack/smack_lsm.c
+++ b/security/smack/smack_lsm.c
@@ -1171,7 +1171,7 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
1171} 1171}
1172 1172
1173/** 1173/**
1174 * smack_file_mmap : 1174 * smack_mmap_file :
1175 * Check permissions for a mmap operation. The @file may be NULL, e.g. 1175 * Check permissions for a mmap operation. The @file may be NULL, e.g.
1176 * if mapping anonymous memory. 1176 * if mapping anonymous memory.
1177 * @file contains the file structure for file to map (may be NULL). 1177 * @file contains the file structure for file to map (may be NULL).
@@ -1180,10 +1180,9 @@ static int smack_file_fcntl(struct file *file, unsigned int cmd,
1180 * @flags contains the operational flags. 1180 * @flags contains the operational flags.
1181 * Return 0 if permission is granted. 1181 * Return 0 if permission is granted.
1182 */ 1182 */
1183static int smack_file_mmap(struct file *file, 1183static int smack_mmap_file(struct file *file,
1184 unsigned long reqprot, unsigned long prot, 1184 unsigned long reqprot, unsigned long prot,
1185 unsigned long flags, unsigned long addr, 1185 unsigned long flags)
1186 unsigned long addr_only)
1187{ 1186{
1188 struct smack_known *skp; 1187 struct smack_known *skp;
1189 struct smack_rule *srp; 1188 struct smack_rule *srp;
@@ -1198,11 +1197,6 @@ static int smack_file_mmap(struct file *file,
1198 int tmay; 1197 int tmay;
1199 int rc; 1198 int rc;
1200 1199
1201 /* do DAC check on address space usage */
1202 rc = cap_file_mmap(file, reqprot, prot, flags, addr, addr_only);
1203 if (rc || addr_only)
1204 return rc;
1205
1206 if (file == NULL || file->f_dentry == NULL) 1200 if (file == NULL || file->f_dentry == NULL)
1207 return 0; 1201 return 0;
1208 1202
@@ -3482,7 +3476,8 @@ struct security_operations smack_ops = {
3482 .file_ioctl = smack_file_ioctl, 3476 .file_ioctl = smack_file_ioctl,
3483 .file_lock = smack_file_lock, 3477 .file_lock = smack_file_lock,
3484 .file_fcntl = smack_file_fcntl, 3478 .file_fcntl = smack_file_fcntl,
3485 .file_mmap = smack_file_mmap, 3479 .mmap_file = smack_mmap_file,
3480 .mmap_addr = cap_mmap_addr,
3486 .file_set_fowner = smack_file_set_fowner, 3481 .file_set_fowner = smack_file_set_fowner,
3487 .file_send_sigiotask = smack_file_send_sigiotask, 3482 .file_send_sigiotask = smack_file_send_sigiotask,
3488 .file_receive = smack_file_receive, 3483 .file_receive = smack_file_receive,
diff --git a/sound/core/compress_offload.c b/sound/core/compress_offload.c
index a68aed7fce02..ec2118d0e27a 100644
--- a/sound/core/compress_offload.c
+++ b/sound/core/compress_offload.c
@@ -502,10 +502,8 @@ static int snd_compr_pause(struct snd_compr_stream *stream)
502 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING) 502 if (stream->runtime->state != SNDRV_PCM_STATE_RUNNING)
503 return -EPERM; 503 return -EPERM;
504 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH); 504 retval = stream->ops->trigger(stream, SNDRV_PCM_TRIGGER_PAUSE_PUSH);
505 if (!retval) { 505 if (!retval)
506 stream->runtime->state = SNDRV_PCM_STATE_PAUSED; 506 stream->runtime->state = SNDRV_PCM_STATE_PAUSED;
507 wake_up(&stream->runtime->sleep);
508 }
509 return retval; 507 return retval;
510} 508}
511 509
@@ -544,6 +542,10 @@ static int snd_compr_stop(struct snd_compr_stream *stream)
544 if (!retval) { 542 if (!retval) {
545 stream->runtime->state = SNDRV_PCM_STATE_SETUP; 543 stream->runtime->state = SNDRV_PCM_STATE_SETUP;
546 wake_up(&stream->runtime->sleep); 544 wake_up(&stream->runtime->sleep);
545 stream->runtime->hw_pointer = 0;
546 stream->runtime->app_pointer = 0;
547 stream->runtime->total_bytes_available = 0;
548 stream->runtime->total_bytes_transferred = 0;
547 } 549 }
548 return retval; 550 return retval;
549} 551}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index 41ca803a1fff..7504e62188d6 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -4393,20 +4393,19 @@ void snd_hda_update_power_acct(struct hda_codec *codec)
4393 codec->power_jiffies += delta; 4393 codec->power_jiffies += delta;
4394} 4394}
4395 4395
4396/** 4396/* Transition to powered up, if wait_power_down then wait for a pending
4397 * snd_hda_power_up - Power-up the codec 4397 * transition to D3 to complete. A pending D3 transition is indicated
4398 * @codec: HD-audio codec 4398 * with power_transition == -1. */
4399 * 4399static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
4400 * Increment the power-up counter and power up the hardware really when
4401 * not turned on yet.
4402 */
4403void snd_hda_power_up(struct hda_codec *codec)
4404{ 4400{
4405 struct hda_bus *bus = codec->bus; 4401 struct hda_bus *bus = codec->bus;
4406 4402
4407 spin_lock(&codec->power_lock); 4403 spin_lock(&codec->power_lock);
4408 codec->power_count++; 4404 codec->power_count++;
4409 if (codec->power_on || codec->power_transition > 0) { 4405 /* Return if power_on or transitioning to power_on, unless currently
4406 * powering down. */
4407 if ((codec->power_on || codec->power_transition > 0) &&
4408 !(wait_power_down && codec->power_transition < 0)) {
4410 spin_unlock(&codec->power_lock); 4409 spin_unlock(&codec->power_lock);
4411 return; 4410 return;
4412 } 4411 }
@@ -4430,8 +4429,37 @@ void snd_hda_power_up(struct hda_codec *codec)
4430 codec->power_transition = 0; 4429 codec->power_transition = 0;
4431 spin_unlock(&codec->power_lock); 4430 spin_unlock(&codec->power_lock);
4432} 4431}
4432
4433/**
4434 * snd_hda_power_up - Power-up the codec
4435 * @codec: HD-audio codec
4436 *
4437 * Increment the power-up counter and power up the hardware really when
4438 * not turned on yet.
4439 */
4440void snd_hda_power_up(struct hda_codec *codec)
4441{
4442 __snd_hda_power_up(codec, false);
4443}
4433EXPORT_SYMBOL_HDA(snd_hda_power_up); 4444EXPORT_SYMBOL_HDA(snd_hda_power_up);
4434 4445
4446/**
4447 * snd_hda_power_up_d3wait - Power-up the codec after waiting for any pending
4448 * D3 transition to complete. This differs from snd_hda_power_up() when
4449 * power_transition == -1. snd_hda_power_up sees this case as a nop,
4450 * snd_hda_power_up_d3wait waits for the D3 transition to complete then powers
4451 * back up.
4452 * @codec: HD-audio codec
4453 *
 4454 * Cancel any power down operation happening on the work queue, then power up.
4455 */
4456void snd_hda_power_up_d3wait(struct hda_codec *codec)
4457{
4458 /* This will cancel and wait for pending power_work to complete. */
4459 __snd_hda_power_up(codec, true);
4460}
4461EXPORT_SYMBOL_HDA(snd_hda_power_up_d3wait);
4462
4435#define power_save(codec) \ 4463#define power_save(codec) \
4436 ((codec)->bus->power_save ? *(codec)->bus->power_save : 0) 4464 ((codec)->bus->power_save ? *(codec)->bus->power_save : 0)
4437 4465
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 4fc3960c8591..2fdaadbb4326 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -1056,10 +1056,12 @@ const char *snd_hda_get_jack_location(u32 cfg);
1056 */ 1056 */
1057#ifdef CONFIG_SND_HDA_POWER_SAVE 1057#ifdef CONFIG_SND_HDA_POWER_SAVE
1058void snd_hda_power_up(struct hda_codec *codec); 1058void snd_hda_power_up(struct hda_codec *codec);
1059void snd_hda_power_up_d3wait(struct hda_codec *codec);
1059void snd_hda_power_down(struct hda_codec *codec); 1060void snd_hda_power_down(struct hda_codec *codec);
1060void snd_hda_update_power_acct(struct hda_codec *codec); 1061void snd_hda_update_power_acct(struct hda_codec *codec);
1061#else 1062#else
1062static inline void snd_hda_power_up(struct hda_codec *codec) {} 1063static inline void snd_hda_power_up(struct hda_codec *codec) {}
1064static inline void snd_hda_power_up_d3wait(struct hda_codec *codec) {}
1063static inline void snd_hda_power_down(struct hda_codec *codec) {} 1065static inline void snd_hda_power_down(struct hda_codec *codec) {}
1064#endif 1066#endif
1065 1067
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 2b6392be451c..7757536b9d5f 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -1766,7 +1766,7 @@ static int azx_pcm_open(struct snd_pcm_substream *substream)
1766 buff_step); 1766 buff_step);
1767 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 1767 snd_pcm_hw_constraint_step(runtime, 0, SNDRV_PCM_HW_PARAM_PERIOD_BYTES,
1768 buff_step); 1768 buff_step);
1769 snd_hda_power_up(apcm->codec); 1769 snd_hda_power_up_d3wait(apcm->codec);
1770 err = hinfo->ops.open(hinfo, apcm->codec, substream); 1770 err = hinfo->ops.open(hinfo, apcm->codec, substream);
1771 if (err < 0) { 1771 if (err < 0) {
1772 azx_release_device(azx_dev); 1772 azx_release_device(azx_dev);
@@ -2484,9 +2484,9 @@ static void azx_notifier_unregister(struct azx *chip)
2484static int DELAYED_INIT_MARK azx_first_init(struct azx *chip); 2484static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
2485static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip); 2485static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
2486 2486
2487#ifdef SUPPORT_VGA_SWITCHEROO
2487static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci); 2488static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
2488 2489
2489#ifdef SUPPORT_VGA_SWITCHEROO
2490static void azx_vs_set_state(struct pci_dev *pci, 2490static void azx_vs_set_state(struct pci_dev *pci,
2491 enum vga_switcheroo_state state) 2491 enum vga_switcheroo_state state)
2492{ 2492{
@@ -2578,6 +2578,7 @@ static int __devinit register_vga_switcheroo(struct azx *chip)
2578#else 2578#else
2579#define init_vga_switcheroo(chip) /* NOP */ 2579#define init_vga_switcheroo(chip) /* NOP */
2580#define register_vga_switcheroo(chip) 0 2580#define register_vga_switcheroo(chip) 0
2581#define check_hdmi_disabled(pci) false
2581#endif /* SUPPORT_VGA_SWITCHER */ 2582#endif /* SUPPORT_VGA_SWITCHER */
2582 2583
2583/* 2584/*
@@ -2638,6 +2639,7 @@ static int azx_dev_free(struct snd_device *device)
2638 return azx_free(device->device_data); 2639 return azx_free(device->device_data);
2639} 2640}
2640 2641
2642#ifdef SUPPORT_VGA_SWITCHEROO
2641/* 2643/*
2642 * Check of disabled HDMI controller by vga-switcheroo 2644 * Check of disabled HDMI controller by vga-switcheroo
2643 */ 2645 */
@@ -2670,12 +2672,13 @@ static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
2670 struct pci_dev *p = get_bound_vga(pci); 2672 struct pci_dev *p = get_bound_vga(pci);
2671 2673
2672 if (p) { 2674 if (p) {
2673 if (vga_default_device() && p != vga_default_device()) 2675 if (vga_switcheroo_get_client_state(p) == VGA_SWITCHEROO_OFF)
2674 vga_inactive = true; 2676 vga_inactive = true;
2675 pci_dev_put(p); 2677 pci_dev_put(p);
2676 } 2678 }
2677 return vga_inactive; 2679 return vga_inactive;
2678} 2680}
2681#endif /* SUPPORT_VGA_SWITCHEROO */
2679 2682
2680/* 2683/*
2681 * white/black-listing for position_fix 2684 * white/black-listing for position_fix
@@ -3351,6 +3354,11 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
3351 { PCI_DEVICE(0x6549, 0x1200), 3354 { PCI_DEVICE(0x6549, 0x1200),
3352 .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT }, 3355 .driver_data = AZX_DRIVER_TERA | AZX_DCAPS_NO_64BIT },
3353 /* Creative X-Fi (CA0110-IBG) */ 3356 /* Creative X-Fi (CA0110-IBG) */
3357 /* CTHDA chips */
3358 { PCI_DEVICE(0x1102, 0x0010),
3359 .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
3360 { PCI_DEVICE(0x1102, 0x0012),
3361 .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
3354#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE) 3362#if !defined(CONFIG_SND_CTXFI) && !defined(CONFIG_SND_CTXFI_MODULE)
3355 /* the following entry conflicts with snd-ctxfi driver, 3363 /* the following entry conflicts with snd-ctxfi driver,
3356 * as ctxfi driver mutates from HD-audio to native mode with 3364 * as ctxfi driver mutates from HD-audio to native mode with
@@ -3367,11 +3375,6 @@ static DEFINE_PCI_DEVICE_TABLE(azx_ids) = {
3367 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND | 3375 .driver_data = AZX_DRIVER_CTX | AZX_DCAPS_CTX_WORKAROUND |
3368 AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB }, 3376 AZX_DCAPS_RIRB_PRE_DELAY | AZX_DCAPS_POSFIX_LPIB },
3369#endif 3377#endif
3370 /* CTHDA chips */
3371 { PCI_DEVICE(0x1102, 0x0010),
3372 .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
3373 { PCI_DEVICE(0x1102, 0x0012),
3374 .driver_data = AZX_DRIVER_CTHDA | AZX_DCAPS_PRESET_CTHDA },
3375 /* Vortex86MX */ 3378 /* Vortex86MX */
3376 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC }, 3379 { PCI_DEVICE(0x17f3, 0x3010), .driver_data = AZX_DRIVER_GENERIC },
3377 /* VMware HDAudio */ 3380 /* VMware HDAudio */
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index 3acb5824ad39..172370b3793b 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -4061,7 +4061,7 @@ static void cx_auto_init_digital(struct hda_codec *codec)
4061static int cx_auto_init(struct hda_codec *codec) 4061static int cx_auto_init(struct hda_codec *codec)
4062{ 4062{
4063 struct conexant_spec *spec = codec->spec; 4063 struct conexant_spec *spec = codec->spec;
4064 /*snd_hda_sequence_write(codec, cx_auto_init_verbs);*/ 4064 snd_hda_gen_apply_verbs(codec);
4065 cx_auto_init_output(codec); 4065 cx_auto_init_output(codec);
4066 cx_auto_init_input(codec); 4066 cx_auto_init_input(codec);
4067 cx_auto_init_digital(codec); 4067 cx_auto_init_digital(codec);
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 224410e8e9e7..f8f4906e498d 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -1896,6 +1896,7 @@ static int alc_init(struct hda_codec *codec)
1896 alc_fix_pll(codec); 1896 alc_fix_pll(codec);
1897 alc_auto_init_amp(codec, spec->init_amp); 1897 alc_auto_init_amp(codec, spec->init_amp);
1898 1898
1899 snd_hda_gen_apply_verbs(codec);
1899 alc_init_special_input_src(codec); 1900 alc_init_special_input_src(codec);
1900 alc_auto_init_std(codec); 1901 alc_auto_init_std(codec);
1901 1902
@@ -6439,6 +6440,7 @@ enum {
6439 ALC662_FIXUP_ASUS_MODE7, 6440 ALC662_FIXUP_ASUS_MODE7,
6440 ALC662_FIXUP_ASUS_MODE8, 6441 ALC662_FIXUP_ASUS_MODE8,
6441 ALC662_FIXUP_NO_JACK_DETECT, 6442 ALC662_FIXUP_NO_JACK_DETECT,
6443 ALC662_FIXUP_ZOTAC_Z68,
6442}; 6444};
6443 6445
6444static const struct alc_fixup alc662_fixups[] = { 6446static const struct alc_fixup alc662_fixups[] = {
@@ -6588,6 +6590,13 @@ static const struct alc_fixup alc662_fixups[] = {
6588 .type = ALC_FIXUP_FUNC, 6590 .type = ALC_FIXUP_FUNC,
6589 .v.func = alc_fixup_no_jack_detect, 6591 .v.func = alc_fixup_no_jack_detect,
6590 }, 6592 },
6593 [ALC662_FIXUP_ZOTAC_Z68] = {
6594 .type = ALC_FIXUP_PINS,
6595 .v.pins = (const struct alc_pincfg[]) {
6596 { 0x1b, 0x02214020 }, /* Front HP */
6597 { }
6598 }
6599 },
6591}; 6600};
6592 6601
6593static const struct snd_pci_quirk alc662_fixup_tbl[] = { 6602static const struct snd_pci_quirk alc662_fixup_tbl[] = {
@@ -6601,6 +6610,7 @@ static const struct snd_pci_quirk alc662_fixup_tbl[] = {
6601 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD), 6610 SND_PCI_QUIRK(0x144d, 0xc051, "Samsung R720", ALC662_FIXUP_IDEAPAD),
6602 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD), 6611 SND_PCI_QUIRK(0x17aa, 0x38af, "Lenovo Ideapad Y550P", ALC662_FIXUP_IDEAPAD),
6603 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD), 6612 SND_PCI_QUIRK(0x17aa, 0x3a0d, "Lenovo Ideapad Y550", ALC662_FIXUP_IDEAPAD),
6613 SND_PCI_QUIRK(0x19da, 0xa130, "Zotac Z68", ALC662_FIXUP_ZOTAC_Z68),
6604 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T), 6614 SND_PCI_QUIRK(0x1b35, 0x2206, "CZC P10T", ALC662_FIXUP_CZC_P10T),
6605 6615
6606#if 0 6616#if 0
diff --git a/sound/pci/rme9652/hdspm.c b/sound/pci/rme9652/hdspm.c
index 0a5027b94714..b8ac8710f47f 100644
--- a/sound/pci/rme9652/hdspm.c
+++ b/sound/pci/rme9652/hdspm.c
@@ -1988,6 +1988,13 @@ static int hdspm_get_system_sample_rate(struct hdspm *hdspm)
1988 period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ); 1988 period = hdspm_read(hdspm, HDSPM_RD_PLL_FREQ);
1989 rate = hdspm_calc_dds_value(hdspm, period); 1989 rate = hdspm_calc_dds_value(hdspm, period);
1990 1990
1991 if (rate > 207000) {
1992 /* Unreasonable high sample rate as seen on PCI MADI cards.
1993 * Use the cached value instead.
1994 */
1995 rate = hdspm->system_sample_rate;
1996 }
1997
1991 return rate; 1998 return rate;
1992} 1999}
1993 2000
diff --git a/sound/soc/codecs/wm2000.c b/sound/soc/codecs/wm2000.c
index a75c3766aede..0418fa11e6bd 100644
--- a/sound/soc/codecs/wm2000.c
+++ b/sound/soc/codecs/wm2000.c
@@ -99,8 +99,9 @@ static void wm2000_reset(struct wm2000_priv *wm2000)
99} 99}
100 100
101static int wm2000_poll_bit(struct i2c_client *i2c, 101static int wm2000_poll_bit(struct i2c_client *i2c,
102 unsigned int reg, u8 mask, int timeout) 102 unsigned int reg, u8 mask)
103{ 103{
104 int timeout = 4000;
104 int val; 105 int val;
105 106
106 val = wm2000_read(i2c, reg); 107 val = wm2000_read(i2c, reg);
@@ -119,7 +120,7 @@ static int wm2000_poll_bit(struct i2c_client *i2c,
119static int wm2000_power_up(struct i2c_client *i2c, int analogue) 120static int wm2000_power_up(struct i2c_client *i2c, int analogue)
120{ 121{
121 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 122 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
122 int ret, timeout; 123 int ret;
123 124
124 BUG_ON(wm2000->anc_mode != ANC_OFF); 125 BUG_ON(wm2000->anc_mode != ANC_OFF);
125 126
@@ -140,13 +141,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
140 141
141 /* Wait for ANC engine to become ready */ 142 /* Wait for ANC engine to become ready */
142 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 143 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
143 WM2000_ANC_ENG_IDLE, 1)) { 144 WM2000_ANC_ENG_IDLE)) {
144 dev_err(&i2c->dev, "ANC engine failed to reset\n"); 145 dev_err(&i2c->dev, "ANC engine failed to reset\n");
145 return -ETIMEDOUT; 146 return -ETIMEDOUT;
146 } 147 }
147 148
148 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 149 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
149 WM2000_STATUS_BOOT_COMPLETE, 1)) { 150 WM2000_STATUS_BOOT_COMPLETE)) {
150 dev_err(&i2c->dev, "ANC engine failed to initialise\n"); 151 dev_err(&i2c->dev, "ANC engine failed to initialise\n");
151 return -ETIMEDOUT; 152 return -ETIMEDOUT;
152 } 153 }
@@ -173,16 +174,13 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
173 dev_dbg(&i2c->dev, "Download complete\n"); 174 dev_dbg(&i2c->dev, "Download complete\n");
174 175
175 if (analogue) { 176 if (analogue) {
176 timeout = 248; 177 wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
177 wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
178 178
179 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 179 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
180 WM2000_MODE_ANA_SEQ_INCLUDE | 180 WM2000_MODE_ANA_SEQ_INCLUDE |
181 WM2000_MODE_MOUSE_ENABLE | 181 WM2000_MODE_MOUSE_ENABLE |
182 WM2000_MODE_THERMAL_ENABLE); 182 WM2000_MODE_THERMAL_ENABLE);
183 } else { 183 } else {
184 timeout = 10;
185
186 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 184 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
187 WM2000_MODE_MOUSE_ENABLE | 185 WM2000_MODE_MOUSE_ENABLE |
188 WM2000_MODE_THERMAL_ENABLE); 186 WM2000_MODE_THERMAL_ENABLE);
@@ -201,9 +199,8 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
201 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 199 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
202 200
203 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 201 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
204 WM2000_STATUS_MOUSE_ACTIVE, timeout)) { 202 WM2000_STATUS_MOUSE_ACTIVE)) {
205 dev_err(&i2c->dev, "Timed out waiting for device after %dms\n", 203 dev_err(&i2c->dev, "Timed out waiting for device\n");
206 timeout * 10);
207 return -ETIMEDOUT; 204 return -ETIMEDOUT;
208 } 205 }
209 206
@@ -218,28 +215,25 @@ static int wm2000_power_up(struct i2c_client *i2c, int analogue)
218static int wm2000_power_down(struct i2c_client *i2c, int analogue) 215static int wm2000_power_down(struct i2c_client *i2c, int analogue)
219{ 216{
220 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 217 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
221 int timeout;
222 218
223 if (analogue) { 219 if (analogue) {
224 timeout = 248; 220 wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
225 wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
226 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 221 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
227 WM2000_MODE_ANA_SEQ_INCLUDE | 222 WM2000_MODE_ANA_SEQ_INCLUDE |
228 WM2000_MODE_POWER_DOWN); 223 WM2000_MODE_POWER_DOWN);
229 } else { 224 } else {
230 timeout = 10;
231 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 225 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
232 WM2000_MODE_POWER_DOWN); 226 WM2000_MODE_POWER_DOWN);
233 } 227 }
234 228
235 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 229 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
236 WM2000_STATUS_POWER_DOWN_COMPLETE, timeout)) { 230 WM2000_STATUS_POWER_DOWN_COMPLETE)) {
237 dev_err(&i2c->dev, "Timeout waiting for ANC power down\n"); 231 dev_err(&i2c->dev, "Timeout waiting for ANC power down\n");
238 return -ETIMEDOUT; 232 return -ETIMEDOUT;
239 } 233 }
240 234
241 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 235 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
242 WM2000_ANC_ENG_IDLE, 1)) { 236 WM2000_ANC_ENG_IDLE)) {
243 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); 237 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
244 return -ETIMEDOUT; 238 return -ETIMEDOUT;
245 } 239 }
@@ -268,13 +262,13 @@ static int wm2000_enter_bypass(struct i2c_client *i2c, int analogue)
268 } 262 }
269 263
270 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 264 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
271 WM2000_STATUS_ANC_DISABLED, 10)) { 265 WM2000_STATUS_ANC_DISABLED)) {
272 dev_err(&i2c->dev, "Timeout waiting for ANC disable\n"); 266 dev_err(&i2c->dev, "Timeout waiting for ANC disable\n");
273 return -ETIMEDOUT; 267 return -ETIMEDOUT;
274 } 268 }
275 269
276 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, 270 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT,
277 WM2000_ANC_ENG_IDLE, 1)) { 271 WM2000_ANC_ENG_IDLE)) {
278 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n"); 272 dev_err(&i2c->dev, "Timeout waiting for ANC engine idle\n");
279 return -ETIMEDOUT; 273 return -ETIMEDOUT;
280 } 274 }
@@ -311,7 +305,7 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
311 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 305 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
312 306
313 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 307 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
314 WM2000_STATUS_MOUSE_ACTIVE, 10)) { 308 WM2000_STATUS_MOUSE_ACTIVE)) {
315 dev_err(&i2c->dev, "Timed out waiting for MOUSE\n"); 309 dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
316 return -ETIMEDOUT; 310 return -ETIMEDOUT;
317 } 311 }
@@ -325,38 +319,32 @@ static int wm2000_exit_bypass(struct i2c_client *i2c, int analogue)
325static int wm2000_enter_standby(struct i2c_client *i2c, int analogue) 319static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
326{ 320{
327 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 321 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
328 int timeout;
329 322
330 BUG_ON(wm2000->anc_mode != ANC_ACTIVE); 323 BUG_ON(wm2000->anc_mode != ANC_ACTIVE);
331 324
332 if (analogue) { 325 if (analogue) {
333 timeout = 248; 326 wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, 248 / 4);
334 wm2000_write(i2c, WM2000_REG_ANA_VMID_PD_TIME, timeout / 4);
335 327
336 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 328 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
337 WM2000_MODE_ANA_SEQ_INCLUDE | 329 WM2000_MODE_ANA_SEQ_INCLUDE |
338 WM2000_MODE_THERMAL_ENABLE | 330 WM2000_MODE_THERMAL_ENABLE |
339 WM2000_MODE_STANDBY_ENTRY); 331 WM2000_MODE_STANDBY_ENTRY);
340 } else { 332 } else {
341 timeout = 10;
342
343 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 333 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
344 WM2000_MODE_THERMAL_ENABLE | 334 WM2000_MODE_THERMAL_ENABLE |
345 WM2000_MODE_STANDBY_ENTRY); 335 WM2000_MODE_STANDBY_ENTRY);
346 } 336 }
347 337
348 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 338 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
349 WM2000_STATUS_ANC_DISABLED, timeout)) { 339 WM2000_STATUS_ANC_DISABLED)) {
350 dev_err(&i2c->dev, 340 dev_err(&i2c->dev,
351 "Timed out waiting for ANC disable after 1ms\n"); 341 "Timed out waiting for ANC disable after 1ms\n");
352 return -ETIMEDOUT; 342 return -ETIMEDOUT;
353 } 343 }
354 344
355 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE, 345 if (!wm2000_poll_bit(i2c, WM2000_REG_ANC_STAT, WM2000_ANC_ENG_IDLE)) {
356 1)) {
357 dev_err(&i2c->dev, 346 dev_err(&i2c->dev,
358 "Timed out waiting for standby after %dms\n", 347 "Timed out waiting for standby\n");
359 timeout * 10);
360 return -ETIMEDOUT; 348 return -ETIMEDOUT;
361 } 349 }
362 350
@@ -374,23 +362,19 @@ static int wm2000_enter_standby(struct i2c_client *i2c, int analogue)
374static int wm2000_exit_standby(struct i2c_client *i2c, int analogue) 362static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
375{ 363{
376 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev); 364 struct wm2000_priv *wm2000 = dev_get_drvdata(&i2c->dev);
377 int timeout;
378 365
379 BUG_ON(wm2000->anc_mode != ANC_STANDBY); 366 BUG_ON(wm2000->anc_mode != ANC_STANDBY);
380 367
381 wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0); 368 wm2000_write(i2c, WM2000_REG_SYS_CTL1, 0);
382 369
383 if (analogue) { 370 if (analogue) {
384 timeout = 248; 371 wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, 248 / 4);
385 wm2000_write(i2c, WM2000_REG_ANA_VMID_PU_TIME, timeout / 4);
386 372
387 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 373 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
388 WM2000_MODE_ANA_SEQ_INCLUDE | 374 WM2000_MODE_ANA_SEQ_INCLUDE |
389 WM2000_MODE_THERMAL_ENABLE | 375 WM2000_MODE_THERMAL_ENABLE |
390 WM2000_MODE_MOUSE_ENABLE); 376 WM2000_MODE_MOUSE_ENABLE);
391 } else { 377 } else {
392 timeout = 10;
393
394 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL, 378 wm2000_write(i2c, WM2000_REG_SYS_MODE_CNTRL,
395 WM2000_MODE_THERMAL_ENABLE | 379 WM2000_MODE_THERMAL_ENABLE |
396 WM2000_MODE_MOUSE_ENABLE); 380 WM2000_MODE_MOUSE_ENABLE);
@@ -400,9 +384,8 @@ static int wm2000_exit_standby(struct i2c_client *i2c, int analogue)
400 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR); 384 wm2000_write(i2c, WM2000_REG_SYS_CTL2, WM2000_ANC_INT_N_CLR);
401 385
402 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS, 386 if (!wm2000_poll_bit(i2c, WM2000_REG_SYS_STATUS,
403 WM2000_STATUS_MOUSE_ACTIVE, timeout)) { 387 WM2000_STATUS_MOUSE_ACTIVE)) {
404 dev_err(&i2c->dev, "Timed out waiting for MOUSE after %dms\n", 388 dev_err(&i2c->dev, "Timed out waiting for MOUSE\n");
405 timeout * 10);
406 return -ETIMEDOUT; 389 return -ETIMEDOUT;
407 } 390 }
408 391
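The wm2000 hunks above fold the poll timeout into the helper itself rather than taking it from each caller. A minimal sketch of that shape, assuming i2c_smbus_read_byte_data() as the read path and an illustrative retry budget (the real driver uses its own register I/O and limits):

#include <linux/delay.h>
#include <linux/i2c.h>

/* Poll until every bit in 'mask' reads back set; the retry budget is
 * fixed inside the helper instead of being passed in by the caller. */
static bool example_poll_bit(struct i2c_client *i2c, u8 reg, u8 mask)
{
        int retries = 4000;             /* illustrative, not the driver's value */

        while (retries--) {
                s32 val = i2c_smbus_read_byte_data(i2c, reg);

                if (val < 0)
                        return false;
                if ((val & mask) == mask)
                        return true;    /* status bit came up in time */
                usleep_range(1000, 2000);
        }

        return false;                   /* caller then reports -ETIMEDOUT */
}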
diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c
index 65d525d74c54..812acd83fb48 100644
--- a/sound/soc/codecs/wm8904.c
+++ b/sound/soc/codecs/wm8904.c
@@ -1863,6 +1863,7 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
1863 return ret; 1863 return ret;
1864 } 1864 }
1865 1865
1866 regcache_cache_only(wm8904->regmap, false);
1866 regcache_sync(wm8904->regmap); 1867 regcache_sync(wm8904->regmap);
1867 1868
1868 /* Enable bias */ 1869 /* Enable bias */
@@ -1899,14 +1900,8 @@ static int wm8904_set_bias_level(struct snd_soc_codec *codec,
1899 snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0, 1900 snd_soc_update_bits(codec, WM8904_BIAS_CONTROL_0,
1900 WM8904_BIAS_ENA, 0); 1901 WM8904_BIAS_ENA, 0);
1901 1902
1902#ifdef CONFIG_REGULATOR 1903 regcache_cache_only(wm8904->regmap, true);
1903 /* Post 2.6.34 we will be able to get a callback when 1904 regcache_mark_dirty(wm8904->regmap);
1904 * the regulators are disabled which we can use but
1905 * for now just assume that the power will be cut if
1906 * the regulator API is in use.
1907 */
1908 codec->cache_sync = 1;
1909#endif
1910 1905
1911 regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies), 1906 regulator_bulk_disable(ARRAY_SIZE(wm8904->supplies),
1912 wm8904->supplies); 1907 wm8904->supplies);
@@ -2084,10 +2079,8 @@ static int wm8904_probe(struct snd_soc_codec *codec)
2084{ 2079{
2085 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec); 2080 struct wm8904_priv *wm8904 = snd_soc_codec_get_drvdata(codec);
2086 struct wm8904_pdata *pdata = wm8904->pdata; 2081 struct wm8904_pdata *pdata = wm8904->pdata;
2087 u16 *reg_cache = codec->reg_cache;
2088 int ret, i; 2082 int ret, i;
2089 2083
2090 codec->cache_sync = 1;
2091 codec->control_data = wm8904->regmap; 2084 codec->control_data = wm8904->regmap;
2092 2085
2093 switch (wm8904->devtype) { 2086 switch (wm8904->devtype) {
@@ -2150,6 +2143,7 @@ static int wm8904_probe(struct snd_soc_codec *codec)
2150 goto err_enable; 2143 goto err_enable;
2151 } 2144 }
2152 2145
2146 regcache_cache_only(wm8904->regmap, true);
2153 /* Change some default settings - latch VU and enable ZC */ 2147 /* Change some default settings - latch VU and enable ZC */
2154 snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT, 2148 snd_soc_update_bits(codec, WM8904_ADC_DIGITAL_VOLUME_LEFT,
2155 WM8904_ADC_VU, WM8904_ADC_VU); 2149 WM8904_ADC_VU, WM8904_ADC_VU);
@@ -2180,14 +2174,18 @@ static int wm8904_probe(struct snd_soc_codec *codec)
2180 if (!pdata->gpio_cfg[i]) 2174 if (!pdata->gpio_cfg[i])
2181 continue; 2175 continue;
2182 2176
2183 reg_cache[WM8904_GPIO_CONTROL_1 + i] 2177 regmap_update_bits(wm8904->regmap,
2184 = pdata->gpio_cfg[i] & 0xffff; 2178 WM8904_GPIO_CONTROL_1 + i,
2179 0xffff,
2180 pdata->gpio_cfg[i]);
2185 } 2181 }
2186 2182
2187 /* Zero is the default value for these anyway */ 2183 /* Zero is the default value for these anyway */
2188 for (i = 0; i < WM8904_MIC_REGS; i++) 2184 for (i = 0; i < WM8904_MIC_REGS; i++)
2189 reg_cache[WM8904_MIC_BIAS_CONTROL_0 + i] 2185 regmap_update_bits(wm8904->regmap,
2190 = pdata->mic_cfg[i]; 2186 WM8904_MIC_BIAS_CONTROL_0 + i,
2187 0xffff,
2188 pdata->mic_cfg[i]);
2191 } 2189 }
2192 2190
2193 /* Set Class W by default - this will be managed by the Class 2191 /* Set Class W by default - this will be managed by the Class
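The wm8904 conversion above is the standard regmap cache sequence: mark the cache dirty and go cache-only when power is cut, then drop cache-only and regcache_sync() once the supplies are restored. Reduced to its essentials it looks like this (a sketch against a bare struct regmap, error handling omitted):

#include <linux/regmap.h>

/* Power is about to be cut: stop touching the hardware and note that
 * every register will need rewriting later. */
static void example_power_off(struct regmap *map)
{
        regcache_cache_only(map, true);
        regcache_mark_dirty(map);
}

/* Power is back: allow hardware I/O again and replay the cached
 * register values into the device. */
static void example_power_on(struct regmap *map)
{
        regcache_cache_only(map, false);
        regcache_sync(map);
}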
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 993639d694ce..aa8c98b628da 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -46,6 +46,39 @@
46#define WM8994_NUM_DRC 3 46#define WM8994_NUM_DRC 3
47#define WM8994_NUM_EQ 3 47#define WM8994_NUM_EQ 3
48 48
49static struct {
50 unsigned int reg;
51 unsigned int mask;
52} wm8994_vu_bits[] = {
53 { WM8994_LEFT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
54 { WM8994_RIGHT_LINE_INPUT_1_2_VOLUME, WM8994_IN1_VU },
55 { WM8994_LEFT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
56 { WM8994_RIGHT_LINE_INPUT_3_4_VOLUME, WM8994_IN2_VU },
57 { WM8994_SPEAKER_VOLUME_LEFT, WM8994_SPKOUT_VU },
58 { WM8994_SPEAKER_VOLUME_RIGHT, WM8994_SPKOUT_VU },
59 { WM8994_LEFT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
60 { WM8994_RIGHT_OUTPUT_VOLUME, WM8994_HPOUT1_VU },
61 { WM8994_LEFT_OPGA_VOLUME, WM8994_MIXOUT_VU },
62 { WM8994_RIGHT_OPGA_VOLUME, WM8994_MIXOUT_VU },
63
64 { WM8994_AIF1_DAC1_LEFT_VOLUME, WM8994_AIF1DAC1_VU },
65 { WM8994_AIF1_DAC1_RIGHT_VOLUME, WM8994_AIF1DAC1_VU },
66 { WM8994_AIF1_DAC2_LEFT_VOLUME, WM8994_AIF1DAC2_VU },
67 { WM8994_AIF1_DAC2_RIGHT_VOLUME, WM8994_AIF1DAC2_VU },
68 { WM8994_AIF2_DAC_LEFT_VOLUME, WM8994_AIF2DAC_VU },
69 { WM8994_AIF2_DAC_RIGHT_VOLUME, WM8994_AIF2DAC_VU },
70 { WM8994_AIF1_ADC1_LEFT_VOLUME, WM8994_AIF1ADC1_VU },
71 { WM8994_AIF1_ADC1_RIGHT_VOLUME, WM8994_AIF1ADC1_VU },
72 { WM8994_AIF1_ADC2_LEFT_VOLUME, WM8994_AIF1ADC2_VU },
73 { WM8994_AIF1_ADC2_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
74 { WM8994_AIF2_ADC_LEFT_VOLUME, WM8994_AIF2ADC_VU },
75 { WM8994_AIF2_ADC_RIGHT_VOLUME, WM8994_AIF1ADC2_VU },
76 { WM8994_DAC1_LEFT_VOLUME, WM8994_DAC1_VU },
77 { WM8994_DAC1_RIGHT_VOLUME, WM8994_DAC1_VU },
78 { WM8994_DAC2_LEFT_VOLUME, WM8994_DAC2_VU },
79 { WM8994_DAC2_RIGHT_VOLUME, WM8994_DAC2_VU },
80};
81
49static int wm8994_drc_base[] = { 82static int wm8994_drc_base[] = {
50 WM8994_AIF1_DRC1_1, 83 WM8994_AIF1_DRC1_1,
51 WM8994_AIF1_DRC2_1, 84 WM8994_AIF1_DRC2_1,
@@ -989,6 +1022,7 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
989 struct snd_soc_codec *codec = w->codec; 1022 struct snd_soc_codec *codec = w->codec;
990 struct wm8994 *control = codec->control_data; 1023 struct wm8994 *control = codec->control_data;
991 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA; 1024 int mask = WM8994_AIF1DAC1L_ENA | WM8994_AIF1DAC1R_ENA;
1025 int i;
992 int dac; 1026 int dac;
993 int adc; 1027 int adc;
994 int val; 1028 int val;
@@ -1047,6 +1081,13 @@ static int aif1clk_ev(struct snd_soc_dapm_widget *w,
1047 WM8994_AIF1DAC2L_ENA); 1081 WM8994_AIF1DAC2L_ENA);
1048 break; 1082 break;
1049 1083
1084 case SND_SOC_DAPM_POST_PMU:
1085 for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
1086 snd_soc_write(codec, wm8994_vu_bits[i].reg,
1087 snd_soc_read(codec,
1088 wm8994_vu_bits[i].reg));
1089 break;
1090
1050 case SND_SOC_DAPM_PRE_PMD: 1091 case SND_SOC_DAPM_PRE_PMD:
1051 case SND_SOC_DAPM_POST_PMD: 1092 case SND_SOC_DAPM_POST_PMD:
1052 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, 1093 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1072,6 +1113,7 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
1072 struct snd_kcontrol *kcontrol, int event) 1113 struct snd_kcontrol *kcontrol, int event)
1073{ 1114{
1074 struct snd_soc_codec *codec = w->codec; 1115 struct snd_soc_codec *codec = w->codec;
1116 int i;
1075 int dac; 1117 int dac;
1076 int adc; 1118 int adc;
1077 int val; 1119 int val;
@@ -1122,6 +1164,13 @@ static int aif2clk_ev(struct snd_soc_dapm_widget *w,
1122 WM8994_AIF2DACR_ENA); 1164 WM8994_AIF2DACR_ENA);
1123 break; 1165 break;
1124 1166
1167 case SND_SOC_DAPM_POST_PMU:
1168 for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
1169 snd_soc_write(codec, wm8994_vu_bits[i].reg,
1170 snd_soc_read(codec,
1171 wm8994_vu_bits[i].reg));
1172 break;
1173
1125 case SND_SOC_DAPM_PRE_PMD: 1174 case SND_SOC_DAPM_PRE_PMD:
1126 case SND_SOC_DAPM_POST_PMD: 1175 case SND_SOC_DAPM_POST_PMD:
1127 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5, 1176 snd_soc_update_bits(codec, WM8994_POWER_MANAGEMENT_5,
@@ -1190,17 +1239,19 @@ static int late_enable_ev(struct snd_soc_dapm_widget *w,
1190 switch (event) { 1239 switch (event) {
1191 case SND_SOC_DAPM_PRE_PMU: 1240 case SND_SOC_DAPM_PRE_PMU:
1192 if (wm8994->aif1clk_enable) { 1241 if (wm8994->aif1clk_enable) {
1193 aif1clk_ev(w, kcontrol, event); 1242 aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
1194 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1243 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1195 WM8994_AIF1CLK_ENA_MASK, 1244 WM8994_AIF1CLK_ENA_MASK,
1196 WM8994_AIF1CLK_ENA); 1245 WM8994_AIF1CLK_ENA);
1246 aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
1197 wm8994->aif1clk_enable = 0; 1247 wm8994->aif1clk_enable = 0;
1198 } 1248 }
1199 if (wm8994->aif2clk_enable) { 1249 if (wm8994->aif2clk_enable) {
1200 aif2clk_ev(w, kcontrol, event); 1250 aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMU);
1201 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1251 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1202 WM8994_AIF2CLK_ENA_MASK, 1252 WM8994_AIF2CLK_ENA_MASK,
1203 WM8994_AIF2CLK_ENA); 1253 WM8994_AIF2CLK_ENA);
1254 aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMU);
1204 wm8994->aif2clk_enable = 0; 1255 wm8994->aif2clk_enable = 0;
1205 } 1256 }
1206 break; 1257 break;
@@ -1221,15 +1272,17 @@ static int late_disable_ev(struct snd_soc_dapm_widget *w,
1221 switch (event) { 1272 switch (event) {
1222 case SND_SOC_DAPM_POST_PMD: 1273 case SND_SOC_DAPM_POST_PMD:
1223 if (wm8994->aif1clk_disable) { 1274 if (wm8994->aif1clk_disable) {
1275 aif1clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
1224 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1, 1276 snd_soc_update_bits(codec, WM8994_AIF1_CLOCKING_1,
1225 WM8994_AIF1CLK_ENA_MASK, 0); 1277 WM8994_AIF1CLK_ENA_MASK, 0);
1226 aif1clk_ev(w, kcontrol, event); 1278 aif1clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
1227 wm8994->aif1clk_disable = 0; 1279 wm8994->aif1clk_disable = 0;
1228 } 1280 }
1229 if (wm8994->aif2clk_disable) { 1281 if (wm8994->aif2clk_disable) {
1282 aif2clk_ev(w, kcontrol, SND_SOC_DAPM_PRE_PMD);
1230 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1, 1283 snd_soc_update_bits(codec, WM8994_AIF2_CLOCKING_1,
1231 WM8994_AIF2CLK_ENA_MASK, 0); 1284 WM8994_AIF2CLK_ENA_MASK, 0);
1232 aif2clk_ev(w, kcontrol, event); 1285 aif2clk_ev(w, kcontrol, SND_SOC_DAPM_POST_PMD);
1233 wm8994->aif2clk_disable = 0; 1286 wm8994->aif2clk_disable = 0;
1234 } 1287 }
1235 break; 1288 break;
@@ -1527,9 +1580,11 @@ SND_SOC_DAPM_POST("Late Disable PGA", late_disable_ev)
1527 1580
1528static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = { 1581static const struct snd_soc_dapm_widget wm8994_lateclk_widgets[] = {
1529SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev, 1582SND_SOC_DAPM_SUPPLY("AIF1CLK", WM8994_AIF1_CLOCKING_1, 0, 0, aif1clk_ev,
1530 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), 1583 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
1584 SND_SOC_DAPM_PRE_PMD),
1531SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev, 1585SND_SOC_DAPM_SUPPLY("AIF2CLK", WM8994_AIF2_CLOCKING_1, 0, 0, aif2clk_ev,
1532 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_PRE_PMD), 1586 SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
1587 SND_SOC_DAPM_PRE_PMD),
1533SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0), 1588SND_SOC_DAPM_PGA("Direct Voice", SND_SOC_NOPM, 0, 0, NULL, 0),
1534SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0, 1589SND_SOC_DAPM_MIXER("SPKL", WM8994_POWER_MANAGEMENT_3, 8, 0,
1535 left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)), 1590 left_speaker_mixer, ARRAY_SIZE(left_speaker_mixer)),
@@ -3879,39 +3934,11 @@ static int wm8994_codec_probe(struct snd_soc_codec *codec)
3879 3934
3880 pm_runtime_put(codec->dev); 3935 pm_runtime_put(codec->dev);
3881 3936
3882 /* Latch volume updates (right only; we always do left then right). */ 3937 /* Latch volume update bits */
3883 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_LEFT_VOLUME, 3938 for (i = 0; i < ARRAY_SIZE(wm8994_vu_bits); i++)
3884 WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3939 snd_soc_update_bits(codec, wm8994_vu_bits[i].reg,
3885 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_RIGHT_VOLUME, 3940 wm8994_vu_bits[i].mask,
3886 WM8994_AIF1DAC1_VU, WM8994_AIF1DAC1_VU); 3941 wm8994_vu_bits[i].mask);
3887 snd_soc_update_bits(codec, WM8994_AIF1_DAC2_LEFT_VOLUME,
3888 WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
3889 snd_soc_update_bits(codec, WM8994_AIF1_DAC2_RIGHT_VOLUME,
3890 WM8994_AIF1DAC2_VU, WM8994_AIF1DAC2_VU);
3891 snd_soc_update_bits(codec, WM8994_AIF2_DAC_LEFT_VOLUME,
3892 WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
3893 snd_soc_update_bits(codec, WM8994_AIF2_DAC_RIGHT_VOLUME,
3894 WM8994_AIF2DAC_VU, WM8994_AIF2DAC_VU);
3895 snd_soc_update_bits(codec, WM8994_AIF1_ADC1_LEFT_VOLUME,
3896 WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
3897 snd_soc_update_bits(codec, WM8994_AIF1_ADC1_RIGHT_VOLUME,
3898 WM8994_AIF1ADC1_VU, WM8994_AIF1ADC1_VU);
3899 snd_soc_update_bits(codec, WM8994_AIF1_ADC2_LEFT_VOLUME,
3900 WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
3901 snd_soc_update_bits(codec, WM8994_AIF1_ADC2_RIGHT_VOLUME,
3902 WM8994_AIF1ADC2_VU, WM8994_AIF1ADC2_VU);
3903 snd_soc_update_bits(codec, WM8994_AIF2_ADC_LEFT_VOLUME,
3904 WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
3905 snd_soc_update_bits(codec, WM8994_AIF2_ADC_RIGHT_VOLUME,
3906 WM8994_AIF2ADC_VU, WM8994_AIF1ADC2_VU);
3907 snd_soc_update_bits(codec, WM8994_DAC1_LEFT_VOLUME,
3908 WM8994_DAC1_VU, WM8994_DAC1_VU);
3909 snd_soc_update_bits(codec, WM8994_DAC1_RIGHT_VOLUME,
3910 WM8994_DAC1_VU, WM8994_DAC1_VU);
3911 snd_soc_update_bits(codec, WM8994_DAC2_LEFT_VOLUME,
3912 WM8994_DAC2_VU, WM8994_DAC2_VU);
3913 snd_soc_update_bits(codec, WM8994_DAC2_RIGHT_VOLUME,
3914 WM8994_DAC2_VU, WM8994_DAC2_VU);
3915 3942
3916 /* Set the low bit of the 3D stereo depth so TLV matches */ 3943 /* Set the low bit of the 3D stereo depth so TLV matches */
3917 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2, 3944 snd_soc_update_bits(codec, WM8994_AIF1_DAC1_FILTERS_2,
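The wm8994 change collapses a long run of near-identical volume-latch writes into one table plus a loop, and replays the same table after the AIF clocks power up so the VU bits take effect. The table-driven shape, stripped of the codec specifics (a generic sketch; the update callback stands in for snd_soc_update_bits()):

#include <stddef.h>

struct reg_bit {
        unsigned int reg;
        unsigned int mask;
};

/* Set 'mask' in every register listed in the table, using whatever
 * register-update primitive the caller provides. */
static void latch_all(void *ctx, const struct reg_bit *tbl, size_t n,
                      int (*update)(void *ctx, unsigned int reg,
                                    unsigned int mask, unsigned int val))
{
        size_t i;

        for (i = 0; i < n; i++)
                update(ctx, tbl[i].reg, tbl[i].mask, tbl[i].mask);
}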
diff --git a/sound/soc/codecs/wm8996.c b/sound/soc/codecs/wm8996.c
index 8af422e38fd0..dc9b42b7fc4d 100644
--- a/sound/soc/codecs/wm8996.c
+++ b/sound/soc/codecs/wm8996.c
@@ -2837,8 +2837,6 @@ static int wm8996_probe(struct snd_soc_codec *codec)
2837 } 2837 }
2838 } 2838 }
2839 2839
2840 regcache_cache_only(codec->control_data, true);
2841
2842 /* Apply platform data settings */ 2840 /* Apply platform data settings */
2843 snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL, 2841 snd_soc_update_bits(codec, WM8996_LINE_INPUT_CONTROL,
2844 WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK, 2842 WM8996_INL_MODE_MASK | WM8996_INR_MODE_MASK,
@@ -3051,7 +3049,6 @@ static int wm8996_remove(struct snd_soc_codec *codec)
3051 for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++) 3049 for (i = 0; i < ARRAY_SIZE(wm8996->supplies); i++)
3052 regulator_unregister_notifier(wm8996->supplies[i].consumer, 3050 regulator_unregister_notifier(wm8996->supplies[i].consumer,
3053 &wm8996->disable_nb[i]); 3051 &wm8996->disable_nb[i]);
3054 regulator_bulk_free(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
3055 3052
3056 return 0; 3053 return 0;
3057} 3054}
@@ -3206,14 +3203,15 @@ static __devinit int wm8996_i2c_probe(struct i2c_client *i2c,
3206 dev_info(&i2c->dev, "revision %c\n", 3203 dev_info(&i2c->dev, "revision %c\n",
3207 (reg & WM8996_CHIP_REV_MASK) + 'A'); 3204 (reg & WM8996_CHIP_REV_MASK) + 'A');
3208 3205
3209 regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
3210
3211 ret = wm8996_reset(wm8996); 3206 ret = wm8996_reset(wm8996);
3212 if (ret < 0) { 3207 if (ret < 0) {
3213 dev_err(&i2c->dev, "Failed to issue reset\n"); 3208 dev_err(&i2c->dev, "Failed to issue reset\n");
3214 goto err_regmap; 3209 goto err_regmap;
3215 } 3210 }
3216 3211
3212 regcache_cache_only(wm8996->regmap, true);
3213 regulator_bulk_disable(ARRAY_SIZE(wm8996->supplies), wm8996->supplies);
3214
3217 wm8996_init_gpio(wm8996); 3215 wm8996_init_gpio(wm8996);
3218 3216
3219 ret = snd_soc_register_codec(&i2c->dev, 3217 ret = snd_soc_register_codec(&i2c->dev,
diff --git a/sound/soc/fsl/imx-audmux.c b/sound/soc/fsl/imx-audmux.c
index f23700359c67..080327414c6b 100644
--- a/sound/soc/fsl/imx-audmux.c
+++ b/sound/soc/fsl/imx-audmux.c
@@ -26,6 +26,7 @@
26#include <linux/of_device.h> 26#include <linux/of_device.h>
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/pinctrl/consumer.h>
29 30
30#include "imx-audmux.h" 31#include "imx-audmux.h"
31 32
@@ -249,6 +250,7 @@ EXPORT_SYMBOL_GPL(imx_audmux_v2_configure_port);
249static int __devinit imx_audmux_probe(struct platform_device *pdev) 250static int __devinit imx_audmux_probe(struct platform_device *pdev)
250{ 251{
251 struct resource *res; 252 struct resource *res;
253 struct pinctrl *pinctrl;
252 const struct of_device_id *of_id = 254 const struct of_device_id *of_id =
253 of_match_device(imx_audmux_dt_ids, &pdev->dev); 255 of_match_device(imx_audmux_dt_ids, &pdev->dev);
254 256
@@ -257,6 +259,12 @@ static int __devinit imx_audmux_probe(struct platform_device *pdev)
257 if (!audmux_base) 259 if (!audmux_base)
258 return -EADDRNOTAVAIL; 260 return -EADDRNOTAVAIL;
259 261
262 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
263 if (IS_ERR(pinctrl)) {
264 dev_err(&pdev->dev, "setup pinctrl failed!");
265 return PTR_ERR(pinctrl);
266 }
267
260 audmux_clk = clk_get(&pdev->dev, "audmux"); 268 audmux_clk = clk_get(&pdev->dev, "audmux");
261 if (IS_ERR(audmux_clk)) { 269 if (IS_ERR(audmux_clk)) {
262 dev_dbg(&pdev->dev, "cannot get clock: %ld\n", 270 dev_dbg(&pdev->dev, "cannot get clock: %ld\n",
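The imx-audmux hunk adds the usual probe-time pinctrl handshake: claim the device's default pin state before touching the hardware. On its own the pattern is just this (a sketch, with the rest of the probe elided):

#include <linux/err.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        struct pinctrl *pinctrl;

        /* Select the "default" pinctrl state for this device; failure
         * usually means the pins are missing from the device tree or
         * already owned by another device. */
        pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
        if (IS_ERR(pinctrl))
                return PTR_ERR(pinctrl);

        return 0;
}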
diff --git a/sound/soc/fsl/imx-ssi.c b/sound/soc/fsl/imx-ssi.c
index cf3ed0362c9c..28dd76c7cb1c 100644
--- a/sound/soc/fsl/imx-ssi.c
+++ b/sound/soc/fsl/imx-ssi.c
@@ -543,7 +543,7 @@ static int imx_ssi_probe(struct platform_device *pdev)
543 ret); 543 ret);
544 goto failed_clk; 544 goto failed_clk;
545 } 545 }
546 clk_enable(ssi->clk); 546 clk_prepare_enable(ssi->clk);
547 547
548 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 548 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
549 if (!res) { 549 if (!res) {
@@ -641,7 +641,7 @@ failed_ac97:
641failed_ioremap: 641failed_ioremap:
642 release_mem_region(res->start, resource_size(res)); 642 release_mem_region(res->start, resource_size(res));
643failed_get_resource: 643failed_get_resource:
644 clk_disable(ssi->clk); 644 clk_disable_unprepare(ssi->clk);
645 clk_put(ssi->clk); 645 clk_put(ssi->clk);
646failed_clk: 646failed_clk:
647 kfree(ssi); 647 kfree(ssi);
@@ -664,7 +664,7 @@ static int __devexit imx_ssi_remove(struct platform_device *pdev)
664 664
665 iounmap(ssi->base); 665 iounmap(ssi->base);
666 release_mem_region(res->start, resource_size(res)); 666 release_mem_region(res->start, resource_size(res));
667 clk_disable(ssi->clk); 667 clk_disable_unprepare(ssi->clk);
668 clk_put(ssi->clk); 668 clk_put(ssi->clk);
669 kfree(ssi); 669 kfree(ssi);
670 670
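The imx-ssi changes are the routine clk_enable() to clk_prepare_enable() migration for the common clock framework; each call must stay paired with its mirror image on the teardown path. As a stand-alone sketch:

#include <linux/clk.h>

static int example_start(struct clk *clk)
{
        /* prepare (may sleep) and enable (atomic) in one call */
        return clk_prepare_enable(clk);
}

static void example_stop(struct clk *clk)
{
        /* the mirror image: disable, then unprepare */
        clk_disable_unprepare(clk);
}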
diff --git a/sound/soc/pxa/pxa-ssp.c b/sound/soc/pxa/pxa-ssp.c
index 1c2aa7fab3fd..4da5fc55c7ee 100644
--- a/sound/soc/pxa/pxa-ssp.c
+++ b/sound/soc/pxa/pxa-ssp.c
@@ -33,7 +33,6 @@
33 33
34#include <mach/hardware.h> 34#include <mach/hardware.h>
35#include <mach/dma.h> 35#include <mach/dma.h>
36#include <mach/audio.h>
37 36
38#include "../../arm/pxa2xx-pcm.h" 37#include "../../arm/pxa2xx-pcm.h"
39#include "pxa-ssp.h" 38#include "pxa-ssp.h"
@@ -194,7 +193,7 @@ static void pxa_ssp_set_scr(struct ssp_device *ssp, u32 div)
194{ 193{
195 u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); 194 u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
196 195
197 if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) { 196 if (ssp->type == PXA25x_SSP) {
198 sscr0 &= ~0x0000ff00; 197 sscr0 &= ~0x0000ff00;
199 sscr0 |= ((div - 2)/2) << 8; /* 2..512 */ 198 sscr0 |= ((div - 2)/2) << 8; /* 2..512 */
200 } else { 199 } else {
@@ -212,7 +211,7 @@ static u32 pxa_ssp_get_scr(struct ssp_device *ssp)
212 u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0); 211 u32 sscr0 = pxa_ssp_read_reg(ssp, SSCR0);
213 u32 div; 212 u32 div;
214 213
215 if (cpu_is_pxa25x() && ssp->type == PXA25x_SSP) 214 if (ssp->type == PXA25x_SSP)
216 div = ((sscr0 >> 8) & 0xff) * 2 + 2; 215 div = ((sscr0 >> 8) & 0xff) * 2 + 2;
217 else 216 else
218 div = ((sscr0 >> 8) & 0xfff) + 1; 217 div = ((sscr0 >> 8) & 0xfff) + 1;
@@ -242,7 +241,7 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
242 break; 241 break;
243 case PXA_SSP_CLK_PLL: 242 case PXA_SSP_CLK_PLL:
244 /* Internal PLL is fixed */ 243 /* Internal PLL is fixed */
245 if (cpu_is_pxa25x()) 244 if (ssp->type == PXA25x_SSP)
246 priv->sysclk = 1843200; 245 priv->sysclk = 1843200;
247 else 246 else
248 priv->sysclk = 13000000; 247 priv->sysclk = 13000000;
@@ -266,11 +265,11 @@ static int pxa_ssp_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
266 265
267 /* The SSP clock must be disabled when changing SSP clock mode 266 /* The SSP clock must be disabled when changing SSP clock mode
268 * on PXA2xx. On PXA3xx it must be enabled when doing so. */ 267 * on PXA2xx. On PXA3xx it must be enabled when doing so. */
269 if (!cpu_is_pxa3xx()) 268 if (ssp->type != PXA3xx_SSP)
270 clk_disable(ssp->clk); 269 clk_disable(ssp->clk);
271 val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0; 270 val = pxa_ssp_read_reg(ssp, SSCR0) | sscr0;
272 pxa_ssp_write_reg(ssp, SSCR0, val); 271 pxa_ssp_write_reg(ssp, SSCR0, val);
273 if (!cpu_is_pxa3xx()) 272 if (ssp->type != PXA3xx_SSP)
274 clk_enable(ssp->clk); 273 clk_enable(ssp->clk);
275 274
276 return 0; 275 return 0;
@@ -294,24 +293,20 @@ static int pxa_ssp_set_dai_clkdiv(struct snd_soc_dai *cpu_dai,
294 case PXA_SSP_AUDIO_DIV_SCDB: 293 case PXA_SSP_AUDIO_DIV_SCDB:
295 val = pxa_ssp_read_reg(ssp, SSACD); 294 val = pxa_ssp_read_reg(ssp, SSACD);
296 val &= ~SSACD_SCDB; 295 val &= ~SSACD_SCDB;
297#if defined(CONFIG_PXA3xx) 296 if (ssp->type == PXA3xx_SSP)
298 if (cpu_is_pxa3xx())
299 val &= ~SSACD_SCDX8; 297 val &= ~SSACD_SCDX8;
300#endif
301 switch (div) { 298 switch (div) {
302 case PXA_SSP_CLK_SCDB_1: 299 case PXA_SSP_CLK_SCDB_1:
303 val |= SSACD_SCDB; 300 val |= SSACD_SCDB;
304 break; 301 break;
305 case PXA_SSP_CLK_SCDB_4: 302 case PXA_SSP_CLK_SCDB_4:
306 break; 303 break;
307#if defined(CONFIG_PXA3xx)
308 case PXA_SSP_CLK_SCDB_8: 304 case PXA_SSP_CLK_SCDB_8:
309 if (cpu_is_pxa3xx()) 305 if (ssp->type == PXA3xx_SSP)
310 val |= SSACD_SCDX8; 306 val |= SSACD_SCDX8;
311 else 307 else
312 return -EINVAL; 308 return -EINVAL;
313 break; 309 break;
314#endif
315 default: 310 default:
316 return -EINVAL; 311 return -EINVAL;
317 } 312 }
@@ -337,10 +332,8 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
337 struct ssp_device *ssp = priv->ssp; 332 struct ssp_device *ssp = priv->ssp;
338 u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70; 333 u32 ssacd = pxa_ssp_read_reg(ssp, SSACD) & ~0x70;
339 334
340#if defined(CONFIG_PXA3xx) 335 if (ssp->type == PXA3xx_SSP)
341 if (cpu_is_pxa3xx())
342 pxa_ssp_write_reg(ssp, SSACDD, 0); 336 pxa_ssp_write_reg(ssp, SSACDD, 0);
343#endif
344 337
345 switch (freq_out) { 338 switch (freq_out) {
346 case 5622000: 339 case 5622000:
@@ -365,11 +358,10 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
365 break; 358 break;
366 359
367 default: 360 default:
368#ifdef CONFIG_PXA3xx
369 /* PXA3xx has a clock ditherer which can be used to generate 361 /* PXA3xx has a clock ditherer which can be used to generate
370 * a wider range of frequencies - calculate a value for it. 362 * a wider range of frequencies - calculate a value for it.
371 */ 363 */
372 if (cpu_is_pxa3xx()) { 364 if (ssp->type == PXA3xx_SSP) {
373 u32 val; 365 u32 val;
374 u64 tmp = 19968; 366 u64 tmp = 19968;
375 tmp *= 1000000; 367 tmp *= 1000000;
@@ -386,7 +378,6 @@ static int pxa_ssp_set_dai_pll(struct snd_soc_dai *cpu_dai, int pll_id,
386 val, freq_out); 378 val, freq_out);
387 break; 379 break;
388 } 380 }
389#endif
390 381
391 return -EINVAL; 382 return -EINVAL;
392 } 383 }
@@ -590,10 +581,8 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
590 /* bit size */ 581 /* bit size */
591 switch (params_format(params)) { 582 switch (params_format(params)) {
592 case SNDRV_PCM_FORMAT_S16_LE: 583 case SNDRV_PCM_FORMAT_S16_LE:
593#ifdef CONFIG_PXA3xx 584 if (ssp->type == PXA3xx_SSP)
594 if (cpu_is_pxa3xx())
595 sscr0 |= SSCR0_FPCKE; 585 sscr0 |= SSCR0_FPCKE;
596#endif
597 sscr0 |= SSCR0_DataSize(16); 586 sscr0 |= SSCR0_DataSize(16);
598 break; 587 break;
599 case SNDRV_PCM_FORMAT_S24_LE: 588 case SNDRV_PCM_FORMAT_S24_LE:
@@ -618,9 +607,7 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
618 * trying and failing a lot; some of the registers 607 * trying and failing a lot; some of the registers
619 * needed for that mode are only available on PXA3xx. 608 * needed for that mode are only available on PXA3xx.
620 */ 609 */
621 610 if (ssp->type != PXA3xx_SSP)
622#ifdef CONFIG_PXA3xx
623 if (!cpu_is_pxa3xx())
624 return -EINVAL; 611 return -EINVAL;
625 612
626 sspsp |= SSPSP_SFRMWDTH(width * 2); 613 sspsp |= SSPSP_SFRMWDTH(width * 2);
@@ -628,9 +615,6 @@ static int pxa_ssp_hw_params(struct snd_pcm_substream *substream,
628 sspsp |= SSPSP_EDMYSTOP(3); 615 sspsp |= SSPSP_EDMYSTOP(3);
629 sspsp |= SSPSP_DMYSTOP(3); 616 sspsp |= SSPSP_DMYSTOP(3);
630 sspsp |= SSPSP_DMYSTRT(1); 617 sspsp |= SSPSP_DMYSTRT(1);
631#else
632 return -EINVAL;
633#endif
634 } else { 618 } else {
635 /* The frame width is the width the LRCLK is 619 /* The frame width is the width the LRCLK is
636 * asserted for; the delay is expressed in 620 * asserted for; the delay is expressed in
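Every pxa-ssp hunk above makes the same substitution: compile-time CONFIG_PXA3xx guards and global cpu_is_pxa25x()/cpu_is_pxa3xx() tests become runtime checks on the port's own type field, so one kernel image can drive both SSP generations. A minimal sketch of the idea, reusing the SSCR0_FPCKE case from the diff:

#include <linux/pxa2xx_ssp.h>

/* Decide a per-port capability at runtime; ssp->type is filled in by
 * the SSP core when the port is requested. */
static u32 example_scr0_extra_bits(struct ssp_device *ssp)
{
        /* Only PXA3xx ports support frame packing. */
        if (ssp->type == PXA3xx_SSP)
                return SSCR0_FPCKE;

        return 0;
}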
diff --git a/sound/soc/sh/fsi.c b/sound/soc/sh/fsi.c
index 7cee22515d9d..2ef98536f1da 100644
--- a/sound/soc/sh/fsi.c
+++ b/sound/soc/sh/fsi.c
@@ -1052,6 +1052,13 @@ static int fsi_dma_quit(struct fsi_priv *fsi, struct fsi_stream *io)
1052 return 0; 1052 return 0;
1053} 1053}
1054 1054
1055static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
1056{
1057 struct snd_pcm_runtime *runtime = io->substream->runtime;
1058
1059 return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
1060}
1061
1055static void fsi_dma_complete(void *data) 1062static void fsi_dma_complete(void *data)
1056{ 1063{
1057 struct fsi_stream *io = (struct fsi_stream *)data; 1064 struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1061,7 +1068,7 @@ static void fsi_dma_complete(void *data)
1061 enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ? 1068 enum dma_data_direction dir = fsi_stream_is_play(fsi, io) ?
1062 DMA_TO_DEVICE : DMA_FROM_DEVICE; 1069 DMA_TO_DEVICE : DMA_FROM_DEVICE;
1063 1070
1064 dma_sync_single_for_cpu(dai->dev, io->dma, 1071 dma_sync_single_for_cpu(dai->dev, fsi_dma_get_area(io),
1065 samples_to_bytes(runtime, io->period_samples), dir); 1072 samples_to_bytes(runtime, io->period_samples), dir);
1066 1073
1067 io->buff_sample_pos += io->period_samples; 1074 io->buff_sample_pos += io->period_samples;
@@ -1078,13 +1085,6 @@ static void fsi_dma_complete(void *data)
1078 snd_pcm_period_elapsed(io->substream); 1085 snd_pcm_period_elapsed(io->substream);
1079} 1086}
1080 1087
1081static dma_addr_t fsi_dma_get_area(struct fsi_stream *io)
1082{
1083 struct snd_pcm_runtime *runtime = io->substream->runtime;
1084
1085 return io->dma + samples_to_bytes(runtime, io->buff_sample_pos);
1086}
1087
1088static void fsi_dma_do_tasklet(unsigned long data) 1088static void fsi_dma_do_tasklet(unsigned long data)
1089{ 1089{
1090 struct fsi_stream *io = (struct fsi_stream *)data; 1090 struct fsi_stream *io = (struct fsi_stream *)data;
@@ -1110,7 +1110,7 @@ static void fsi_dma_do_tasklet(unsigned long data)
1110 len = samples_to_bytes(runtime, io->period_samples); 1110 len = samples_to_bytes(runtime, io->period_samples);
1111 buf = fsi_dma_get_area(io); 1111 buf = fsi_dma_get_area(io);
1112 1112
1113 dma_sync_single_for_device(dai->dev, io->dma, len, dir); 1113 dma_sync_single_for_device(dai->dev, buf, len, dir);
1114 1114
1115 sg_init_table(&sg, 1); 1115 sg_init_table(&sg, 1);
1116 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)), 1116 sg_set_page(&sg, pfn_to_page(PFN_DOWN(buf)),
@@ -1172,9 +1172,16 @@ static int fsi_dma_transfer(struct fsi_priv *fsi, struct fsi_stream *io)
1172static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io, 1172static void fsi_dma_push_start_stop(struct fsi_priv *fsi, struct fsi_stream *io,
1173 int start) 1173 int start)
1174{ 1174{
1175 struct fsi_master *master = fsi_get_master(fsi);
1176 u32 clk = fsi_is_port_a(fsi) ? CRA : CRB;
1175 u32 enable = start ? DMA_ON : 0; 1177 u32 enable = start ? DMA_ON : 0;
1176 1178
1177 fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable); 1179 fsi_reg_mask_set(fsi, OUT_DMAC, DMA_ON, enable);
1180
1181 dmaengine_terminate_all(io->chan);
1182
1183 if (fsi_is_clk_master(fsi))
1184 fsi_master_mask_set(master, CLK_RST, clk, (enable) ? clk : 0);
1178} 1185}
1179 1186
1180static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io) 1187static int fsi_dma_probe(struct fsi_priv *fsi, struct fsi_stream *io)
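The fsi.c fix points the DMA sync calls at the stream's current position instead of always at the start of the buffer; the addressing is simply the base address plus the byte offset of the current sample position. A sketch of that calculation and the sync call (assuming a generic device and PCM runtime, not the driver's own structures):

#include <linux/dma-mapping.h>
#include <sound/pcm.h>

/* Sync only the period about to be transferred, starting at the
 * stream's current sample position within the DMA buffer. */
static void example_sync_period(struct device *dev, dma_addr_t base,
                                struct snd_pcm_runtime *runtime,
                                unsigned int pos_samples,
                                unsigned int period_samples,
                                enum dma_data_direction dir)
{
        dma_addr_t area = base + samples_to_bytes(runtime, pos_samples);

        dma_sync_single_for_device(dev, area,
                                   samples_to_bytes(runtime, period_samples),
                                   dir);
}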
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 90ee77d2409d..89eae93445cf 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -913,7 +913,7 @@ static int is_connected_input_ep(struct snd_soc_dapm_widget *widget,
913 /* do we need to add this widget to the list ? */ 913 /* do we need to add this widget to the list ? */
914 if (list) { 914 if (list) {
915 int err; 915 int err;
916 err = dapm_list_add_widget(list, path->sink); 916 err = dapm_list_add_widget(list, path->source);
917 if (err < 0) { 917 if (err < 0) {
918 dev_err(widget->dapm->dev, "could not add widget %s\n", 918 dev_err(widget->dapm->dev, "could not add widget %s\n",
919 widget->name); 919 widget->name);
@@ -954,7 +954,7 @@ int snd_soc_dapm_dai_get_connected_widgets(struct snd_soc_dai *dai, int stream,
954 if (stream == SNDRV_PCM_STREAM_PLAYBACK) 954 if (stream == SNDRV_PCM_STREAM_PLAYBACK)
955 paths = is_connected_output_ep(dai->playback_widget, list); 955 paths = is_connected_output_ep(dai->playback_widget, list);
956 else 956 else
957 paths = is_connected_input_ep(dai->playback_widget, list); 957 paths = is_connected_input_ep(dai->capture_widget, list);
958 958
959 trace_snd_soc_dapm_connected(paths, stream); 959 trace_snd_soc_dapm_connected(paths, stream);
960 dapm_clear_walk(&card->dapm); 960 dapm_clear_walk(&card->dapm);
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index bedd1717a373..48fd15b312c1 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -794,6 +794,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
794 for (i = 0; i < card->num_links; i++) { 794 for (i = 0; i < card->num_links; i++) {
795 be = &card->rtd[i]; 795 be = &card->rtd[i];
796 796
797 if (!be->dai_link->no_pcm)
798 continue;
799
797 if (be->cpu_dai->playback_widget == widget || 800 if (be->cpu_dai->playback_widget == widget ||
798 be->codec_dai->playback_widget == widget) 801 be->codec_dai->playback_widget == widget)
799 return be; 802 return be;
@@ -803,6 +806,9 @@ static struct snd_soc_pcm_runtime *dpcm_get_be(struct snd_soc_card *card,
803 for (i = 0; i < card->num_links; i++) { 806 for (i = 0; i < card->num_links; i++) {
804 be = &card->rtd[i]; 807 be = &card->rtd[i];
805 808
809 if (!be->dai_link->no_pcm)
810 continue;
811
806 if (be->cpu_dai->capture_widget == widget || 812 if (be->cpu_dai->capture_widget == widget ||
807 be->codec_dai->capture_widget == widget) 813 be->codec_dai->capture_widget == widget)
808 return be; 814 return be;
diff --git a/sound/soc/tegra/tegra30_ahub.c b/sound/soc/tegra/tegra30_ahub.c
index 57cd419f743e..f43edb364a18 100644
--- a/sound/soc/tegra/tegra30_ahub.c
+++ b/sound/soc/tegra/tegra30_ahub.c
@@ -629,3 +629,4 @@ MODULE_AUTHOR("Stephen Warren <swarren@nvidia.com>");
629MODULE_DESCRIPTION("Tegra30 AHUB driver"); 629MODULE_DESCRIPTION("Tegra30 AHUB driver");
630MODULE_LICENSE("GPL v2"); 630MODULE_LICENSE("GPL v2");
631MODULE_ALIAS("platform:" DRV_NAME); 631MODULE_ALIAS("platform:" DRV_NAME);
632MODULE_DEVICE_TABLE(of, tegra30_ahub_of_match);
diff --git a/sound/soc/tegra/tegra_wm8903.c b/sound/soc/tegra/tegra_wm8903.c
index 0b0df49d9d33..3b6da91188a9 100644
--- a/sound/soc/tegra/tegra_wm8903.c
+++ b/sound/soc/tegra/tegra_wm8903.c
@@ -346,6 +346,17 @@ static int tegra_wm8903_init(struct snd_soc_pcm_runtime *rtd)
346 return 0; 346 return 0;
347} 347}
348 348
349static int tegra_wm8903_remove(struct snd_soc_card *card)
350{
351 struct snd_soc_pcm_runtime *rtd = &(card->rtd[0]);
352 struct snd_soc_dai *codec_dai = rtd->codec_dai;
353 struct snd_soc_codec *codec = codec_dai->codec;
354
355 wm8903_mic_detect(codec, NULL, 0, 0);
356
357 return 0;
358}
359
349static struct snd_soc_dai_link tegra_wm8903_dai = { 360static struct snd_soc_dai_link tegra_wm8903_dai = {
350 .name = "WM8903", 361 .name = "WM8903",
351 .stream_name = "WM8903 PCM", 362 .stream_name = "WM8903 PCM",
@@ -363,6 +374,8 @@ static struct snd_soc_card snd_soc_tegra_wm8903 = {
363 .dai_link = &tegra_wm8903_dai, 374 .dai_link = &tegra_wm8903_dai,
364 .num_links = 1, 375 .num_links = 1,
365 376
377 .remove = tegra_wm8903_remove,
378
366 .controls = tegra_wm8903_controls, 379 .controls = tegra_wm8903_controls,
367 .num_controls = ARRAY_SIZE(tegra_wm8903_controls), 380 .num_controls = ARRAY_SIZE(tegra_wm8903_controls),
368 .dapm_widgets = tegra_wm8903_dapm_widgets, 381 .dapm_widgets = tegra_wm8903_dapm_widgets,
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index 6f9715ab32fe..56ad923bf6b5 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -209,7 +209,7 @@ static int usb6fire_fw_ezusb_upload(
209 int ret; 209 int ret;
210 u8 data; 210 u8 data;
211 struct usb_device *device = interface_to_usbdev(intf); 211 struct usb_device *device = interface_to_usbdev(intf);
212 const struct firmware *fw = 0; 212 const struct firmware *fw = NULL;
213 struct ihex_record *rec = kmalloc(sizeof(struct ihex_record), 213 struct ihex_record *rec = kmalloc(sizeof(struct ihex_record),
214 GFP_KERNEL); 214 GFP_KERNEL);
215 215
diff --git a/sound/usb/card.h b/sound/usb/card.h
index 0d37238b8457..2b9fffff23b6 100644
--- a/sound/usb/card.h
+++ b/sound/usb/card.h
@@ -119,6 +119,7 @@ struct snd_usb_substream {
119 unsigned long unlink_mask; /* bitmask of unlinked urbs */ 119 unsigned long unlink_mask; /* bitmask of unlinked urbs */
120 120
121 /* data and sync endpoints for this stream */ 121 /* data and sync endpoints for this stream */
122 unsigned int ep_num; /* the endpoint number */
122 struct snd_usb_endpoint *data_endpoint; 123 struct snd_usb_endpoint *data_endpoint;
123 struct snd_usb_endpoint *sync_endpoint; 124 struct snd_usb_endpoint *sync_endpoint;
124 unsigned long flags; 125 unsigned long flags;
diff --git a/sound/usb/pcm.c b/sound/usb/pcm.c
index 24839d932648..54607f8c4f66 100644
--- a/sound/usb/pcm.c
+++ b/sound/usb/pcm.c
@@ -354,17 +354,21 @@ static int set_format(struct snd_usb_substream *subs, struct audioformat *fmt)
354 (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && 354 (get_endpoint(alts, 1)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
355 get_endpoint(alts, 1)->bSynchAddress != 0 && 355 get_endpoint(alts, 1)->bSynchAddress != 0 &&
356 !implicit_fb)) { 356 !implicit_fb)) {
357 snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n", 357 snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. bmAttributes %02x, bLength %d, bSynchAddress %02x\n",
358 dev->devnum, fmt->iface, fmt->altsetting); 358 dev->devnum, fmt->iface, fmt->altsetting,
359 get_endpoint(alts, 1)->bmAttributes,
360 get_endpoint(alts, 1)->bLength,
361 get_endpoint(alts, 1)->bSynchAddress);
359 return -EINVAL; 362 return -EINVAL;
360 } 363 }
361 ep = get_endpoint(alts, 1)->bEndpointAddress; 364 ep = get_endpoint(alts, 1)->bEndpointAddress;
362 if (get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE && 365 if (!implicit_fb &&
366 get_endpoint(alts, 0)->bLength >= USB_DT_ENDPOINT_AUDIO_SIZE &&
363 (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) || 367 (( is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress | USB_DIR_IN)) ||
364 (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)) || 368 (!is_playback && ep != (unsigned int)(get_endpoint(alts, 0)->bSynchAddress & ~USB_DIR_IN)))) {
365 ( is_playback && !implicit_fb))) { 369 snd_printk(KERN_ERR "%d:%d:%d : invalid sync pipe. is_playback %d, ep %02x, bSynchAddress %02x\n",
366 snd_printk(KERN_ERR "%d:%d:%d : invalid synch pipe\n", 370 dev->devnum, fmt->iface, fmt->altsetting,
367 dev->devnum, fmt->iface, fmt->altsetting); 371 is_playback, ep, get_endpoint(alts, 0)->bSynchAddress);
368 return -EINVAL; 372 return -EINVAL;
369 } 373 }
370 374
@@ -788,6 +792,9 @@ static int snd_usb_pcm_check_knot(struct snd_pcm_runtime *runtime,
788 int count = 0, needs_knot = 0; 792 int count = 0, needs_knot = 0;
789 int err; 793 int err;
790 794
795 kfree(subs->rate_list.list);
796 subs->rate_list.list = NULL;
797
791 list_for_each_entry(fp, &subs->fmt_list, list) { 798 list_for_each_entry(fp, &subs->fmt_list, list) {
792 if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS) 799 if (fp->rates & SNDRV_PCM_RATE_CONTINUOUS)
793 return 0; 800 return 0;
@@ -1144,7 +1151,8 @@ static int snd_usb_substream_playback_trigger(struct snd_pcm_substream *substrea
1144 return -EINVAL; 1151 return -EINVAL;
1145} 1152}
1146 1153
1147int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream, int cmd) 1154static int snd_usb_substream_capture_trigger(struct snd_pcm_substream *substream,
1155 int cmd)
1148{ 1156{
1149 int err; 1157 int err;
1150 struct snd_usb_substream *subs = substream->runtime->private_data; 1158 struct snd_usb_substream *subs = substream->runtime->private_data;
diff --git a/sound/usb/stream.c b/sound/usb/stream.c
index 6b7d7a2b7baa..083ed81160e5 100644
--- a/sound/usb/stream.c
+++ b/sound/usb/stream.c
@@ -97,6 +97,7 @@ static void snd_usb_init_substream(struct snd_usb_stream *as,
97 subs->formats |= fp->formats; 97 subs->formats |= fp->formats;
98 subs->num_formats++; 98 subs->num_formats++;
99 subs->fmt_type = fp->fmt_type; 99 subs->fmt_type = fp->fmt_type;
100 subs->ep_num = fp->endpoint;
100} 101}
101 102
102/* 103/*
@@ -119,9 +120,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
119 if (as->fmt_type != fp->fmt_type) 120 if (as->fmt_type != fp->fmt_type)
120 continue; 121 continue;
121 subs = &as->substream[stream]; 122 subs = &as->substream[stream];
122 if (!subs->data_endpoint) 123 if (subs->ep_num == fp->endpoint) {
123 continue;
124 if (subs->data_endpoint->ep_num == fp->endpoint) {
125 list_add_tail(&fp->list, &subs->fmt_list); 124 list_add_tail(&fp->list, &subs->fmt_list);
126 subs->num_formats++; 125 subs->num_formats++;
127 subs->formats |= fp->formats; 126 subs->formats |= fp->formats;
@@ -134,7 +133,7 @@ int snd_usb_add_audio_stream(struct snd_usb_audio *chip,
134 if (as->fmt_type != fp->fmt_type) 133 if (as->fmt_type != fp->fmt_type)
135 continue; 134 continue;
136 subs = &as->substream[stream]; 135 subs = &as->substream[stream];
137 if (subs->data_endpoint) 136 if (subs->ep_num)
138 continue; 137 continue;
139 err = snd_pcm_new_stream(as->pcm, stream, 1); 138 err = snd_pcm_new_stream(as->pcm, stream, 1);
140 if (err < 0) 139 if (err < 0)
diff --git a/tools/hv/hv_kvp_daemon.c b/tools/hv/hv_kvp_daemon.c
index 146fd6147e84..d9834b362943 100644
--- a/tools/hv/hv_kvp_daemon.c
+++ b/tools/hv/hv_kvp_daemon.c
@@ -701,14 +701,18 @@ int main(void)
701 pfd.fd = fd; 701 pfd.fd = fd;
702 702
703 while (1) { 703 while (1) {
704 struct sockaddr *addr_p = (struct sockaddr *) &addr;
705 socklen_t addr_l = sizeof(addr);
704 pfd.events = POLLIN; 706 pfd.events = POLLIN;
705 pfd.revents = 0; 707 pfd.revents = 0;
706 poll(&pfd, 1, -1); 708 poll(&pfd, 1, -1);
707 709
708 len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0); 710 len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0,
711 addr_p, &addr_l);
709 712
710 if (len < 0) { 713 if (len < 0 || addr.nl_pid) {
711 syslog(LOG_ERR, "recv failed; error:%d", len); 714 syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s",
715 addr.nl_pid, errno, strerror(errno));
712 close(fd); 716 close(fd);
713 return -1; 717 return -1;
714 } 718 }
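The kvp daemon change swaps recv() for recvfrom() so the sender's netlink address can be inspected, and anything not sent by the kernel (nl_pid != 0) is rejected. A self-contained sketch of that receive path (user space, logging omitted):

#include <sys/socket.h>
#include <sys/types.h>
#include <linux/netlink.h>

/* Return the message length, or -1 if recvfrom() failed or the
 * datagram did not originate in the kernel. */
static ssize_t example_recv_from_kernel(int fd, void *buf, size_t len)
{
        struct sockaddr_nl addr;
        socklen_t addr_len = sizeof(addr);
        ssize_t n;

        n = recvfrom(fd, buf, len, 0,
                     (struct sockaddr *)&addr, &addr_len);
        if (n < 0 || addr.nl_pid != 0)
                return -1;

        return n;
}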
diff --git a/tools/perf/MANIFEST b/tools/perf/MANIFEST
index 5476bc0a1eac..b4b572e8c100 100644
--- a/tools/perf/MANIFEST
+++ b/tools/perf/MANIFEST
@@ -1,4 +1,6 @@
1tools/perf 1tools/perf
2tools/scripts
3tools/lib/traceevent
2include/linux/const.h 4include/linux/const.h
3include/linux/perf_event.h 5include/linux/perf_event.h
4include/linux/rbtree.h 6include/linux/rbtree.h
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index 8c767c6bca91..25249f76329d 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -152,7 +152,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
152 152
153 if (symbol_conf.use_callchain) { 153 if (symbol_conf.use_callchain) {
154 err = callchain_append(he->callchain, 154 err = callchain_append(he->callchain,
155 &evsel->hists.callchain_cursor, 155 &callchain_cursor,
156 sample->period); 156 sample->period);
157 if (err) 157 if (err)
158 return err; 158 return err;
@@ -162,7 +162,7 @@ static int perf_evsel__add_hist_entry(struct perf_evsel *evsel,
162 * so we don't allocated the extra space needed because the stdio 162 * so we don't allocated the extra space needed because the stdio
163 * code will not use it. 163 * code will not use it.
164 */ 164 */
165 if (al->sym != NULL && use_browser > 0) { 165 if (he->ms.sym != NULL && use_browser > 0) {
166 struct annotation *notes = symbol__annotation(he->ms.sym); 166 struct annotation *notes = symbol__annotation(he->ms.sym);
167 167
168 assert(evsel != NULL); 168 assert(evsel != NULL);
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 62ae30d34fa6..07b5c7703dd1 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -1129,7 +1129,7 @@ static int add_default_attributes(void)
1129 return 0; 1129 return 0;
1130 1130
1131 if (!evsel_list->nr_entries) { 1131 if (!evsel_list->nr_entries) {
1132 if (perf_evlist__add_attrs_array(evsel_list, default_attrs) < 0) 1132 if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
1133 return -1; 1133 return -1;
1134 } 1134 }
1135 1135
@@ -1139,21 +1139,21 @@ static int add_default_attributes(void)
1139 return 0; 1139 return 0;
1140 1140
1141 /* Append detailed run extra attributes: */ 1141 /* Append detailed run extra attributes: */
1142 if (perf_evlist__add_attrs_array(evsel_list, detailed_attrs) < 0) 1142 if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
1143 return -1; 1143 return -1;
1144 1144
1145 if (detailed_run < 2) 1145 if (detailed_run < 2)
1146 return 0; 1146 return 0;
1147 1147
1148 /* Append very detailed run extra attributes: */ 1148 /* Append very detailed run extra attributes: */
1149 if (perf_evlist__add_attrs_array(evsel_list, very_detailed_attrs) < 0) 1149 if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
1150 return -1; 1150 return -1;
1151 1151
1152 if (detailed_run < 3) 1152 if (detailed_run < 3)
1153 return 0; 1153 return 0;
1154 1154
1155 /* Append very, very detailed run extra attributes: */ 1155 /* Append very, very detailed run extra attributes: */
1156 return perf_evlist__add_attrs_array(evsel_list, very_very_detailed_attrs); 1156 return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
1157} 1157}
1158 1158
1159int cmd_stat(int argc, const char **argv, const char *prefix __used) 1159int cmd_stat(int argc, const char **argv, const char *prefix __used)
@@ -1179,6 +1179,12 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
1179 fprintf(stderr, "cannot use both --output and --log-fd\n"); 1179 fprintf(stderr, "cannot use both --output and --log-fd\n");
1180 usage_with_options(stat_usage, options); 1180 usage_with_options(stat_usage, options);
1181 } 1181 }
1182
1183 if (output_fd < 0) {
1184 fprintf(stderr, "argument to --log-fd must be a > 0\n");
1185 usage_with_options(stat_usage, options);
1186 }
1187
1182 if (!output) { 1188 if (!output) {
1183 struct timespec tm; 1189 struct timespec tm;
1184 mode = append_file ? "a" : "w"; 1190 mode = append_file ? "a" : "w";
@@ -1190,7 +1196,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
1190 } 1196 }
1191 clock_gettime(CLOCK_REALTIME, &tm); 1197 clock_gettime(CLOCK_REALTIME, &tm);
1192 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec)); 1198 fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
1193 } else if (output_fd != 2) { 1199 } else if (output_fd > 0) {
1194 mode = append_file ? "a" : "w"; 1200 mode = append_file ? "a" : "w";
1195 output = fdopen(output_fd, mode); 1201 output = fdopen(output_fd, mode);
1196 if (!output) { 1202 if (!output) {
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 871b540293e1..6bb0277b7dfe 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -787,7 +787,7 @@ static void perf_event__process_sample(struct perf_tool *tool,
787 } 787 }
788 788
789 if (symbol_conf.use_callchain) { 789 if (symbol_conf.use_callchain) {
790 err = callchain_append(he->callchain, &evsel->hists.callchain_cursor, 790 err = callchain_append(he->callchain, &callchain_cursor,
791 sample->period); 791 sample->period);
792 if (err) 792 if (err)
793 return; 793 return;
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index bd0bb1b1279b..67e5d0cace85 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -409,14 +409,15 @@ Counters can be enabled and disabled in two ways: via ioctl and via
409prctl. When a counter is disabled, it doesn't count or generate 409prctl. When a counter is disabled, it doesn't count or generate
410events but does continue to exist and maintain its count value. 410events but does continue to exist and maintain its count value.
411 411
412An individual counter or counter group can be enabled with 412An individual counter can be enabled with
413 413
414 ioctl(fd, PERF_EVENT_IOC_ENABLE); 414 ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
415 415
416or disabled with 416or disabled with
417 417
418 ioctl(fd, PERF_EVENT_IOC_DISABLE); 418 ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
419 419
420For a counter group, pass PERF_IOC_FLAG_GROUP as the third argument.
420Enabling or disabling the leader of a group enables or disables the 421Enabling or disabling the leader of a group enables or disables the
421whole group; that is, while the group leader is disabled, none of the 422whole group; that is, while the group leader is disabled, none of the
422counters in the group will count. Enabling or disabling a member of a 423counters in the group will count. Enabling or disabling a member of a
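The design.txt correction is easy to restate as code: the enable/disable ioctls take a third argument, and passing PERF_IOC_FLAG_GROUP widens the operation from one counter to the event's whole group. A small sketch (user space):

#include <sys/ioctl.h>
#include <linux/perf_event.h>

static int example_enable(int fd, int whole_group)
{
        /* 0 acts on this counter only; PERF_IOC_FLAG_GROUP acts on
         * every counter in the group this event belongs to. */
        return ioctl(fd, PERF_EVENT_IOC_ENABLE,
                     whole_group ? PERF_IOC_FLAG_GROUP : 0);
}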
diff --git a/tools/perf/ui/browsers/annotate.c b/tools/perf/ui/browsers/annotate.c
index 4deea6aaf927..34b1c46eaf42 100644
--- a/tools/perf/ui/browsers/annotate.c
+++ b/tools/perf/ui/browsers/annotate.c
@@ -668,7 +668,7 @@ static int annotate_browser__run(struct annotate_browser *browser, int evidx,
668 "q/ESC/CTRL+C Exit\n\n" 668 "q/ESC/CTRL+C Exit\n\n"
669 "-> Go to target\n" 669 "-> Go to target\n"
670 "<- Exit\n" 670 "<- Exit\n"
671 "h Cycle thru hottest instructions\n" 671 "H Cycle thru hottest instructions\n"
672 "j Toggle showing jump to target arrows\n" 672 "j Toggle showing jump to target arrows\n"
673 "J Toggle showing number of jump sources on targets\n" 673 "J Toggle showing number of jump sources on targets\n"
674 "n Search next string\n" 674 "n Search next string\n"
diff --git a/tools/perf/util/PERF-VERSION-GEN b/tools/perf/util/PERF-VERSION-GEN
index ad73300f7bac..95264f304179 100755
--- a/tools/perf/util/PERF-VERSION-GEN
+++ b/tools/perf/util/PERF-VERSION-GEN
@@ -12,7 +12,7 @@ LF='
12# First check if there is a .git to get the version from git describe 12# First check if there is a .git to get the version from git describe
13# otherwise try to get the version from the kernel makefile 13# otherwise try to get the version from the kernel makefile
14if test -d ../../.git -o -f ../../.git && 14if test -d ../../.git -o -f ../../.git &&
15 VN=$(git describe --abbrev=4 HEAD 2>/dev/null) && 15 VN=$(git describe --match 'v[0-9].[0-9]*' --abbrev=4 HEAD 2>/dev/null) &&
16 case "$VN" in 16 case "$VN" in
17 *$LF*) (exit 1) ;; 17 *$LF*) (exit 1) ;;
18 v[0-9]*) 18 v[0-9]*)
diff --git a/tools/perf/util/callchain.c b/tools/perf/util/callchain.c
index 9f7106a8d9a4..3a6bff47614f 100644
--- a/tools/perf/util/callchain.c
+++ b/tools/perf/util/callchain.c
@@ -18,6 +18,8 @@
18#include "util.h" 18#include "util.h"
19#include "callchain.h" 19#include "callchain.h"
20 20
21__thread struct callchain_cursor callchain_cursor;
22
21bool ip_callchain__valid(struct ip_callchain *chain, 23bool ip_callchain__valid(struct ip_callchain *chain,
22 const union perf_event *event) 24 const union perf_event *event)
23{ 25{
diff --git a/tools/perf/util/callchain.h b/tools/perf/util/callchain.h
index 7f9c0f1ae3a9..3bdb407f9cd9 100644
--- a/tools/perf/util/callchain.h
+++ b/tools/perf/util/callchain.h
@@ -76,6 +76,8 @@ struct callchain_cursor {
76 struct callchain_cursor_node *curr; 76 struct callchain_cursor_node *curr;
77}; 77};
78 78
79extern __thread struct callchain_cursor callchain_cursor;
80
79static inline void callchain_init(struct callchain_root *root) 81static inline void callchain_init(struct callchain_root *root)
80{ 82{
81 INIT_LIST_HEAD(&root->node.siblings); 83 INIT_LIST_HEAD(&root->node.siblings);
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 4ac5f5ae4ce9..7400fb3fc50c 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -159,6 +159,17 @@ out_delete_partial_list:
159 return -1; 159 return -1;
160} 160}
161 161
162int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
163 struct perf_event_attr *attrs, size_t nr_attrs)
164{
165 size_t i;
166
167 for (i = 0; i < nr_attrs; i++)
168 event_attr_init(attrs + i);
169
170 return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
171}
172
162static int trace_event__id(const char *evname) 173static int trace_event__id(const char *evname)
163{ 174{
164 char *filename, *colon; 175 char *filename, *colon;
@@ -263,7 +274,8 @@ void perf_evlist__disable(struct perf_evlist *evlist)
263 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 274 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
264 list_for_each_entry(pos, &evlist->entries, node) { 275 list_for_each_entry(pos, &evlist->entries, node) {
265 for (thread = 0; thread < evlist->threads->nr; thread++) 276 for (thread = 0; thread < evlist->threads->nr; thread++)
266 ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE); 277 ioctl(FD(pos, cpu, thread),
278 PERF_EVENT_IOC_DISABLE, 0);
267 } 279 }
268 } 280 }
269} 281}
@@ -276,7 +288,8 @@ void perf_evlist__enable(struct perf_evlist *evlist)
276 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) { 288 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
277 list_for_each_entry(pos, &evlist->entries, node) { 289 list_for_each_entry(pos, &evlist->entries, node) {
278 for (thread = 0; thread < evlist->threads->nr; thread++) 290 for (thread = 0; thread < evlist->threads->nr; thread++)
279 ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE); 291 ioctl(FD(pos, cpu, thread),
292 PERF_EVENT_IOC_ENABLE, 0);
280 } 293 }
281 } 294 }
282} 295}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 58abb63ac13a..989bee9624c2 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -54,6 +54,8 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
54int perf_evlist__add_default(struct perf_evlist *evlist); 54int perf_evlist__add_default(struct perf_evlist *evlist);
55int perf_evlist__add_attrs(struct perf_evlist *evlist, 55int perf_evlist__add_attrs(struct perf_evlist *evlist,
56 struct perf_event_attr *attrs, size_t nr_attrs); 56 struct perf_event_attr *attrs, size_t nr_attrs);
57int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
58 struct perf_event_attr *attrs, size_t nr_attrs);
57int perf_evlist__add_tracepoints(struct perf_evlist *evlist, 59int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
58 const char *tracepoints[], size_t nr_tracepoints); 60 const char *tracepoints[], size_t nr_tracepoints);
59int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist, 61int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
@@ -62,6 +64,8 @@ int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
62 64
63#define perf_evlist__add_attrs_array(evlist, array) \ 65#define perf_evlist__add_attrs_array(evlist, array) \
64 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array)) 66 perf_evlist__add_attrs(evlist, array, ARRAY_SIZE(array))
67#define perf_evlist__add_default_attrs(evlist, array) \
68 __perf_evlist__add_default_attrs(evlist, array, ARRAY_SIZE(array))
65 69
66#define perf_evlist__add_tracepoints_array(evlist, array) \ 70#define perf_evlist__add_tracepoints_array(evlist, array) \
67 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array)) 71 perf_evlist__add_tracepoints(evlist, array, ARRAY_SIZE(array))
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 91d19138f3ec..9f6cebd798ee 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -494,16 +494,24 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
494} 494}
495 495
496static int perf_event__parse_id_sample(const union perf_event *event, u64 type, 496static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
497 struct perf_sample *sample) 497 struct perf_sample *sample,
498 bool swapped)
498{ 499{
499 const u64 *array = event->sample.array; 500 const u64 *array = event->sample.array;
501 union u64_swap u;
500 502
501 array += ((event->header.size - 503 array += ((event->header.size -
502 sizeof(event->header)) / sizeof(u64)) - 1; 504 sizeof(event->header)) / sizeof(u64)) - 1;
503 505
504 if (type & PERF_SAMPLE_CPU) { 506 if (type & PERF_SAMPLE_CPU) {
505 u32 *p = (u32 *)array; 507 u.val64 = *array;
506 sample->cpu = *p; 508 if (swapped) {
509 /* undo swap of u64, then swap on individual u32s */
510 u.val64 = bswap_64(u.val64);
511 u.val32[0] = bswap_32(u.val32[0]);
512 }
513
514 sample->cpu = u.val32[0];
507 array--; 515 array--;
508 } 516 }
509 517
@@ -523,9 +531,16 @@ static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
523 } 531 }
524 532
525 if (type & PERF_SAMPLE_TID) { 533 if (type & PERF_SAMPLE_TID) {
526 u32 *p = (u32 *)array; 534 u.val64 = *array;
527 sample->pid = p[0]; 535 if (swapped) {
528 sample->tid = p[1]; 536 /* undo swap of u64, then swap on individual u32s */
537 u.val64 = bswap_64(u.val64);
538 u.val32[0] = bswap_32(u.val32[0]);
539 u.val32[1] = bswap_32(u.val32[1]);
540 }
541
542 sample->pid = u.val32[0];
543 sample->tid = u.val32[1];
529 } 544 }
530 545
531 return 0; 546 return 0;
@@ -562,7 +577,7 @@ int perf_event__parse_sample(const union perf_event *event, u64 type,
562 if (event->header.type != PERF_RECORD_SAMPLE) { 577 if (event->header.type != PERF_RECORD_SAMPLE) {
563 if (!sample_id_all) 578 if (!sample_id_all)
564 return 0; 579 return 0;
565 return perf_event__parse_id_sample(event, type, data); 580 return perf_event__parse_id_sample(event, type, data, swapped);
566 } 581 }
567 582
568 array = event->sample.array; 583 array = event->sample.array;
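The parse_id_sample fix handles cross-endian perf.data files where two u32 fields travel packed inside one u64: the blanket u64 swap is undone first, then each 32-bit half is swapped on its own. As a stand-alone sketch (mirroring perf's u64_swap union):

#include <byteswap.h>
#include <stdint.h>

union u64_swap {
        uint64_t val64;
        uint32_t val32[2];
};

/* Recover two u32s packed in a u64 read from a file whose byte order
 * differs from the host's. */
static void example_unpack_u32_pair(uint64_t raw, int swapped,
                                    uint32_t *first, uint32_t *second)
{
        union u64_swap u = { .val64 = raw };

        if (swapped) {
                u.val64 = bswap_64(u.val64);    /* undo the whole-u64 swap */
                u.val32[0] = bswap_32(u.val32[0]);
                u.val32[1] = bswap_32(u.val32[1]);
        }

        *first = u.val32[0];
        *second = u.val32[1];
}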
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 2dd5edf161b7..e909d43cf542 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -1942,7 +1942,6 @@ int perf_file_header__read(struct perf_file_header *header,
1942 else 1942 else
1943 return -1; 1943 return -1;
1944 } else if (ph->needs_swap) { 1944 } else if (ph->needs_swap) {
1945 unsigned int i;
1946 /* 1945 /*
1947 * feature bitmap is declared as an array of unsigned longs -- 1946 * feature bitmap is declared as an array of unsigned longs --
1948 * not good since its size can differ between the host that 1947 * not good since its size can differ between the host that
@@ -1958,14 +1957,17 @@ int perf_file_header__read(struct perf_file_header *header,
1958 * file), punt and fallback to the original behavior -- 1957 * file), punt and fallback to the original behavior --
1959 * clearing all feature bits and setting buildid. 1958 * clearing all feature bits and setting buildid.
1960 */ 1959 */
1961 for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) 1960 mem_bswap_64(&header->adds_features,
1962 header->adds_features[i] = bswap_64(header->adds_features[i]); 1961 BITS_TO_U64(HEADER_FEAT_BITS));
1963 1962
1964 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 1963 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
1965 for (i = 0; i < BITS_TO_LONGS(HEADER_FEAT_BITS); ++i) { 1964 /* unswap as u64 */
1966 header->adds_features[i] = bswap_64(header->adds_features[i]); 1965 mem_bswap_64(&header->adds_features,
1967 header->adds_features[i] = bswap_32(header->adds_features[i]); 1966 BITS_TO_U64(HEADER_FEAT_BITS));
1968 } 1967
1968 /* unswap as u32 */
1969 mem_bswap_32(&header->adds_features,
1970 BITS_TO_U32(HEADER_FEAT_BITS));
1969 } 1971 }
1970 1972
1971 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) { 1973 if (!test_bit(HEADER_HOSTNAME, header->adds_features)) {
@@ -2091,6 +2093,35 @@ static int read_attr(int fd, struct perf_header *ph,
2091 return ret <= 0 ? -1 : 0; 2093 return ret <= 0 ? -1 : 0;
2092} 2094}
2093 2095
2096static int perf_evsel__set_tracepoint_name(struct perf_evsel *evsel)
2097{
2098 struct event_format *event = trace_find_event(evsel->attr.config);
2099 char bf[128];
2100
2101 if (event == NULL)
2102 return -1;
2103
2104 snprintf(bf, sizeof(bf), "%s:%s", event->system, event->name);
2105 evsel->name = strdup(bf);
 2107                 if (evsel->name == NULL)
2107 return -1;
2108
2109 return 0;
2110}
2111
2112static int perf_evlist__set_tracepoint_names(struct perf_evlist *evlist)
2113{
2114 struct perf_evsel *pos;
2115
2116 list_for_each_entry(pos, &evlist->entries, node) {
2117 if (pos->attr.type == PERF_TYPE_TRACEPOINT &&
2118 perf_evsel__set_tracepoint_name(pos))
2119 return -1;
2120 }
2121
2122 return 0;
2123}
2124
2094int perf_session__read_header(struct perf_session *session, int fd) 2125int perf_session__read_header(struct perf_session *session, int fd)
2095{ 2126{
2096 struct perf_header *header = &session->header; 2127 struct perf_header *header = &session->header;
@@ -2172,6 +2203,9 @@ int perf_session__read_header(struct perf_session *session, int fd)
2172 2203
2173 lseek(fd, header->data_offset, SEEK_SET); 2204 lseek(fd, header->data_offset, SEEK_SET);
2174 2205
2206 if (perf_evlist__set_tracepoint_names(session->evlist))
2207 goto out_delete_evlist;
2208
2175 header->frozen = 1; 2209 header->frozen = 1;
2176 return 0; 2210 return 0;
2177out_errno: 2211out_errno:
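The feature-bitmap hunk above first swaps the bitmap as u64 words and, when the always-set HEADER_HOSTNAME bit then turns out to be missing, unswaps and retries as u32 words. A small standalone sketch, not part of the patch, of why the two interpretations differ for the same bytes:

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <byteswap.h>

int main(void)
{
	/* bytes of a bitmap as they might sit in a file written on a foreign host */
	unsigned char raw[8] = { 0, 0, 0, 0, 0, 0, 0, 1 };
	uint64_t as_u64;
	uint32_t as_u32[2];

	memcpy(&as_u64, raw, sizeof(as_u64));
	memcpy(as_u32, raw, sizeof(as_u32));

	/* the set bit lands in a different word depending on the element size */
	printf("swapped as one u64 : %016llx\n",
	       (unsigned long long)bswap_64(as_u64));
	printf("swapped as two u32s: %08x %08x\n",
	       bswap_32(as_u32[0]), bswap_32(as_u32[1]));
	return 0;
}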
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 1293b5ebea4d..514e2a4b367d 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -378,7 +378,7 @@ void hist_entry__free(struct hist_entry *he)
378 * collapse the histogram 378 * collapse the histogram
379 */ 379 */
380 380
381static bool hists__collapse_insert_entry(struct hists *hists, 381static bool hists__collapse_insert_entry(struct hists *hists __used,
382 struct rb_root *root, 382 struct rb_root *root,
383 struct hist_entry *he) 383 struct hist_entry *he)
384{ 384{
@@ -397,8 +397,9 @@ static bool hists__collapse_insert_entry(struct hists *hists,
397 iter->period += he->period; 397 iter->period += he->period;
398 iter->nr_events += he->nr_events; 398 iter->nr_events += he->nr_events;
399 if (symbol_conf.use_callchain) { 399 if (symbol_conf.use_callchain) {
400 callchain_cursor_reset(&hists->callchain_cursor); 400 callchain_cursor_reset(&callchain_cursor);
401 callchain_merge(&hists->callchain_cursor, iter->callchain, 401 callchain_merge(&callchain_cursor,
402 iter->callchain,
402 he->callchain); 403 he->callchain);
403 } 404 }
404 hist_entry__free(he); 405 hist_entry__free(he);
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index cfc64e293f90..34bb556d6219 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -67,8 +67,6 @@ struct hists {
67 struct events_stats stats; 67 struct events_stats stats;
68 u64 event_stream; 68 u64 event_stream;
69 u16 col_len[HISTC_NR_COLS]; 69 u16 col_len[HISTC_NR_COLS];
70 /* Best would be to reuse the session callchain cursor */
71 struct callchain_cursor callchain_cursor;
72}; 70};
73 71
74struct hist_entry *__hists__add_entry(struct hists *self, 72struct hist_entry *__hists__add_entry(struct hists *self,
diff --git a/tools/perf/util/include/linux/bitops.h b/tools/perf/util/include/linux/bitops.h
index f1584833bd22..587a230d2075 100644
--- a/tools/perf/util/include/linux/bitops.h
+++ b/tools/perf/util/include/linux/bitops.h
@@ -8,6 +8,8 @@
8#define BITS_PER_LONG __WORDSIZE 8#define BITS_PER_LONG __WORDSIZE
9#define BITS_PER_BYTE 8 9#define BITS_PER_BYTE 8
10#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long)) 10#define BITS_TO_LONGS(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(long))
11#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
12#define BITS_TO_U32(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u32))
11 13
12#define for_each_set_bit(bit, addr, size) \ 14#define for_each_set_bit(bit, addr, size) \
13 for ((bit) = find_first_bit((addr), (size)); \ 15 for ((bit) = find_first_bit((addr), (size)); \
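A quick illustration, not part of the patch, of what the new BITS_TO_U64/BITS_TO_U32 helpers evaluate to; it assumes the usual DIV_ROUND_UP definition and a 256-bit bitmap (the actual value of HEADER_FEAT_BITS is not shown in this hunk):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))
#define BITS_PER_BYTE		8
#define BITS_TO_U64(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint64_t))
#define BITS_TO_U32(nr)		DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(uint32_t))

int main(void)
{
	/* 256 feature bits need 4 u64 words or 8 u32 words */
	printf("u64 words: %zu\n", BITS_TO_U64(256));
	printf("u32 words: %zu\n", BITS_TO_U32(256));
	return 0;
}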
diff --git a/tools/perf/util/pager.c b/tools/perf/util/pager.c
index 1915de20dcac..3322b8446e89 100644
--- a/tools/perf/util/pager.c
+++ b/tools/perf/util/pager.c
@@ -57,6 +57,10 @@ void setup_pager(void)
57 } 57 }
58 if (!pager) 58 if (!pager)
59 pager = getenv("PAGER"); 59 pager = getenv("PAGER");
60 if (!pager) {
61 if (!access("/usr/bin/pager", X_OK))
62 pager = "/usr/bin/pager";
63 }
60 if (!pager) 64 if (!pager)
61 pager = "less"; 65 pager = "less";
62 else if (!*pager || !strcmp(pager, "cat")) 66 else if (!*pager || !strcmp(pager, "cat"))
diff --git a/tools/perf/util/probe-event.c b/tools/perf/util/probe-event.c
index 59dccc98b554..0dda25d82d06 100644
--- a/tools/perf/util/probe-event.c
+++ b/tools/perf/util/probe-event.c
@@ -2164,16 +2164,12 @@ int del_perf_probe_events(struct strlist *dellist)
2164 2164
2165error: 2165error:
2166 if (kfd >= 0) { 2166 if (kfd >= 0) {
2167 if (namelist) 2167 strlist__delete(namelist);
2168 strlist__delete(namelist);
2169
2170 close(kfd); 2168 close(kfd);
2171 } 2169 }
2172 2170
2173 if (ufd >= 0) { 2171 if (ufd >= 0) {
2174 if (unamelist) 2172 strlist__delete(unamelist);
2175 strlist__delete(unamelist);
2176
2177 close(ufd); 2173 close(ufd);
2178 } 2174 }
2179 2175
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 93d355d27109..c3e399bcf18d 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -288,7 +288,8 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
288 return bi; 288 return bi;
289} 289}
290 290
291int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel, 291int machine__resolve_callchain(struct machine *self,
292 struct perf_evsel *evsel __used,
292 struct thread *thread, 293 struct thread *thread,
293 struct ip_callchain *chain, 294 struct ip_callchain *chain,
294 struct symbol **parent) 295 struct symbol **parent)
@@ -297,7 +298,12 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
297 unsigned int i; 298 unsigned int i;
298 int err; 299 int err;
299 300
300 callchain_cursor_reset(&evsel->hists.callchain_cursor); 301 callchain_cursor_reset(&callchain_cursor);
302
303 if (chain->nr > PERF_MAX_STACK_DEPTH) {
304 pr_warning("corrupted callchain. skipping...\n");
305 return 0;
306 }
301 307
302 for (i = 0; i < chain->nr; i++) { 308 for (i = 0; i < chain->nr; i++) {
303 u64 ip; 309 u64 ip;
@@ -317,7 +323,14 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
317 case PERF_CONTEXT_USER: 323 case PERF_CONTEXT_USER:
318 cpumode = PERF_RECORD_MISC_USER; break; 324 cpumode = PERF_RECORD_MISC_USER; break;
319 default: 325 default:
320 break; 326 pr_debug("invalid callchain context: "
327 "%"PRId64"\n", (s64) ip);
328 /*
329 * It seems the callchain is corrupted.
330 * Discard all.
331 */
332 callchain_cursor_reset(&callchain_cursor);
333 return 0;
321 } 334 }
322 continue; 335 continue;
323 } 336 }
@@ -333,7 +346,7 @@ int machine__resolve_callchain(struct machine *self, struct perf_evsel *evsel,
333 break; 346 break;
334 } 347 }
335 348
336 err = callchain_cursor_append(&evsel->hists.callchain_cursor, 349 err = callchain_cursor_append(&callchain_cursor,
337 ip, al.map, al.sym); 350 ip, al.map, al.sym);
338 if (err) 351 if (err)
339 return err; 352 return err;
@@ -429,6 +442,16 @@ static void perf_tool__fill_defaults(struct perf_tool *tool)
429 tool->finished_round = process_finished_round_stub; 442 tool->finished_round = process_finished_round_stub;
430 } 443 }
431} 444}
445
446void mem_bswap_32(void *src, int byte_size)
447{
448 u32 *m = src;
449 while (byte_size > 0) {
450 *m = bswap_32(*m);
451 byte_size -= sizeof(u32);
452 ++m;
453 }
454}
432 455
433void mem_bswap_64(void *src, int byte_size) 456void mem_bswap_64(void *src, int byte_size)
434{ 457{
@@ -441,37 +464,65 @@ void mem_bswap_64(void *src, int byte_size)
441 } 464 }
442} 465}
443 466
444static void perf_event__all64_swap(union perf_event *event) 467static void swap_sample_id_all(union perf_event *event, void *data)
468{
469 void *end = (void *) event + event->header.size;
470 int size = end - data;
471
472 BUG_ON(size % sizeof(u64));
473 mem_bswap_64(data, size);
474}
475
476static void perf_event__all64_swap(union perf_event *event,
477 bool sample_id_all __used)
445{ 478{
446 struct perf_event_header *hdr = &event->header; 479 struct perf_event_header *hdr = &event->header;
447 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr)); 480 mem_bswap_64(hdr + 1, event->header.size - sizeof(*hdr));
448} 481}
449 482
450static void perf_event__comm_swap(union perf_event *event) 483static void perf_event__comm_swap(union perf_event *event, bool sample_id_all)
451{ 484{
452 event->comm.pid = bswap_32(event->comm.pid); 485 event->comm.pid = bswap_32(event->comm.pid);
453 event->comm.tid = bswap_32(event->comm.tid); 486 event->comm.tid = bswap_32(event->comm.tid);
487
488 if (sample_id_all) {
489 void *data = &event->comm.comm;
490
491 data += ALIGN(strlen(data) + 1, sizeof(u64));
492 swap_sample_id_all(event, data);
493 }
454} 494}
455 495
456static void perf_event__mmap_swap(union perf_event *event) 496static void perf_event__mmap_swap(union perf_event *event,
497 bool sample_id_all)
457{ 498{
458 event->mmap.pid = bswap_32(event->mmap.pid); 499 event->mmap.pid = bswap_32(event->mmap.pid);
459 event->mmap.tid = bswap_32(event->mmap.tid); 500 event->mmap.tid = bswap_32(event->mmap.tid);
460 event->mmap.start = bswap_64(event->mmap.start); 501 event->mmap.start = bswap_64(event->mmap.start);
461 event->mmap.len = bswap_64(event->mmap.len); 502 event->mmap.len = bswap_64(event->mmap.len);
462 event->mmap.pgoff = bswap_64(event->mmap.pgoff); 503 event->mmap.pgoff = bswap_64(event->mmap.pgoff);
504
505 if (sample_id_all) {
506 void *data = &event->mmap.filename;
507
508 data += ALIGN(strlen(data) + 1, sizeof(u64));
509 swap_sample_id_all(event, data);
510 }
463} 511}
464 512
465static void perf_event__task_swap(union perf_event *event) 513static void perf_event__task_swap(union perf_event *event, bool sample_id_all)
466{ 514{
467 event->fork.pid = bswap_32(event->fork.pid); 515 event->fork.pid = bswap_32(event->fork.pid);
468 event->fork.tid = bswap_32(event->fork.tid); 516 event->fork.tid = bswap_32(event->fork.tid);
469 event->fork.ppid = bswap_32(event->fork.ppid); 517 event->fork.ppid = bswap_32(event->fork.ppid);
470 event->fork.ptid = bswap_32(event->fork.ptid); 518 event->fork.ptid = bswap_32(event->fork.ptid);
471 event->fork.time = bswap_64(event->fork.time); 519 event->fork.time = bswap_64(event->fork.time);
520
521 if (sample_id_all)
522 swap_sample_id_all(event, &event->fork + 1);
472} 523}
473 524
474static void perf_event__read_swap(union perf_event *event) 525static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
475{ 526{
476 event->read.pid = bswap_32(event->read.pid); 527 event->read.pid = bswap_32(event->read.pid);
477 event->read.tid = bswap_32(event->read.tid); 528 event->read.tid = bswap_32(event->read.tid);
@@ -479,6 +530,9 @@ static void perf_event__read_swap(union perf_event *event)
479 event->read.time_enabled = bswap_64(event->read.time_enabled); 530 event->read.time_enabled = bswap_64(event->read.time_enabled);
480 event->read.time_running = bswap_64(event->read.time_running); 531 event->read.time_running = bswap_64(event->read.time_running);
481 event->read.id = bswap_64(event->read.id); 532 event->read.id = bswap_64(event->read.id);
533
534 if (sample_id_all)
535 swap_sample_id_all(event, &event->read + 1);
482} 536}
483 537
484static u8 revbyte(u8 b) 538static u8 revbyte(u8 b)
@@ -530,7 +584,8 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
530 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64)); 584 swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
531} 585}
532 586
533static void perf_event__hdr_attr_swap(union perf_event *event) 587static void perf_event__hdr_attr_swap(union perf_event *event,
588 bool sample_id_all __used)
534{ 589{
535 size_t size; 590 size_t size;
536 591
@@ -541,18 +596,21 @@ static void perf_event__hdr_attr_swap(union perf_event *event)
541 mem_bswap_64(event->attr.id, size); 596 mem_bswap_64(event->attr.id, size);
542} 597}
543 598
544static void perf_event__event_type_swap(union perf_event *event) 599static void perf_event__event_type_swap(union perf_event *event,
600 bool sample_id_all __used)
545{ 601{
546 event->event_type.event_type.event_id = 602 event->event_type.event_type.event_id =
547 bswap_64(event->event_type.event_type.event_id); 603 bswap_64(event->event_type.event_type.event_id);
548} 604}
549 605
550static void perf_event__tracing_data_swap(union perf_event *event) 606static void perf_event__tracing_data_swap(union perf_event *event,
607 bool sample_id_all __used)
551{ 608{
552 event->tracing_data.size = bswap_32(event->tracing_data.size); 609 event->tracing_data.size = bswap_32(event->tracing_data.size);
553} 610}
554 611
555typedef void (*perf_event__swap_op)(union perf_event *event); 612typedef void (*perf_event__swap_op)(union perf_event *event,
613 bool sample_id_all);
556 614
557static perf_event__swap_op perf_event__swap_ops[] = { 615static perf_event__swap_op perf_event__swap_ops[] = {
558 [PERF_RECORD_MMAP] = perf_event__mmap_swap, 616 [PERF_RECORD_MMAP] = perf_event__mmap_swap,
@@ -986,6 +1044,15 @@ static int perf_session__process_user_event(struct perf_session *session, union
986 } 1044 }
987} 1045}
988 1046
1047static void event_swap(union perf_event *event, bool sample_id_all)
1048{
1049 perf_event__swap_op swap;
1050
1051 swap = perf_event__swap_ops[event->header.type];
1052 if (swap)
1053 swap(event, sample_id_all);
1054}
1055
989static int perf_session__process_event(struct perf_session *session, 1056static int perf_session__process_event(struct perf_session *session,
990 union perf_event *event, 1057 union perf_event *event,
991 struct perf_tool *tool, 1058 struct perf_tool *tool,
@@ -994,9 +1061,8 @@ static int perf_session__process_event(struct perf_session *session,
994 struct perf_sample sample; 1061 struct perf_sample sample;
995 int ret; 1062 int ret;
996 1063
997 if (session->header.needs_swap && 1064 if (session->header.needs_swap)
998 perf_event__swap_ops[event->header.type]) 1065 event_swap(event, session->sample_id_all);
999 perf_event__swap_ops[event->header.type](event);
1000 1066
1001 if (event->header.type >= PERF_RECORD_HEADER_MAX) 1067 if (event->header.type >= PERF_RECORD_HEADER_MAX)
1002 return -EINVAL; 1068 return -EINVAL;
@@ -1428,7 +1494,6 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
1428 int print_sym, int print_dso, int print_symoffset) 1494 int print_sym, int print_dso, int print_symoffset)
1429{ 1495{
1430 struct addr_location al; 1496 struct addr_location al;
1431 struct callchain_cursor *cursor = &evsel->hists.callchain_cursor;
1432 struct callchain_cursor_node *node; 1497 struct callchain_cursor_node *node;
1433 1498
1434 if (perf_event__preprocess_sample(event, machine, &al, sample, 1499 if (perf_event__preprocess_sample(event, machine, &al, sample,
@@ -1446,10 +1511,10 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
1446 error("Failed to resolve callchain. Skipping\n"); 1511 error("Failed to resolve callchain. Skipping\n");
1447 return; 1512 return;
1448 } 1513 }
1449 callchain_cursor_commit(cursor); 1514 callchain_cursor_commit(&callchain_cursor);
1450 1515
1451 while (1) { 1516 while (1) {
1452 node = callchain_cursor_current(cursor); 1517 node = callchain_cursor_current(&callchain_cursor);
1453 if (!node) 1518 if (!node)
1454 break; 1519 break;
1455 1520
@@ -1460,12 +1525,12 @@ void perf_event__print_ip(union perf_event *event, struct perf_sample *sample,
1460 } 1525 }
1461 if (print_dso) { 1526 if (print_dso) {
1462 printf(" ("); 1527 printf(" (");
1463 map__fprintf_dsoname(al.map, stdout); 1528 map__fprintf_dsoname(node->map, stdout);
1464 printf(")"); 1529 printf(")");
1465 } 1530 }
1466 printf("\n"); 1531 printf("\n");
1467 1532
1468 callchain_cursor_advance(cursor); 1533 callchain_cursor_advance(&callchain_cursor);
1469 } 1534 }
1470 1535
1471 } else { 1536 } else {
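A standalone sketch, not from the patch, showing the new mem_bswap_32() helper applied to a small buffer; this is the same element-wise swap the header code uses when it re-interprets the feature bitmap as u32 words:

#include <stdio.h>
#include <stdint.h>
#include <byteswap.h>

static void mem_bswap_32(void *src, int byte_size)
{
	uint32_t *m = src;

	while (byte_size > 0) {
		*m = bswap_32(*m);
		byte_size -= sizeof(uint32_t);
		++m;
	}
}

int main(void)
{
	uint32_t words[2] = { 0x11223344, 0xaabbccdd };

	mem_bswap_32(words, sizeof(words));
	printf("%08x %08x\n", words[0], words[1]); /* 44332211 ddccbbaa */
	return 0;
}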
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index 7a5434c00565..0c702e3f0a36 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -80,6 +80,7 @@ struct branch_info *machine__resolve_bstack(struct machine *self,
80bool perf_session__has_traces(struct perf_session *self, const char *msg); 80bool perf_session__has_traces(struct perf_session *self, const char *msg);
81 81
82void mem_bswap_64(void *src, int byte_size); 82void mem_bswap_64(void *src, int byte_size);
83void mem_bswap_32(void *src, int byte_size);
83void perf_event__attr_swap(struct perf_event_attr *attr); 84void perf_event__attr_swap(struct perf_event_attr *attr);
84 85
85int perf_session__create_kernel_maps(struct perf_session *self); 86int perf_session__create_kernel_maps(struct perf_session *self);
diff --git a/tools/perf/util/symbol.c b/tools/perf/util/symbol.c
index e2ba8858f3e1..3e2e5ea0f03f 100644
--- a/tools/perf/util/symbol.c
+++ b/tools/perf/util/symbol.c
@@ -323,6 +323,7 @@ struct dso *dso__new(const char *name)
323 dso->sorted_by_name = 0; 323 dso->sorted_by_name = 0;
324 dso->has_build_id = 0; 324 dso->has_build_id = 0;
325 dso->kernel = DSO_TYPE_USER; 325 dso->kernel = DSO_TYPE_USER;
326 dso->needs_swap = DSO_SWAP__UNSET;
326 INIT_LIST_HEAD(&dso->node); 327 INIT_LIST_HEAD(&dso->node);
327 } 328 }
328 329
@@ -1156,6 +1157,33 @@ static size_t elf_addr_to_index(Elf *elf, GElf_Addr addr)
1156 return -1; 1157 return -1;
1157} 1158}
1158 1159
1160static int dso__swap_init(struct dso *dso, unsigned char eidata)
1161{
1162 static unsigned int const endian = 1;
1163
1164 dso->needs_swap = DSO_SWAP__NO;
1165
1166 switch (eidata) {
1167 case ELFDATA2LSB:
1168 /* We are big endian, DSO is little endian. */
1169 if (*(unsigned char const *)&endian != 1)
1170 dso->needs_swap = DSO_SWAP__YES;
1171 break;
1172
1173 case ELFDATA2MSB:
1174 /* We are little endian, DSO is big endian. */
1175 if (*(unsigned char const *)&endian != 0)
1176 dso->needs_swap = DSO_SWAP__YES;
1177 break;
1178
1179 default:
1180 pr_err("unrecognized DSO data encoding %d\n", eidata);
1181 return -EINVAL;
1182 }
1183
1184 return 0;
1185}
1186
1159static int dso__load_sym(struct dso *dso, struct map *map, const char *name, 1187static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1160 int fd, symbol_filter_t filter, int kmodule, 1188 int fd, symbol_filter_t filter, int kmodule,
1161 int want_symtab) 1189 int want_symtab)
@@ -1187,6 +1215,9 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1187 goto out_elf_end; 1215 goto out_elf_end;
1188 } 1216 }
1189 1217
1218 if (dso__swap_init(dso, ehdr.e_ident[EI_DATA]))
1219 goto out_elf_end;
1220
1190 /* Always reject images with a mismatched build-id: */ 1221 /* Always reject images with a mismatched build-id: */
1191 if (dso->has_build_id) { 1222 if (dso->has_build_id) {
1192 u8 build_id[BUILD_ID_SIZE]; 1223 u8 build_id[BUILD_ID_SIZE];
@@ -1272,7 +1303,7 @@ static int dso__load_sym(struct dso *dso, struct map *map, const char *name,
1272 if (opdsec && sym.st_shndx == opdidx) { 1303 if (opdsec && sym.st_shndx == opdidx) {
1273 u32 offset = sym.st_value - opdshdr.sh_addr; 1304 u32 offset = sym.st_value - opdshdr.sh_addr;
1274 u64 *opd = opddata->d_buf + offset; 1305 u64 *opd = opddata->d_buf + offset;
1275 sym.st_value = *opd; 1306 sym.st_value = DSO__SWAP(dso, u64, *opd);
1276 sym.st_shndx = elf_addr_to_index(elf, sym.st_value); 1307 sym.st_shndx = elf_addr_to_index(elf, sym.st_value);
1277 } 1308 }
1278 1309
@@ -2786,8 +2817,11 @@ int machine__load_vmlinux_path(struct machine *machine, enum map_type type,
2786 2817
2787struct map *dso__new_map(const char *name) 2818struct map *dso__new_map(const char *name)
2788{ 2819{
2820 struct map *map = NULL;
2789 struct dso *dso = dso__new(name); 2821 struct dso *dso = dso__new(name);
2790 struct map *map = map__new2(0, dso, MAP__FUNCTION); 2822
2823 if (dso)
2824 map = map__new2(0, dso, MAP__FUNCTION);
2791 2825
2792 return map; 2826 return map;
2793} 2827}
diff --git a/tools/perf/util/symbol.h b/tools/perf/util/symbol.h
index 5649d63798cb..af0752b1aca1 100644
--- a/tools/perf/util/symbol.h
+++ b/tools/perf/util/symbol.h
@@ -9,6 +9,7 @@
9#include <linux/list.h> 9#include <linux/list.h>
10#include <linux/rbtree.h> 10#include <linux/rbtree.h>
11#include <stdio.h> 11#include <stdio.h>
12#include <byteswap.h>
12 13
13#ifdef HAVE_CPLUS_DEMANGLE 14#ifdef HAVE_CPLUS_DEMANGLE
14extern char *cplus_demangle(const char *, int); 15extern char *cplus_demangle(const char *, int);
@@ -160,11 +161,18 @@ enum dso_kernel_type {
160 DSO_TYPE_GUEST_KERNEL 161 DSO_TYPE_GUEST_KERNEL
161}; 162};
162 163
164enum dso_swap_type {
165 DSO_SWAP__UNSET,
166 DSO_SWAP__NO,
167 DSO_SWAP__YES,
168};
169
163struct dso { 170struct dso {
164 struct list_head node; 171 struct list_head node;
165 struct rb_root symbols[MAP__NR_TYPES]; 172 struct rb_root symbols[MAP__NR_TYPES];
166 struct rb_root symbol_names[MAP__NR_TYPES]; 173 struct rb_root symbol_names[MAP__NR_TYPES];
167 enum dso_kernel_type kernel; 174 enum dso_kernel_type kernel;
175 enum dso_swap_type needs_swap;
168 u8 adjust_symbols:1; 176 u8 adjust_symbols:1;
169 u8 has_build_id:1; 177 u8 has_build_id:1;
170 u8 hit:1; 178 u8 hit:1;
@@ -182,6 +190,28 @@ struct dso {
182 char name[0]; 190 char name[0];
183}; 191};
184 192
193#define DSO__SWAP(dso, type, val) \
194({ \
195 type ____r = val; \
196 BUG_ON(dso->needs_swap == DSO_SWAP__UNSET); \
197 if (dso->needs_swap == DSO_SWAP__YES) { \
198 switch (sizeof(____r)) { \
199 case 2: \
200 ____r = bswap_16(val); \
201 break; \
202 case 4: \
203 ____r = bswap_32(val); \
204 break; \
205 case 8: \
206 ____r = bswap_64(val); \
207 break; \
208 default: \
209 BUG_ON(1); \
210 } \
211 } \
212 ____r; \
213})
214
185struct dso *dso__new(const char *name); 215struct dso *dso__new(const char *name);
186void dso__delete(struct dso *dso); 216void dso__delete(struct dso *dso);
187 217
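The dso__swap_init()/DSO__SWAP changes above decide whether symbol values need swapping by comparing the ELF file's EI_DATA byte with the host byte order. A minimal standalone sketch of that check, not taken from the patch; needs_swap() and host_is_little_endian() are invented names:

#include <stdio.h>
#include <stdint.h>
#include <elf.h>        /* ELFDATA2LSB / ELFDATA2MSB */
#include <byteswap.h>

static int host_is_little_endian(void)
{
	static const unsigned int one = 1;

	return *(const unsigned char *)&one == 1;
}

/* Returns 1 if values read from the object need byte swapping. */
static int needs_swap(unsigned char ei_data)
{
	if (ei_data == ELFDATA2LSB)
		return !host_is_little_endian();
	if (ei_data == ELFDATA2MSB)
		return host_is_little_endian();
	return -1;	/* unrecognized data encoding */
}

int main(void)
{
	uint64_t opd = 0x0123456789abcdefULL;
	int swap = needs_swap(ELFDATA2MSB);	/* pretend the DSO is big endian */

	if (swap == 1)
		opd = bswap_64(opd);
	printf("needs_swap=%d value=%016llx\n", swap, (unsigned long long)opd);
	return 0;
}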
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index ab2f682fd44c..16de7ad4850f 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -73,8 +73,8 @@ int backwards_count;
73char *progname; 73char *progname;
74 74
75int num_cpus; 75int num_cpus;
76cpu_set_t *cpu_mask; 76cpu_set_t *cpu_present_set, *cpu_mask;
77size_t cpu_mask_size; 77size_t cpu_present_setsize, cpu_mask_size;
78 78
79struct counters { 79struct counters {
80 unsigned long long tsc; /* per thread */ 80 unsigned long long tsc; /* per thread */
@@ -103,6 +103,12 @@ struct timeval tv_even;
103struct timeval tv_odd; 103struct timeval tv_odd;
104struct timeval tv_delta; 104struct timeval tv_delta;
105 105
106int mark_cpu_present(int pkg, int core, int cpu)
107{
108 CPU_SET_S(cpu, cpu_present_setsize, cpu_present_set);
109 return 0;
110}
111
106/* 112/*
107 * cpu_mask_init(ncpus) 113 * cpu_mask_init(ncpus)
108 * 114 *
@@ -118,6 +124,18 @@ void cpu_mask_init(int ncpus)
118 } 124 }
119 cpu_mask_size = CPU_ALLOC_SIZE(ncpus); 125 cpu_mask_size = CPU_ALLOC_SIZE(ncpus);
120 CPU_ZERO_S(cpu_mask_size, cpu_mask); 126 CPU_ZERO_S(cpu_mask_size, cpu_mask);
127
128 /*
129 * Allocate and initialize cpu_present_set
130 */
131 cpu_present_set = CPU_ALLOC(ncpus);
132 if (cpu_present_set == NULL) {
133 perror("CPU_ALLOC");
134 exit(3);
135 }
136 cpu_present_setsize = CPU_ALLOC_SIZE(ncpus);
137 CPU_ZERO_S(cpu_present_setsize, cpu_present_set);
138 for_all_cpus(mark_cpu_present);
121} 139}
122 140
123void cpu_mask_uninit() 141void cpu_mask_uninit()
@@ -125,6 +143,9 @@ void cpu_mask_uninit()
125 CPU_FREE(cpu_mask); 143 CPU_FREE(cpu_mask);
126 cpu_mask = NULL; 144 cpu_mask = NULL;
127 cpu_mask_size = 0; 145 cpu_mask_size = 0;
146 CPU_FREE(cpu_present_set);
147 cpu_present_set = NULL;
148 cpu_present_setsize = 0;
128} 149}
129 150
130int cpu_migrate(int cpu) 151int cpu_migrate(int cpu)
@@ -912,6 +933,8 @@ int is_snb(unsigned int family, unsigned int model)
912 switch (model) { 933 switch (model) {
913 case 0x2A: 934 case 0x2A:
914 case 0x2D: 935 case 0x2D:
936 case 0x3A: /* IVB */
937 case 0x3D: /* IVB Xeon */
915 return 1; 938 return 1;
916 } 939 }
917 return 0; 940 return 0;
@@ -1047,6 +1070,9 @@ int fork_it(char **argv)
1047 int retval; 1070 int retval;
1048 pid_t child_pid; 1071 pid_t child_pid;
1049 get_counters(cnt_even); 1072 get_counters(cnt_even);
1073
1074 /* clear affinity side-effect of get_counters() */
1075 sched_setaffinity(0, cpu_present_setsize, cpu_present_set);
1050 gettimeofday(&tv_even, (struct timezone *)NULL); 1076 gettimeofday(&tv_even, (struct timezone *)NULL);
1051 1077
1052 child_pid = fork(); 1078 child_pid = fork();
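The turbostat change above keeps a dynamically sized cpu_present_set so that the process affinity can be restored after get_counters() has pinned the task to individual CPUs. A minimal standalone sketch of that cpu_set_t pattern, not from the patch; it assumes every CPU index below the configured count is present, whereas the tool itself walks the topology via for_all_cpus():

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int ncpus = sysconf(_SC_NPROCESSORS_CONF);
	cpu_set_t *present = CPU_ALLOC(ncpus);
	size_t setsize = CPU_ALLOC_SIZE(ncpus);
	int cpu;

	if (!present) {
		perror("CPU_ALLOC");
		return 1;
	}
	CPU_ZERO_S(setsize, present);
	for (cpu = 0; cpu < ncpus; cpu++)	/* assume all CPUs are present */
		CPU_SET_S(cpu, setsize, present);

	/* undo any earlier single-CPU pinning */
	if (sched_setaffinity(0, setsize, present))
		perror("sched_setaffinity");

	CPU_FREE(present);
	return 0;
}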
diff --git a/tools/testing/selftests/Makefile b/tools/testing/selftests/Makefile
index 28bc57ee757c..a4162e15c25f 100644
--- a/tools/testing/selftests/Makefile
+++ b/tools/testing/selftests/Makefile
@@ -1,4 +1,4 @@
1TARGETS = breakpoints vm 1TARGETS = breakpoints kcmp mqueue vm
2 2
3all: 3all:
4 for TARGET in $(TARGETS); do \ 4 for TARGET in $(TARGETS); do \
diff --git a/tools/testing/selftests/kcmp/Makefile b/tools/testing/selftests/kcmp/Makefile
new file mode 100644
index 000000000000..dc79b86ea65c
--- /dev/null
+++ b/tools/testing/selftests/kcmp/Makefile
@@ -0,0 +1,29 @@
1uname_M := $(shell uname -m 2>/dev/null || echo not)
2ARCH ?= $(shell echo $(uname_M) | sed -e s/i.86/i386/)
3ifeq ($(ARCH),i386)
4 ARCH := X86
5 CFLAGS := -DCONFIG_X86_32 -D__i386__
6endif
7ifeq ($(ARCH),x86_64)
8 ARCH := X86
9 CFLAGS := -DCONFIG_X86_64 -D__x86_64__
10endif
11
12CFLAGS += -I../../../../arch/x86/include/generated/
13CFLAGS += -I../../../../include/
14CFLAGS += -I../../../../usr/include/
15CFLAGS += -I../../../../arch/x86/include/
16
17all:
18ifeq ($(ARCH),X86)
19 gcc $(CFLAGS) kcmp_test.c -o run_test
20else
21 echo "Not an x86 target, can't build kcmp selftest"
22endif
23
24run-tests: all
25 ./kcmp_test
26
27clean:
28 rm -fr ./run_test
29 rm -fr ./test-file
diff --git a/tools/testing/selftests/kcmp/kcmp_test.c b/tools/testing/selftests/kcmp/kcmp_test.c
new file mode 100644
index 000000000000..358cc6bfa35d
--- /dev/null
+++ b/tools/testing/selftests/kcmp/kcmp_test.c
@@ -0,0 +1,94 @@
1#define _GNU_SOURCE
2
3#include <stdio.h>
4#include <stdlib.h>
5#include <signal.h>
6#include <limits.h>
7#include <unistd.h>
8#include <errno.h>
9#include <string.h>
10#include <fcntl.h>
11
12#include <linux/unistd.h>
13#include <linux/kcmp.h>
14
15#include <sys/syscall.h>
16#include <sys/types.h>
17#include <sys/stat.h>
18#include <sys/wait.h>
19
20static long sys_kcmp(int pid1, int pid2, int type, int fd1, int fd2)
21{
22 return syscall(__NR_kcmp, pid1, pid2, type, fd1, fd2);
23}
24
25int main(int argc, char **argv)
26{
27 const char kpath[] = "kcmp-test-file";
28 int pid1, pid2;
29 int fd1, fd2;
30 int status;
31
32 fd1 = open(kpath, O_RDWR | O_CREAT | O_TRUNC, 0644);
33 pid1 = getpid();
34
35 if (fd1 < 0) {
36 perror("Can't create file");
37 exit(1);
38 }
39
40 pid2 = fork();
41 if (pid2 < 0) {
42 perror("fork failed");
43 exit(1);
44 }
45
46 if (!pid2) {
47 int pid2 = getpid();
48 int ret;
49
50 fd2 = open(kpath, O_RDWR, 0644);
51 if (fd2 < 0) {
52 perror("Can't open file");
53 exit(1);
54 }
55
56 /* An example of output and arguments */
57 printf("pid1: %6d pid2: %6d FD: %2ld FILES: %2ld VM: %2ld "
58 "FS: %2ld SIGHAND: %2ld IO: %2ld SYSVSEM: %2ld "
59 "INV: %2ld\n",
60 pid1, pid2,
61 sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd2),
62 sys_kcmp(pid1, pid2, KCMP_FILES, 0, 0),
63 sys_kcmp(pid1, pid2, KCMP_VM, 0, 0),
64 sys_kcmp(pid1, pid2, KCMP_FS, 0, 0),
65 sys_kcmp(pid1, pid2, KCMP_SIGHAND, 0, 0),
66 sys_kcmp(pid1, pid2, KCMP_IO, 0, 0),
67 sys_kcmp(pid1, pid2, KCMP_SYSVSEM, 0, 0),
68
69 /* This one should fail */
70 sys_kcmp(pid1, pid2, KCMP_TYPES + 1, 0, 0));
71
72 /* This one should return same fd */
73 ret = sys_kcmp(pid1, pid2, KCMP_FILE, fd1, fd1);
74 if (ret) {
75 printf("FAIL: 0 expected but %d returned\n", ret);
76 ret = -1;
77 } else
78 printf("PASS: 0 returned as expected\n");
79
80 /* Compare with self */
81 ret = sys_kcmp(pid1, pid1, KCMP_VM, 0, 0);
82 if (ret) {
83 printf("FAIL: 0 expected but %li returned\n", ret);
84 ret = -1;
85 } else
86 printf("PASS: 0 returned as expected\n");
87
88 exit(ret);
89 }
90
91 waitpid(pid2, &status, P_ALL);
92
93 return 0;
94}
diff --git a/tools/testing/selftests/mqueue/.gitignore b/tools/testing/selftests/mqueue/.gitignore
new file mode 100644
index 000000000000..d8d42377205a
--- /dev/null
+++ b/tools/testing/selftests/mqueue/.gitignore
@@ -0,0 +1,2 @@
1mq_open_tests
2mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/Makefile b/tools/testing/selftests/mqueue/Makefile
new file mode 100644
index 000000000000..54c0aad2b47c
--- /dev/null
+++ b/tools/testing/selftests/mqueue/Makefile
@@ -0,0 +1,10 @@
1all:
2 gcc -O2 -lrt mq_open_tests.c -o mq_open_tests
3 gcc -O2 -lrt -lpthread -lpopt -o mq_perf_tests mq_perf_tests.c
4
5run_tests:
6 ./mq_open_tests /test1
7 ./mq_perf_tests
8
9clean:
10 rm -f mq_open_tests mq_perf_tests
diff --git a/tools/testing/selftests/mqueue/mq_open_tests.c b/tools/testing/selftests/mqueue/mq_open_tests.c
new file mode 100644
index 000000000000..711cc2923047
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_open_tests.c
@@ -0,0 +1,492 @@
1/*
2 * This application is Copyright 2012 Red Hat, Inc.
3 * Doug Ledford <dledford@redhat.com>
4 *
5 * mq_open_tests is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, version 3.
8 *
9 * mq_open_tests is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * For the full text of the license, see <http://www.gnu.org/licenses/>.
15 *
16 * mq_open_tests.c
17 * Tests the various situations that should either succeed or fail to
18 * open a posix message queue and then reports whether or not they
19 * did as they were supposed to.
20 *
21 */
22#include <stdio.h>
23#include <stdlib.h>
24#include <unistd.h>
25#include <fcntl.h>
26#include <string.h>
27#include <limits.h>
28#include <errno.h>
29#include <sys/types.h>
30#include <sys/time.h>
31#include <sys/resource.h>
32#include <sys/stat.h>
33#include <mqueue.h>
34
35static char *usage =
36"Usage:\n"
37" %s path\n"
38"\n"
39" path Path name of the message queue to create\n"
40"\n"
41" Note: this program must be run as root in order to enable all tests\n"
42"\n";
43
44char *DEF_MSGS = "/proc/sys/fs/mqueue/msg_default";
45char *DEF_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_default";
46char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
47char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
48
49int default_settings;
50struct rlimit saved_limits, cur_limits;
51int saved_def_msgs, saved_def_msgsize, saved_max_msgs, saved_max_msgsize;
52int cur_def_msgs, cur_def_msgsize, cur_max_msgs, cur_max_msgsize;
53FILE *def_msgs, *def_msgsize, *max_msgs, *max_msgsize;
54char *queue_path;
55mqd_t queue = -1;
56
57static inline void __set(FILE *stream, int value, char *err_msg);
58void shutdown(int exit_val, char *err_cause, int line_no);
59static inline int get(FILE *stream);
60static inline void set(FILE *stream, int value);
61static inline void getr(int type, struct rlimit *rlim);
62static inline void setr(int type, struct rlimit *rlim);
63void validate_current_settings();
64static inline void test_queue(struct mq_attr *attr, struct mq_attr *result);
65static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result);
66
67static inline void __set(FILE *stream, int value, char *err_msg)
68{
69 rewind(stream);
70 if (fprintf(stream, "%d", value) < 0)
71 perror(err_msg);
72}
73
74
75void shutdown(int exit_val, char *err_cause, int line_no)
76{
77 static int in_shutdown = 0;
78
79 /* In case we get called recursively by a set() call below */
80 if (in_shutdown++)
81 return;
82
83 seteuid(0);
84
85 if (queue != -1)
86 if (mq_close(queue))
87 perror("mq_close() during shutdown");
88 if (queue_path)
89 /*
90 * Be silent if this fails, if we cleaned up already it's
91 * expected to fail
92 */
93 mq_unlink(queue_path);
94 if (default_settings) {
95 if (saved_def_msgs)
96 __set(def_msgs, saved_def_msgs,
97 "failed to restore saved_def_msgs");
98 if (saved_def_msgsize)
99 __set(def_msgsize, saved_def_msgsize,
100 "failed to restore saved_def_msgsize");
101 }
102 if (saved_max_msgs)
103 __set(max_msgs, saved_max_msgs,
104 "failed to restore saved_max_msgs");
105 if (saved_max_msgsize)
106 __set(max_msgsize, saved_max_msgsize,
107 "failed to restore saved_max_msgsize");
108 if (exit_val)
109 error(exit_val, errno, "%s at %d", err_cause, line_no);
110 exit(0);
111}
112
113static inline int get(FILE *stream)
114{
115 int value;
116 rewind(stream);
117 if (fscanf(stream, "%d", &value) != 1)
118 shutdown(4, "Error reading /proc entry", __LINE__ - 1);
119 return value;
120}
121
122static inline void set(FILE *stream, int value)
123{
124 int new_value;
125
126 rewind(stream);
127 if (fprintf(stream, "%d", value) < 0)
128 return shutdown(5, "Failed writing to /proc file",
129 __LINE__ - 1);
130 new_value = get(stream);
131 if (new_value != value)
132 return shutdown(5, "We didn't get what we wrote to /proc back",
133 __LINE__ - 1);
134}
135
136static inline void getr(int type, struct rlimit *rlim)
137{
138 if (getrlimit(type, rlim))
139 shutdown(6, "getrlimit()", __LINE__ - 1);
140}
141
142static inline void setr(int type, struct rlimit *rlim)
143{
144 if (setrlimit(type, rlim))
145 shutdown(7, "setrlimit()", __LINE__ - 1);
146}
147
148void validate_current_settings()
149{
150 int rlim_needed;
151
152 if (cur_limits.rlim_cur < 4096) {
153 printf("Current rlimit value for POSIX message queue bytes is "
154 "unreasonably low,\nincreasing.\n\n");
155 cur_limits.rlim_cur = 8192;
156 cur_limits.rlim_max = 16384;
157 setr(RLIMIT_MSGQUEUE, &cur_limits);
158 }
159
160 if (default_settings) {
161 rlim_needed = (cur_def_msgs + 1) * (cur_def_msgsize + 1 +
162 2 * sizeof(void *));
163 if (rlim_needed > cur_limits.rlim_cur) {
164 printf("Temporarily lowering default queue parameters "
165 "to something that will work\n"
166 "with the current rlimit values.\n\n");
167 set(def_msgs, 10);
168 cur_def_msgs = 10;
169 set(def_msgsize, 128);
170 cur_def_msgsize = 128;
171 }
172 } else {
173 rlim_needed = (cur_max_msgs + 1) * (cur_max_msgsize + 1 +
174 2 * sizeof(void *));
175 if (rlim_needed > cur_limits.rlim_cur) {
176 printf("Temporarily lowering maximum queue parameters "
177 "to something that will work\n"
178 "with the current rlimit values in case this is "
179 "a kernel that ties the default\n"
180 "queue parameters to the maximum queue "
181 "parameters.\n\n");
182 set(max_msgs, 10);
183 cur_max_msgs = 10;
184 set(max_msgsize, 128);
185 cur_max_msgsize = 128;
186 }
187 }
188}
189
190/*
191 * test_queue - Test opening a queue, shutdown if we fail. This should
192 * only be called in situations that should never fail. We clean up
193 * after ourselves and return the queue attributes in *result.
194 */
195static inline void test_queue(struct mq_attr *attr, struct mq_attr *result)
196{
197 int flags = O_RDWR | O_EXCL | O_CREAT;
198 int perms = DEFFILEMODE;
199
200 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
201 shutdown(1, "mq_open()", __LINE__);
202 if (mq_getattr(queue, result))
203 shutdown(1, "mq_getattr()", __LINE__);
204 if (mq_close(queue))
205 shutdown(1, "mq_close()", __LINE__);
206 queue = -1;
207 if (mq_unlink(queue_path))
208 shutdown(1, "mq_unlink()", __LINE__);
209}
210
211/*
212 * Same as test_queue above, but failure is not fatal.
213 * Returns:
214 * 0 - Failed to create a queue
215 * 1 - Created a queue, attributes in *result
216 */
217static inline int test_queue_fail(struct mq_attr *attr, struct mq_attr *result)
218{
219 int flags = O_RDWR | O_EXCL | O_CREAT;
220 int perms = DEFFILEMODE;
221
222 if ((queue = mq_open(queue_path, flags, perms, attr)) == -1)
223 return 0;
224 if (mq_getattr(queue, result))
225 shutdown(1, "mq_getattr()", __LINE__);
226 if (mq_close(queue))
227 shutdown(1, "mq_close()", __LINE__);
228 queue = -1;
229 if (mq_unlink(queue_path))
230 shutdown(1, "mq_unlink()", __LINE__);
231 return 1;
232}
233
234int main(int argc, char *argv[])
235{
236 struct mq_attr attr, result;
237
238 if (argc != 2) {
239 fprintf(stderr, "Must pass a valid queue name\n\n");
240 fprintf(stderr, usage, argv[0]);
241 exit(1);
242 }
243
244 /*
245 * Although we can create a msg queue with a non-absolute path name,
246 * unlink will fail. So, if the name doesn't start with a /, add one
247 * when we save it.
248 */
249 if (*argv[1] == '/')
250 queue_path = strdup(argv[1]);
251 else {
252 queue_path = malloc(strlen(argv[1]) + 2);
253 if (!queue_path) {
254 perror("malloc()");
255 exit(1);
256 }
257 queue_path[0] = '/';
258 queue_path[1] = 0;
259 strcat(queue_path, argv[1]);
260 }
261
262 if (getuid() != 0) {
263 fprintf(stderr, "Not running as root, but almost all tests "
264 "require root in order to modify\nsystem settings. "
265 "Exiting.\n");
266 exit(1);
267 }
268
269 /* Find out what files there are for us to make tweaks in */
270 def_msgs = fopen(DEF_MSGS, "r+");
271 def_msgsize = fopen(DEF_MSGSIZE, "r+");
272 max_msgs = fopen(MAX_MSGS, "r+");
273 max_msgsize = fopen(MAX_MSGSIZE, "r+");
274
275 if (!max_msgs)
276 shutdown(2, "Failed to open msg_max", __LINE__);
277 if (!max_msgsize)
278 shutdown(2, "Failed to open msgsize_max", __LINE__);
279 if (def_msgs || def_msgsize)
280 default_settings = 1;
281
282 /* Load up the current system values for everything we can */
283 getr(RLIMIT_MSGQUEUE, &saved_limits);
284 cur_limits = saved_limits;
285 if (default_settings) {
286 saved_def_msgs = cur_def_msgs = get(def_msgs);
287 saved_def_msgsize = cur_def_msgsize = get(def_msgsize);
288 }
289 saved_max_msgs = cur_max_msgs = get(max_msgs);
290 saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
291
292 /* Tell the user our initial state */
293 printf("\nInitial system state:\n");
294 printf("\tUsing queue path:\t\t%s\n", queue_path);
295 printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", saved_limits.rlim_cur);
296 printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", saved_limits.rlim_max);
297 printf("\tMaximum Message Size:\t\t%d\n", saved_max_msgsize);
298 printf("\tMaximum Queue Size:\t\t%d\n", saved_max_msgs);
299 if (default_settings) {
300 printf("\tDefault Message Size:\t\t%d\n", saved_def_msgsize);
301 printf("\tDefault Queue Size:\t\t%d\n", saved_def_msgs);
302 } else {
303 printf("\tDefault Message Size:\t\tNot Supported\n");
304 printf("\tDefault Queue Size:\t\tNot Supported\n");
305 }
306 printf("\n");
307
308 validate_current_settings();
309
310 printf("Adjusted system state for testing:\n");
311 printf("\tRLIMIT_MSGQUEUE(soft):\t\t%d\n", cur_limits.rlim_cur);
312 printf("\tRLIMIT_MSGQUEUE(hard):\t\t%d\n", cur_limits.rlim_max);
313 printf("\tMaximum Message Size:\t\t%d\n", cur_max_msgsize);
314 printf("\tMaximum Queue Size:\t\t%d\n", cur_max_msgs);
315 if (default_settings) {
316 printf("\tDefault Message Size:\t\t%d\n", cur_def_msgsize);
317 printf("\tDefault Queue Size:\t\t%d\n", cur_def_msgs);
318 }
319
320 printf("\n\nTest series 1, behavior when no attr struct "
321 "passed to mq_open:\n");
322 if (!default_settings) {
323 test_queue(NULL, &result);
324 printf("Given sane system settings, mq_open without an attr "
325 "struct succeeds:\tPASS\n");
326 if (result.mq_maxmsg != cur_max_msgs ||
327 result.mq_msgsize != cur_max_msgsize) {
328 printf("Kernel does not support setting the default "
329 "mq attributes,\nbut also doesn't tie the "
330 "defaults to the maximums:\t\t\tPASS\n");
331 } else {
332 set(max_msgs, ++cur_max_msgs);
333 set(max_msgsize, ++cur_max_msgsize);
334 test_queue(NULL, &result);
335 if (result.mq_maxmsg == cur_max_msgs &&
336 result.mq_msgsize == cur_max_msgsize)
337 printf("Kernel does not support setting the "
338 "default mq attributes and\n"
339 "also ties system wide defaults to "
340 "the system wide maximums:\t\t"
341 "FAIL\n");
342 else
343 printf("Kernel does not support setting the "
344 "default mq attributes,\n"
345 "but also doesn't tie the defaults to "
346 "the maximums:\t\t\tPASS\n");
347 }
348 } else {
349 printf("Kernel supports setting defaults separately from "
350 "maximums:\t\tPASS\n");
351 /*
352 * While we are here, go ahead and test that the kernel
353 * properly follows the default settings
354 */
355 test_queue(NULL, &result);
356 printf("Given sane values, mq_open without an attr struct "
357 "succeeds:\t\tPASS\n");
358 if (result.mq_maxmsg != cur_def_msgs ||
359 result.mq_msgsize != cur_def_msgsize)
360 printf("Kernel supports setting defaults, but does "
361 "not actually honor them:\tFAIL\n\n");
362 else {
363 set(def_msgs, ++cur_def_msgs);
364 set(def_msgsize, ++cur_def_msgsize);
365 /* In case max was the same as the default */
366 set(max_msgs, ++cur_max_msgs);
367 set(max_msgsize, ++cur_max_msgsize);
368 test_queue(NULL, &result);
369 if (result.mq_maxmsg != cur_def_msgs ||
370 result.mq_msgsize != cur_def_msgsize)
371 printf("Kernel supports setting defaults, but "
372 "does not actually honor them:\t"
373 "FAIL\n");
374 else
375 printf("Kernel properly honors default setting "
376 "knobs:\t\t\t\tPASS\n");
377 }
378 set(def_msgs, cur_max_msgs + 1);
379 cur_def_msgs = cur_max_msgs + 1;
380 set(def_msgsize, cur_max_msgsize + 1);
381 cur_def_msgsize = cur_max_msgsize + 1;
382 if (cur_def_msgs * (cur_def_msgsize + 2 * sizeof(void *)) >=
383 cur_limits.rlim_cur) {
384 cur_limits.rlim_cur = (cur_def_msgs + 2) *
385 (cur_def_msgsize + 2 * sizeof(void *));
386 cur_limits.rlim_max = 2 * cur_limits.rlim_cur;
387 setr(RLIMIT_MSGQUEUE, &cur_limits);
388 }
389 if (test_queue_fail(NULL, &result)) {
390 if (result.mq_maxmsg == cur_max_msgs &&
391 result.mq_msgsize == cur_max_msgsize)
392 printf("Kernel properly limits default values "
393 "to lesser of default/max:\t\tPASS\n");
394 else
395 printf("Kernel does not properly set default "
396 "queue parameters when\ndefaults > "
397 "max:\t\t\t\t\t\t\t\tFAIL\n");
398 } else
399 printf("Kernel fails to open mq because defaults are "
400 "greater than maximums:\tFAIL\n");
401 set(def_msgs, --cur_def_msgs);
402 set(def_msgsize, --cur_def_msgsize);
403 cur_limits.rlim_cur = cur_limits.rlim_max = cur_def_msgs *
404 cur_def_msgsize;
405 setr(RLIMIT_MSGQUEUE, &cur_limits);
406 if (test_queue_fail(NULL, &result))
407 printf("Kernel creates queue even though defaults "
408 "would exceed\nrlimit setting:"
409 "\t\t\t\t\t\t\t\tFAIL\n");
410 else
411 printf("Kernel properly fails to create queue when "
412 "defaults would\nexceed rlimit:"
413 "\t\t\t\t\t\t\t\tPASS\n");
414 }
415
416 /*
417 * Test #2 - open with an attr struct that exceeds rlimit
418 */
419 printf("\n\nTest series 2, behavior when attr struct is "
420 "passed to mq_open:\n");
421 cur_max_msgs = 32;
422 cur_max_msgsize = cur_limits.rlim_max >> 4;
423 set(max_msgs, cur_max_msgs);
424 set(max_msgsize, cur_max_msgsize);
425 attr.mq_maxmsg = cur_max_msgs;
426 attr.mq_msgsize = cur_max_msgsize;
427 if (test_queue_fail(&attr, &result))
428 printf("Queue open in excess of rlimit max when euid = 0 "
429 "succeeded:\t\tFAIL\n");
430 else
431 printf("Queue open in excess of rlimit max when euid = 0 "
432 "failed:\t\tPASS\n");
433 attr.mq_maxmsg = cur_max_msgs + 1;
434 attr.mq_msgsize = 10;
435 if (test_queue_fail(&attr, &result))
436 printf("Queue open with mq_maxmsg > limit when euid = 0 "
437 "succeeded:\t\tPASS\n");
438 else
439 printf("Queue open with mq_maxmsg > limit when euid = 0 "
440 "failed:\t\tFAIL\n");
441 attr.mq_maxmsg = 1;
442 attr.mq_msgsize = cur_max_msgsize + 1;
443 if (test_queue_fail(&attr, &result))
444 printf("Queue open with mq_msgsize > limit when euid = 0 "
445 "succeeded:\t\tPASS\n");
446 else
447 printf("Queue open with mq_msgsize > limit when euid = 0 "
448 "failed:\t\tFAIL\n");
449 attr.mq_maxmsg = 65536;
450 attr.mq_msgsize = 65536;
451 if (test_queue_fail(&attr, &result))
452 printf("Queue open with total size > 2GB when euid = 0 "
453 "succeeded:\t\tFAIL\n");
454 else
455 printf("Queue open with total size > 2GB when euid = 0 "
456 "failed:\t\t\tPASS\n");
457 seteuid(99);
458 attr.mq_maxmsg = cur_max_msgs;
459 attr.mq_msgsize = cur_max_msgsize;
460 if (test_queue_fail(&attr, &result))
461 printf("Queue open in excess of rlimit max when euid = 99 "
462 "succeeded:\t\tFAIL\n");
463 else
464 printf("Queue open in excess of rlimit max when euid = 99 "
465 "failed:\t\tPASS\n");
466 attr.mq_maxmsg = cur_max_msgs + 1;
467 attr.mq_msgsize = 10;
468 if (test_queue_fail(&attr, &result))
469 printf("Queue open with mq_maxmsg > limit when euid = 99 "
470 "succeeded:\t\tFAIL\n");
471 else
472 printf("Queue open with mq_maxmsg > limit when euid = 99 "
473 "failed:\t\tPASS\n");
474 attr.mq_maxmsg = 1;
475 attr.mq_msgsize = cur_max_msgsize + 1;
476 if (test_queue_fail(&attr, &result))
477 printf("Queue open with mq_msgsize > limit when euid = 99 "
478 "succeeded:\t\tFAIL\n");
479 else
480 printf("Queue open with mq_msgsize > limit when euid = 99 "
481 "failed:\t\tPASS\n");
482 attr.mq_maxmsg = 65536;
483 attr.mq_msgsize = 65536;
484 if (test_queue_fail(&attr, &result))
485 printf("Queue open with total size > 2GB when euid = 99 "
486 "succeeded:\t\tFAIL\n");
487 else
488 printf("Queue open with total size > 2GB when euid = 99 "
489 "failed:\t\t\tPASS\n");
490
491 shutdown(0,"",0);
492}
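A minimal standalone sketch, not part of the test above, of the mq_open() call pattern mq_open_tests exercises: create a queue with explicit attributes, read back what the kernel actually granted, then clean up. The queue name is arbitrary and the program links with -lrt on older glibc:

#include <stdio.h>
#include <fcntl.h>
#include <sys/stat.h>
#include <mqueue.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
	struct mq_attr result;
	mqd_t q = mq_open("/mq_open_sketch", O_RDWR | O_CREAT | O_EXCL,
			  0644, &attr);

	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	if (mq_getattr(q, &result) == 0)
		printf("granted: mq_maxmsg=%ld mq_msgsize=%ld\n",
		       result.mq_maxmsg, result.mq_msgsize);
	mq_close(q);
	mq_unlink("/mq_open_sketch");
	return 0;
}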
diff --git a/tools/testing/selftests/mqueue/mq_perf_tests.c b/tools/testing/selftests/mqueue/mq_perf_tests.c
new file mode 100644
index 000000000000..2fadd4b97045
--- /dev/null
+++ b/tools/testing/selftests/mqueue/mq_perf_tests.c
@@ -0,0 +1,741 @@
1/*
2 * This application is Copyright 2012 Red Hat, Inc.
3 * Doug Ledford <dledford@redhat.com>
4 *
5 * mq_perf_tests is free software: you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation, version 3.
8 *
9 * mq_perf_tests is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * For the full text of the license, see <http://www.gnu.org/licenses/>.
15 *
16 * mq_perf_tests.c
17 * Tests various types of message queue workloads, concentrating on those
 18 * Tests various types of message queue workloads, concentrating on those
 18 * situations that involve large message sizes, large message queue depths,
19 * or both, and reports back useful metrics about kernel message queue
20 * performance.
21 *
22 */
23#define _GNU_SOURCE
24#include <stdio.h>
25#include <stdlib.h>
26#include <unistd.h>
27#include <fcntl.h>
28#include <string.h>
29#include <limits.h>
30#include <errno.h>
31#include <signal.h>
32#include <pthread.h>
33#include <sched.h>
34#include <sys/types.h>
35#include <sys/time.h>
36#include <sys/resource.h>
37#include <sys/stat.h>
38#include <mqueue.h>
39#include <popt.h>
40
41static char *usage =
42"Usage:\n"
43" %s [-c #[,#..] -f] path\n"
44"\n"
45" -c # Skip most tests and go straight to a high queue depth test\n"
46" and then run that test continuously (useful for running at\n"
47" the same time as some other workload to see how much the\n"
48" cache thrashing caused by adding messages to a very deep\n"
49" queue impacts the performance of other programs). The number\n"
50" indicates which CPU core we should bind the process to during\n"
51" the run. If you have more than one physical CPU, then you\n"
52" will need one copy per physical CPU package, and you should\n"
53" specify the CPU cores to pin ourself to via a comma separated\n"
54" list of CPU values.\n"
55" -f Only usable with continuous mode. Pin ourself to the CPUs\n"
56" as requested, then instead of looping doing a high mq\n"
57" workload, just busy loop. This will allow us to lock up a\n"
58" single CPU just like we normally would, but without actually\n"
59" thrashing the CPU cache. This is to make it easier to get\n"
60" comparable numbers from some other workload running on the\n"
61" other CPUs. One set of numbers with # CPUs locked up running\n"
62" an mq workload, and another set of numbers with those same\n"
63" CPUs locked away from the test workload, but not doing\n"
64" anything to trash the cache like the mq workload might.\n"
65" path Path name of the message queue to create\n"
66"\n"
67" Note: this program must be run as root in order to enable all tests\n"
68"\n";
69
70char *MAX_MSGS = "/proc/sys/fs/mqueue/msg_max";
71char *MAX_MSGSIZE = "/proc/sys/fs/mqueue/msgsize_max";
72
73#define min(a, b) ((a) < (b) ? (a) : (b))
74#define MAX_CPUS 64
75char *cpu_option_string;
76int cpus_to_pin[MAX_CPUS];
77int num_cpus_to_pin;
78pthread_t cpu_threads[MAX_CPUS];
79pthread_t main_thread;
80cpu_set_t *cpu_set;
81int cpu_set_size;
82int cpus_online;
83
84#define MSG_SIZE 16
85#define TEST1_LOOPS 10000000
86#define TEST2_LOOPS 100000
87int continuous_mode;
88int continuous_mode_fake;
89
90struct rlimit saved_limits, cur_limits;
91int saved_max_msgs, saved_max_msgsize;
92int cur_max_msgs, cur_max_msgsize;
93FILE *max_msgs, *max_msgsize;
94int cur_nice;
95char *queue_path = "/mq_perf_tests";
96mqd_t queue = -1;
97struct mq_attr result;
98int mq_prio_max;
99
100const struct poptOption options[] = {
101 {
102 .longName = "continuous",
103 .shortName = 'c',
104 .argInfo = POPT_ARG_STRING,
105 .arg = &cpu_option_string,
106 .val = 'c',
107 .descrip = "Run continuous tests at a high queue depth in "
108 "order to test the effects of cache thrashing on "
109 "other tasks on the system. This test is intended "
110 "to be run on one core of each physical CPU while "
111 "some other CPU intensive task is run on all the other "
112 "cores of that same physical CPU and the other task "
113 "is timed. It is assumed that the process of adding "
114 "messages to the message queue in a tight loop will "
115 "impact that other task to some degree. Once the "
116 "tests are performed in this way, you should then "
117 "re-run the tests using fake mode in order to check "
118 "the difference in time required to perform the CPU "
119 "intensive task",
120 .argDescrip = "cpu[,cpu]",
121 },
122 {
123 .longName = "fake",
124 .shortName = 'f',
125 .argInfo = POPT_ARG_NONE,
126 .arg = &continuous_mode_fake,
127 .val = 0,
 128 .descrip = "Tie up the CPUs that we would normally tie up in "
129 "continuous mode, but don't actually do any mq stuff, "
130 "just keep the CPU busy so it can't be used to process "
131 "system level tasks as this would free up resources on "
132 "the other CPU cores and skew the comparison between "
133 "the no-mqueue work and mqueue work tests",
134 .argDescrip = NULL,
135 },
136 {
137 .longName = "path",
138 .shortName = 'p',
139 .argInfo = POPT_ARG_STRING | POPT_ARGFLAG_SHOW_DEFAULT,
140 .arg = &queue_path,
141 .val = 'p',
142 .descrip = "The name of the path to use in the mqueue "
143 "filesystem for our tests",
144 .argDescrip = "pathname",
145 },
146 POPT_AUTOHELP
147 POPT_TABLEEND
148};
149
150static inline void __set(FILE *stream, int value, char *err_msg);
151void shutdown(int exit_val, char *err_cause, int line_no);
152void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context);
153void sig_action(int signum, siginfo_t *info, void *context);
154static inline int get(FILE *stream);
155static inline void set(FILE *stream, int value);
156static inline int try_set(FILE *stream, int value);
157static inline void getr(int type, struct rlimit *rlim);
158static inline void setr(int type, struct rlimit *rlim);
159static inline void open_queue(struct mq_attr *attr);
160void increase_limits(void);
161
162static inline void __set(FILE *stream, int value, char *err_msg)
163{
164 rewind(stream);
165 if (fprintf(stream, "%d", value) < 0)
166 perror(err_msg);
167}
168
169
170void shutdown(int exit_val, char *err_cause, int line_no)
171{
172 static int in_shutdown = 0;
173 int errno_at_shutdown = errno;
174 int i;
175
176 /* In case we get called by multiple threads or from an sighandler */
177 if (in_shutdown++)
178 return;
179
180 for (i = 0; i < num_cpus_to_pin; i++)
181 if (cpu_threads[i]) {
182 pthread_kill(cpu_threads[i], SIGUSR1);
183 pthread_join(cpu_threads[i], NULL);
184 }
185
186 if (queue != -1)
187 if (mq_close(queue))
188 perror("mq_close() during shutdown");
189 if (queue_path)
190 /*
191 * Be silent if this fails, if we cleaned up already it's
192 * expected to fail
193 */
194 mq_unlink(queue_path);
195 if (saved_max_msgs)
196 __set(max_msgs, saved_max_msgs,
197 "failed to restore saved_max_msgs");
198 if (saved_max_msgsize)
199 __set(max_msgsize, saved_max_msgsize,
200 "failed to restore saved_max_msgsize");
201 if (exit_val)
202 error(exit_val, errno_at_shutdown, "%s at %d",
203 err_cause, line_no);
204 exit(0);
205}
206
207void sig_action_SIGUSR1(int signum, siginfo_t *info, void *context)
208{
209 if (pthread_self() != main_thread)
210 pthread_exit(0);
211 else {
212 fprintf(stderr, "Caught signal %d in SIGUSR1 handler, "
213 "exiting\n", signum);
214 shutdown(0, "", 0);
215 fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
216 exit(0);
217 }
218}
219
220void sig_action(int signum, siginfo_t *info, void *context)
221{
222 if (pthread_self() != main_thread)
223 pthread_kill(main_thread, signum);
224 else {
225 fprintf(stderr, "Caught signal %d, exiting\n", signum);
226 shutdown(0, "", 0);
227 fprintf(stderr, "\n\nReturned from shutdown?!?!\n\n");
228 exit(0);
229 }
230}
231
232static inline int get(FILE *stream)
233{
234 int value;
235 rewind(stream);
236 if (fscanf(stream, "%d", &value) != 1)
237 shutdown(4, "Error reading /proc entry", __LINE__);
238 return value;
239}
240
241static inline void set(FILE *stream, int value)
242{
243 int new_value;
244
245 rewind(stream);
246 if (fprintf(stream, "%d", value) < 0)
247 return shutdown(5, "Failed writing to /proc file", __LINE__);
248 new_value = get(stream);
249 if (new_value != value)
250 return shutdown(5, "We didn't get what we wrote to /proc back",
251 __LINE__);
252}
253
254static inline int try_set(FILE *stream, int value)
255{
256 int new_value;
257
258 rewind(stream);
259 fprintf(stream, "%d", value);
260 new_value = get(stream);
261 return new_value == value;
262}
263
264static inline void getr(int type, struct rlimit *rlim)
265{
266 if (getrlimit(type, rlim))
267 shutdown(6, "getrlimit()", __LINE__);
268}
269
270static inline void setr(int type, struct rlimit *rlim)
271{
272 if (setrlimit(type, rlim))
273 shutdown(7, "setrlimit()", __LINE__);
274}
275
276/**
277 * open_queue - open the global queue for testing
278 * @attr - An attr struct specifying the desired queue traits
279 * @result - An attr struct that lists the actual traits the queue has
280 *
281 * This open is not allowed to fail, failure will result in an orderly
282 * shutdown of the program. The global queue_path is used to set what
283 * queue to open, the queue descriptor is saved in the global queue
284 * variable.
285 */
286static inline void open_queue(struct mq_attr *attr)
287{
288 int flags = O_RDWR | O_EXCL | O_CREAT | O_NONBLOCK;
289 int perms = DEFFILEMODE;
290
291 queue = mq_open(queue_path, flags, perms, attr);
292 if (queue == -1)
293 shutdown(1, "mq_open()", __LINE__);
294 if (mq_getattr(queue, &result))
295 shutdown(1, "mq_getattr()", __LINE__);
296 printf("\n\tQueue %s created:\n", queue_path);
297 printf("\t\tmq_flags:\t\t\t%s\n", result.mq_flags & O_NONBLOCK ?
298 "O_NONBLOCK" : "(null)");
299 printf("\t\tmq_maxmsg:\t\t\t%d\n", result.mq_maxmsg);
300 printf("\t\tmq_msgsize:\t\t\t%d\n", result.mq_msgsize);
301 printf("\t\tmq_curmsgs:\t\t\t%d\n", result.mq_curmsgs);
302}
303
304void *fake_cont_thread(void *arg)
305{
306 int i;
307
308 for (i = 0; i < num_cpus_to_pin; i++)
309 if (cpu_threads[i] == pthread_self())
310 break;
311 printf("\tStarted fake continuous mode thread %d on CPU %d\n", i,
312 cpus_to_pin[i]);
313 while (1)
314 ;
315}
316
317void *cont_thread(void *arg)
318{
319 char buff[MSG_SIZE];
320 int i, priority;
321
322 for (i = 0; i < num_cpus_to_pin; i++)
323 if (cpu_threads[i] == pthread_self())
324 break;
325 printf("\tStarted continuous mode thread %d on CPU %d\n", i,
326 cpus_to_pin[i]);
327 while (1) {
328 while (mq_send(queue, buff, sizeof(buff), 0) == 0)
329 ;
330 mq_receive(queue, buff, sizeof(buff), &priority);
331 }
332}
333
334#define drain_queue() \
335 while (mq_receive(queue, buff, MSG_SIZE, &prio_in) == MSG_SIZE)
336
337#define do_untimed_send() \
338 do { \
339 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
340 shutdown(3, "Test send failure", __LINE__); \
341 } while (0)
342
343#define do_send_recv() \
344 do { \
345 clock_gettime(clock, &start); \
346 if (mq_send(queue, buff, MSG_SIZE, prio_out)) \
347 shutdown(3, "Test send failure", __LINE__); \
348 clock_gettime(clock, &middle); \
349 if (mq_receive(queue, buff, MSG_SIZE, &prio_in) != MSG_SIZE) \
350 shutdown(3, "Test receive failure", __LINE__); \
351 clock_gettime(clock, &end); \
352 nsec = ((middle.tv_sec - start.tv_sec) * 1000000000) + \
353 (middle.tv_nsec - start.tv_nsec); \
354 send_total.tv_nsec += nsec; \
355 if (send_total.tv_nsec >= 1000000000) { \
356 send_total.tv_sec++; \
357 send_total.tv_nsec -= 1000000000; \
358 } \
359 nsec = ((end.tv_sec - middle.tv_sec) * 1000000000) + \
360 (end.tv_nsec - middle.tv_nsec); \
361 recv_total.tv_nsec += nsec; \
362 if (recv_total.tv_nsec >= 1000000000) { \
363 recv_total.tv_sec++; \
364 recv_total.tv_nsec -= 1000000000; \
365 } \
366 } while (0)
367
368struct test {
369 char *desc;
370 void (*func)(int *);
371};
372
373void const_prio(int *prio)
374{
375 return;
376}
377
378void inc_prio(int *prio)
379{
380 if (++*prio == mq_prio_max)
381 *prio = 0;
382}
383
384void dec_prio(int *prio)
385{
386 if (--*prio < 0)
387 *prio = mq_prio_max - 1;
388}
389
390void random_prio(int *prio)
391{
392 *prio = random() % mq_prio_max;
393}
394
395struct test test2[] = {
396 {"\n\tTest #2a: Time send/recv message, queue full, constant prio\n",
397 const_prio},
398 {"\n\tTest #2b: Time send/recv message, queue full, increasing prio\n",
399 inc_prio},
400 {"\n\tTest #2c: Time send/recv message, queue full, decreasing prio\n",
401 dec_prio},
402 {"\n\tTest #2d: Time send/recv message, queue full, random prio\n",
403 random_prio},
404 {NULL, NULL}
405};
406
407/**
408 * Tests to perform (all done with MSG_SIZE messages):
409 *
410 * 1) Time to add/remove message with 0 messages on queue
411 * 1a) with constant prio
412 * 2) Time to add/remove message when queue close to capacity:
413 * 2a) with constant prio
414 * 2b) with increasing prio
415 * 2c) with decreasing prio
416 * 2d) with random prio
417 * 3) Test limits of priorities honored (double check _SC_MQ_PRIO_MAX)
418 */
419void *perf_test_thread(void *arg)
420{
421 char buff[MSG_SIZE];
422 int prio_out, prio_in;
423 int i;
424 clockid_t clock;
425 pthread_t *t;
426 struct timespec res, start, middle, end, send_total, recv_total;
427 unsigned long long nsec;
428 struct test *cur_test;
429
430 t = &cpu_threads[0];
431 printf("\n\tStarted mqueue performance test thread on CPU %d\n",
432 cpus_to_pin[0]);
433 mq_prio_max = sysconf(_SC_MQ_PRIO_MAX);
434 if (mq_prio_max == -1)
435 shutdown(2, "sysconf(_SC_MQ_PRIO_MAX)", __LINE__);
436 if (pthread_getcpuclockid(cpu_threads[0], &clock) != 0)
437 shutdown(2, "pthread_getcpuclockid", __LINE__);
438
439 if (clock_getres(clock, &res))
440 shutdown(2, "clock_getres()", __LINE__);
441
442 printf("\t\tMax priorities:\t\t\t%d\n", mq_prio_max);
443 printf("\t\tClock resolution:\t\t%ld nsec%s\n", res.tv_nsec,
444 res.tv_nsec > 1 ? "s" : "");
445
446
447
448 printf("\n\tTest #1: Time send/recv message, queue empty\n");
449 printf("\t\t(%d iterations)\n", TEST1_LOOPS);
450 prio_out = 0;
451 send_total.tv_sec = 0;
452 send_total.tv_nsec = 0;
453 recv_total.tv_sec = 0;
454 recv_total.tv_nsec = 0;
455 for (i = 0; i < TEST1_LOOPS; i++)
456 do_send_recv();
457 printf("\t\tSend msg:\t\t\t%ld.%lds total time\n",
458 send_total.tv_sec, send_total.tv_nsec);
459 nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
460 send_total.tv_nsec) / TEST1_LOOPS;
461 printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
462 printf("\t\tRecv msg:\t\t\t%ld.%lds total time\n",
463 recv_total.tv_sec, recv_total.tv_nsec);
464 nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
465 recv_total.tv_nsec) / TEST1_LOOPS;
466 printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
467
468
469 for (cur_test = test2; cur_test->desc != NULL; cur_test++) {
470 printf("%s", cur_test->desc);
471 printf("\t\t(%d iterations)\n", TEST2_LOOPS);
472 prio_out = 0;
473 send_total.tv_sec = 0;
474 send_total.tv_nsec = 0;
475 recv_total.tv_sec = 0;
476 recv_total.tv_nsec = 0;
477 printf("\t\tFilling queue...");
478 fflush(stdout);
479 clock_gettime(clock, &start);
480 for (i = 0; i < result.mq_maxmsg - 1; i++) {
481 do_untimed_send();
482 cur_test->func(&prio_out);
483 }
484 clock_gettime(clock, &end);
485 nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
486 1000000000) + (end.tv_nsec - start.tv_nsec);
487 printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
488 nsec % 1000000000);
489 printf("\t\tTesting...");
490 fflush(stdout);
491 for (i = 0; i < TEST2_LOOPS; i++) {
492 do_send_recv();
493 cur_test->func(&prio_out);
494 }
495 printf("done.\n");
496 printf("\t\tSend msg:\t\t\t%ld.%lds total time\n",
497 send_total.tv_sec, send_total.tv_nsec);
498 nsec = ((unsigned long long)send_total.tv_sec * 1000000000 +
499 send_total.tv_nsec) / TEST2_LOOPS;
500 printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
501 printf("\t\tRecv msg:\t\t\t%ld.%lds total time\n",
502 recv_total.tv_sec, recv_total.tv_nsec);
503 nsec = ((unsigned long long)recv_total.tv_sec * 1000000000 +
504 recv_total.tv_nsec) / TEST2_LOOPS;
505 printf("\t\t\t\t\t\t%llu nsec/msg\n", nsec);
506 printf("\t\tDraining queue...");
507 fflush(stdout);
508 clock_gettime(clock, &start);
509 drain_queue();
510 clock_gettime(clock, &end);
511 nsec = ((unsigned long long)(end.tv_sec - start.tv_sec) *
512 1000000000) + (end.tv_nsec - start.tv_nsec);
513 printf("done.\t\t%lld.%llds\n", nsec / 1000000000,
514 nsec % 1000000000);
515 }
516 return 0;
517}
518
519void increase_limits(void)
520{
521 cur_limits.rlim_cur = RLIM_INFINITY;
522 cur_limits.rlim_max = RLIM_INFINITY;
523 setr(RLIMIT_MSGQUEUE, &cur_limits);
524 while (try_set(max_msgs, cur_max_msgs += 10))
525 ;
526 cur_max_msgs = get(max_msgs);
527 while (try_set(max_msgsize, cur_max_msgsize += 1024))
528 ;
529 cur_max_msgsize = get(max_msgsize);
530 if (setpriority(PRIO_PROCESS, 0, -20) != 0)
531 shutdown(2, "setpriority()", __LINE__);
532 cur_nice = -20;
533}
534
535int main(int argc, char *argv[])
536{
537 struct mq_attr attr;
538 char *option, *next_option;
539 int i, cpu;
540 struct sigaction sa;
541 poptContext popt_context;
542 int rc;
543 void *retval;
544
545 main_thread = pthread_self();
546 num_cpus_to_pin = 0;
547
548 if (sysconf(_SC_NPROCESSORS_ONLN) == -1) {
549 perror("sysconf(_SC_NPROCESSORS_ONLN)");
550 exit(1);
551 }
552 cpus_online = min(MAX_CPUS, sysconf(_SC_NPROCESSORS_ONLN));
553 cpu_set = CPU_ALLOC(cpus_online);
554 if (cpu_set == NULL) {
555 perror("CPU_ALLOC()");
556 exit(1);
557 }
558 cpu_set_size = CPU_ALLOC_SIZE(cpus_online);
559 CPU_ZERO_S(cpu_set_size, cpu_set);
560
561 popt_context = poptGetContext(NULL, argc, (const char **)argv,
562 options, 0);
563
564 while ((rc = poptGetNextOpt(popt_context)) > 0) {
565 switch (rc) {
566 case 'c':
567 continuous_mode = 1;
568 option = cpu_option_string;
569 do {
570 next_option = strchr(option, ',');
571 if (next_option)
572 *next_option = '\0';
573 cpu = atoi(option);
574 if (cpu >= cpus_online)
575 fprintf(stderr, "CPU %d exceeds "
576 "cpus online, ignoring.\n",
577 cpu);
578 else
579 cpus_to_pin[num_cpus_to_pin++] = cpu;
580 if (next_option)
581 option = ++next_option;
582 } while (next_option && num_cpus_to_pin < MAX_CPUS);
583 /* Double check that they didn't give us the same CPU
584 * more than once */
585 for (cpu = 0; cpu < num_cpus_to_pin; cpu++) {
586 if (CPU_ISSET_S(cpus_to_pin[cpu], cpu_set_size,
587 cpu_set)) {
588 fprintf(stderr, "Any given CPU may "
589 "only be given once.\n");
590 exit(1);
591 } else
592 CPU_SET_S(cpus_to_pin[cpu],
593 cpu_set_size, cpu_set);
594 }
595 break;
596 case 'p':
597 /*
598 * Although we can create a msg queue with a
599 * non-absolute path name, unlink will fail. So,
600 * if the name doesn't start with a /, add one
601 * when we save it.
602 */
603 option = queue_path;
604 if (*option != '/') {
605 queue_path = malloc(strlen(option) + 2);
606 if (!queue_path) {
607 perror("malloc()");
608 exit(1);
609 }
610 queue_path[0] = '/';
611 queue_path[1] = 0;
612 strcat(queue_path, option);
613 free(option);
614 }
615 break;
616 }
617 }
618
619 if (continuous_mode && num_cpus_to_pin == 0) {
620 fprintf(stderr, "Must pass at least one CPU to continuous "
621 "mode.\n");
622 poptPrintUsage(popt_context, stderr, 0);
623 exit(1);
624 } else if (!continuous_mode) {
625 num_cpus_to_pin = 1;
626 cpus_to_pin[0] = cpus_online - 1;
627 }
628
629 if (getuid() != 0) {
630 fprintf(stderr, "Not running as root, but almost all tests "
631 "require root in order to modify\nsystem settings. "
632 "Exiting.\n");
633 exit(1);
634 }
635
636 max_msgs = fopen(MAX_MSGS, "r+");
637 max_msgsize = fopen(MAX_MSGSIZE, "r+");
638 if (!max_msgs)
639 shutdown(2, "Failed to open msg_max", __LINE__);
640 if (!max_msgsize)
641 shutdown(2, "Failed to open msgsize_max", __LINE__);
642
643 /* Load up the current system values for everything we can */
644 getr(RLIMIT_MSGQUEUE, &saved_limits);
645 cur_limits = saved_limits;
646 saved_max_msgs = cur_max_msgs = get(max_msgs);
647 saved_max_msgsize = cur_max_msgsize = get(max_msgsize);
648 errno = 0;
649 cur_nice = getpriority(PRIO_PROCESS, 0);
650 if (errno)
651 shutdown(2, "getpriority()", __LINE__);
652
653 /* Tell the user our initial state */
654 printf("\nInitial system state:\n");
655 printf("\tUsing queue path:\t\t\t%s\n", queue_path);
656 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%lu\n", saved_limits.rlim_cur);
657 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%lu\n", saved_limits.rlim_max);
658 printf("\tMaximum Message Size:\t\t\t%d\n", saved_max_msgsize);
659 printf("\tMaximum Queue Size:\t\t\t%d\n", saved_max_msgs);
660 printf("\tNice value:\t\t\t\t%d\n", cur_nice);
661 printf("\n");
662
663 increase_limits();
664
665 printf("Adjusted system state for testing:\n");
666 if (cur_limits.rlim_cur == RLIM_INFINITY) {
667 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t(unlimited)\n");
668 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t(unlimited)\n");
669 } else {
670 printf("\tRLIMIT_MSGQUEUE(soft):\t\t\t%lu\n",
671 cur_limits.rlim_cur);
672 printf("\tRLIMIT_MSGQUEUE(hard):\t\t\t%lu\n",
673 cur_limits.rlim_max);
674 }
675 printf("\tMaximum Message Size:\t\t\t%d\n", cur_max_msgsize);
676 printf("\tMaximum Queue Size:\t\t\t%d\n", cur_max_msgs);
677 printf("\tNice value:\t\t\t\t%d\n", cur_nice);
678 printf("\tContinuous mode:\t\t\t(%s)\n", continuous_mode ?
679 (continuous_mode_fake ? "fake mode" : "enabled") :
680 "disabled");
681 printf("\tCPUs to pin:\t\t\t\t%d", cpus_to_pin[0]);
682 for (cpu = 1; cpu < num_cpus_to_pin; cpu++)
683 printf(",%d", cpus_to_pin[cpu]);
684 printf("\n");
685
686 sa.sa_sigaction = sig_action_SIGUSR1;
687 sigemptyset(&sa.sa_mask);
688 sigaddset(&sa.sa_mask, SIGHUP);
689 sigaddset(&sa.sa_mask, SIGINT);
690 sigaddset(&sa.sa_mask, SIGQUIT);
691 sigaddset(&sa.sa_mask, SIGTERM);
692 sa.sa_flags = SA_SIGINFO;
693 if (sigaction(SIGUSR1, &sa, NULL) == -1)
694 shutdown(1, "sigaction(SIGUSR1)", __LINE__);
695 sa.sa_sigaction = sig_action;
696 if (sigaction(SIGHUP, &sa, NULL) == -1)
697 shutdown(1, "sigaction(SIGHUP)", __LINE__);
698 if (sigaction(SIGINT, &sa, NULL) == -1)
699 shutdown(1, "sigaction(SIGINT)", __LINE__);
700 if (sigaction(SIGQUIT, &sa, NULL) == -1)
701 shutdown(1, "sigaction(SIGQUIT)", __LINE__);
702 if (sigaction(SIGTERM, &sa, NULL) == -1)
703 shutdown(1, "sigaction(SIGTERM)", __LINE__);
704
705 if (!continuous_mode_fake) {
706 attr.mq_flags = O_NONBLOCK;
707 attr.mq_maxmsg = cur_max_msgs;
708 attr.mq_msgsize = MSG_SIZE;
709 open_queue(&attr);
710 }
711 for (i = 0; i < num_cpus_to_pin; i++) {
712 pthread_attr_t thread_attr;
713 void *thread_func;
714
715 if (continuous_mode_fake)
716 thread_func = &fake_cont_thread;
717 else if (continuous_mode)
718 thread_func = &cont_thread;
719 else
720 thread_func = &perf_test_thread;
721
722 CPU_ZERO_S(cpu_set_size, cpu_set);
723 CPU_SET_S(cpus_to_pin[i], cpu_set_size, cpu_set);
724 pthread_attr_init(&thread_attr);
725 pthread_attr_setaffinity_np(&thread_attr, cpu_set_size,
726 cpu_set);
727 if (pthread_create(&cpu_threads[i], &thread_attr, thread_func,
728 NULL))
729 shutdown(1, "pthread_create()", __LINE__);
730 pthread_attr_destroy(&thread_attr);
731 }
732
733 if (!continuous_mode) {
734 pthread_join(cpu_threads[0], &retval);
735 shutdown((long)retval, "perf_test_thread()", __LINE__);
736 } else {
737 while (1)
738 sleep(1);
739 }
740 shutdown(0, "", 0);
741}
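
For reference, the timing pattern that the test wraps in do_send_recv() can be reduced to a few lines of ordinary POSIX mqueue code. The sketch below is illustrative only and is not part of the kernel tree; the queue name, message size, and use of CLOCK_MONOTONIC are arbitrary choices for the example (the test itself uses the thread's CPU-time clock from pthread_getcpuclockid()). On older glibc it may need to be built with something like "gcc -Wall mq_example.c -lrt".

/* mq_example.c - minimal mqueue round-trip timing sketch (illustrative only) */
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <sys/stat.h>
#include <time.h>

int main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 64 };
	char buff[64] = { 0 };
	unsigned int prio;
	struct timespec start, end;
	long long nsec;
	mqd_t q;

	/* Create/open the example queue, read-write, world-readable */
	q = mq_open("/mq_example", O_RDWR | O_CREAT, DEFFILEMODE, &attr);
	if (q == (mqd_t)-1) {
		perror("mq_open");
		return 1;
	}
	/* Time one send immediately followed by one receive */
	clock_gettime(CLOCK_MONOTONIC, &start);
	if (mq_send(q, buff, sizeof(buff), 0) ||
	    mq_receive(q, buff, sizeof(buff), &prio) != sizeof(buff)) {
		perror("mq_send/mq_receive");
		return 1;
	}
	clock_gettime(CLOCK_MONOTONIC, &end);
	nsec = (long long)(end.tv_sec - start.tv_sec) * 1000000000LL +
	       (end.tv_nsec - start.tv_nsec);
	printf("round trip: %lld nsec\n", nsec);
	mq_close(q);
	mq_unlink("/mq_example");
	return 0;
}

Running this prints a single round-trip latency; the selftest above simply repeats the same measurement many times, splits the send and receive halves, and accumulates per-direction totals.
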
diff --git a/usr/Kconfig b/usr/Kconfig
index 65b845bd4e3e..085872bb2bb5 100644
--- a/usr/Kconfig
+++ b/usr/Kconfig
@@ -134,7 +134,7 @@ config INITRAMFS_COMPRESSION_BZIP2
 	depends on RD_BZIP2
 	help
 	  Its compression ratio and speed is intermediate.
-	  Decompression speed is slowest among the four. The initramfs
+	  Decompression speed is slowest among the choices. The initramfs
 	  size is about 10% smaller with bzip2, in comparison to gzip.
 	  Bzip2 uses a large amount of memory. For modern kernels you
 	  will need at least 8MB RAM or more for booting.
@@ -143,9 +143,9 @@ config INITRAMFS_COMPRESSION_LZMA
 	bool "LZMA"
 	depends on RD_LZMA
 	help
-	  The most recent compression algorithm.
-	  Its ratio is best, decompression speed is between the other
-	  three. Compression is slowest. The initramfs size is about 33%
+	  This algorithm's compression ratio is best.
+	  Decompression speed is between the other choices.
+	  Compression is slowest. The initramfs size is about 33%
 	  smaller with LZMA in comparison to gzip.
 
 config INITRAMFS_COMPRESSION_XZ
@@ -161,7 +161,7 @@ config INITRAMFS_COMPRESSION_LZO
 	bool "LZO"
 	depends on RD_LZO
 	help
-	  Its compression ratio is the poorest among the four. The kernel
+	  Its compression ratio is the poorest among the choices. The kernel
 	  size is about 10% bigger than gzip; however its speed
 	  (both compression and decompression) is the fastest.
 
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 01f572c10c71..b1e091ae2f37 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -635,7 +635,6 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	int r = 0, idx;
 	struct kvm_assigned_dev_kernel *match;
 	struct pci_dev *dev;
-	u8 header_type;
 
 	if (!(assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU))
 		return -EINVAL;
@@ -668,8 +667,7 @@ static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
 	}
 
 	/* Don't allow bridges to be assigned */
-	pci_read_config_byte(dev, PCI_HEADER_TYPE, &header_type);
-	if ((header_type & PCI_HEADER_TYPE) != PCI_HEADER_TYPE_NORMAL) {
+	if (dev->hdr_type != PCI_HEADER_TYPE_NORMAL) {
 		r = -EPERM;
 		goto out_put;
 	}
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index a6a0365475ed..5afb43114020 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -332,6 +332,7 @@ static int setup_routing_entry(struct kvm_irq_routing_table *rt,
 	 */
 	hlist_for_each_entry(ei, n, &rt->map[ue->gsi], link)
 		if (ei->type == KVM_IRQ_ROUTING_MSI ||
+		    ue->type == KVM_IRQ_ROUTING_MSI ||
 		    ue->u.irqchip.irqchip == ei->irqchip.irqchip)
 			return r;
 